query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Plot the scores obtained by a list of methods on a list of graphs.
|
from typing import Callable

import cdlib.evaluation
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns


def plot_scoring(
    graphs: list,
    ref_partitions: object,
    graph_names: list,
    methods: list,
    scoring: Callable[
        [object, object], object
    ] = cdlib.evaluation.adjusted_mutual_information,
    nbRuns: int = 5,
) -> object:
    forDF = []
    for i, g in enumerate(graphs):
        for m in methods:
            # Run each method several times to average out non-determinism.
            for r in range(nbRuns):
                partition = m(g)
                score = scoring(partition, ref_partitions[i]).score
                forDF.append([graph_names[i], score, partition.get_description()])
    df = pd.DataFrame(columns=["graph", "score", "method"], data=forDF)
    ax = sns.lineplot(x="graph", y="score", hue="method", data=df, legend="brief")
    ax.legend(loc="best")
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
    plt.tight_layout()
    return ax
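
A minimal usage sketch for plot_scoring, assuming cdlib and networkx are installed. The example graphs and methods are illustrative; the toy graphs have no ground-truth communities, so a Louvain partition stands in as the reference partition purely for demonstration.

# Hedged usage sketch, not part of the original function.
import networkx as nx
from cdlib import algorithms

graphs = [nx.karate_club_graph(), nx.les_miserables_graph()]
graph_names = ["karate", "les_miserables"]
# No ground truth available here, so use a Louvain partition as the reference.
ref_partitions = [algorithms.louvain(g) for g in graphs]

ax = plot_scoring(
    graphs,
    ref_partitions,
    graph_names,
    methods=[algorithms.label_propagation, algorithms.greedy_modularity],
)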
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_graph(self) -> None:",
"def plot_time_advances(self, path: str = None, methods: list = None, title=None):\n df = self.get_results(methods)\n ax = plt.axes()\n sns.lineplot(x='epoch', y='value', data=df, hue='statistics', style='method', ax=ax)\n plt.ylim(-31, 11)\n plt.ylabel('test statistics of log-lkl')\n lgd = plt.legend(loc='upper left', bbox_to_anchor=[1.01, -0.1, 0.2, 0.8], ncol=1)\n ax.set_position([0.1, 0.1, 0.75, 0.8])\n if title is not None:\n ax.set_title(title)\n if path is not None:\n for extension in ['eps', 'png']:\n plt.savefig(f'{path}.{extension}', format=extension)",
"def add_plot(self, method, x, y, *args, **kwargs):\n self.plots.append([self.Plot(method, x, y, args, kwargs)])",
"def plot(self,plotAll=True):\n\t\tfor i in range(0,4):\n\t\t\tfor j in range(0,len(self.fbPolNames[i])):\n\t\t\t\t\n\t\t\t\tif plotAll==True:\n\t\t\t\t\tnewPlot=self.plotOfSinglePol(i,j,alsoPlotRawAndFit=True)\n\t\t\t\telse:\n\t\t\t\t\tnewPlot=self.plotOfSinglePol(i,j,alsoPlotRawAndFit=False)\n\t\t\t\tnewPlot.plot()",
"def plot_all(best_results: BestResults,\n *args,\n **kwargs) -> plt.Figure:\n if isinstance(best_results, BestResultsOne):\n return plot_all_one(best_results, *args, **kwargs)\n elif isinstance(best_results, BestResultsTwo):\n return plot_all_two(best_results, *args, **kwargs)\n else:\n raise ValueError('best_results argument is of unknown type')",
"def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()",
"def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()",
"def plot(self, *names):\r\n for name in names:\r\n if name in self.__obs.keys():\r\n list_obs = self.__obs[name]\r\n if not isinstance(list_obs[0], matrix):\r\n fig = plt.figure()\r\n plt.plot(self.__obs[name])\r\n else:\r\n fig = plt.figure()\r\n for i in range(list_obs[0].size):\r\n plt.plot([float(obs[i]) for obs in list_obs], label=\"Dimension {0}\".format(i))\r\n plt.legend()\r\n plt.ylabel(name)\r\n plt.show()\r\n else:\r\n for sous_objet in self.__sous_objets:\r\n if re.match((sous_objet+\"?\").encode('string-escape'), name.lower()):\r\n self.__dict__[sous_objet].plot(name)",
"def evaluate(self, plot):",
"def plot_results(self, callback, logs, report_rate):\n format = \"svg\"\n\n self._history.append((self._epoch, logs))\n self._epoch += 1\n\n metrics = [list(history[1].keys()) for history in self._history]\n metrics = set([item for sublist in metrics for item in sublist])\n\n if callback._figure is not None:\n # figure and axes objects have already been created\n fig, loss_ax, acc_ax = callback._figure\n loss_ax.clear()\n if acc_ax is not None:\n acc_ax.clear()\n else:\n # first time called, so create figure and axes objects\n if (\n (\"acc\" in metrics)\n or (\"val_acc\" in metrics)\n or (\"accuracy\" in metrics)\n or (\"val_accuary\" in metrics)\n ):\n fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(10, 4))\n else:\n fig, loss_ax = plt.subplots(1)\n acc_ax = None\n callback._figure = fig, loss_ax, acc_ax\n\n def get_xy(name):\n return [\n (history[0], history[1][name])\n for history in self._history\n if name in history[1]\n ]\n\n for metric in metrics:\n xys = get_xy(metric)\n x_values = [xy[0] for xy in xys]\n y_values = [xy[1] for xy in xys]\n if metric == \"loss\":\n loss_ax.plot(x_values, y_values, label=metric, color=\"r\") # red\n elif metric == \"val_loss\":\n loss_ax.plot(x_values, y_values, label=metric, color=\"orange\")\n elif metric in [\"acc\", \"accuracy\"] and acc_ax is not None:\n acc_ax.plot(x_values, y_values, label=metric, color=\"b\") # blue\n elif metric in [\"val_acc\", \"val_accuracy\"] and acc_ax is not None:\n acc_ax.plot(x_values, y_values, label=metric, color=\"c\") # cyan\n # FIXME: add a chart for each metric\n # else:\n # loss_ax.plot(x_values, y_values, label=metric)\n\n loss_ax.set_ylim(bottom=0)\n loss_ax.set_title(\"%s: Training Loss\" % (self.name,))\n loss_ax.set_xlabel(\"Epoch\")\n loss_ax.legend(loc=\"best\")\n if acc_ax is not None:\n acc_ax.set_ylim([-0.1, 1.1])\n acc_ax.set_title(\"%s: Traning Accuracy\" % (self.name,))\n acc_ax.set_xlabel(\"Epoch\")\n acc_ax.legend(loc=\"best\")\n\n if True or format == \"svg\":\n # if (callback is not None and not callback.in_console) or format == \"svg\":\n bytes = io.BytesIO()\n plt.savefig(bytes, format=\"svg\")\n img_bytes = bytes.getvalue()\n if HTML is not None:\n clear_output(wait=True)\n display(HTML(img_bytes.decode()))\n else:\n raise Exception(\"need to install `IPython` to display matplotlib plots\")\n else: # format is None\n plt.pause(0.01)\n # plt.show(block=False)",
"def plotGraph(self, dayArray, commentsArray, upvotesArray, retweetsArray, likesArray):\n self.canvas.get_tk_widget().place(relx=0.219, rely=0.519, relheight=0.389, relwidth=0.352)\n\n # Clears graph before plotting to prevent appending two graphs at once\n self.figure.clear()\n # self.figure.\n plt = self.figure.add_subplot(1, 1, 1)\n x = []\n max_log_size = 5000\n for i in dayArray:\n i = ''.join(i.split())\n i = i[:-5]\n x.append(i)\n\n # now there's 3 sets of points\n yCO = commentsArray\n yUV = upvotesArray\n yRT = retweetsArray\n yLK = likesArray\n\n if max(yCO)>=max_log_size or max(yUV)>=max_log_size or max(yRT)>=max_log_size or max(yLK)>=max_log_size:\n plt.set(yscale=\"log\")\n plt.plot(x, yCO, label='Comments', marker='o', color='red')\n plt.plot(x, yUV, label='Upvotes', marker='o', color='#fa93b0')\n plt.plot(x, yRT, label='Retweets', marker='o', color='#2374f7')\n plt.plot(x, yLK, label='Likes', marker='o', color='#accafa')\n\n plt.legend()\n self.figure.canvas.draw()",
"def plot_list(self):\n wrapper = TextWrapper(subsequent_indent = \" \" * 22,\n width = 78)\n for method, func in self.get_available_figures():\n if method != \"list\":\n wrapper.initial_indent = (\"%-20s \" % method).ljust(22)\n print wrapper.fill(func.figure_name)",
"def plot_solutions(self, solutions_list, plot_w=False, savefig_filename=None, display=True):\n plt.figure(figsize=(13, 8))\n for s in solutions_list:\n U = s[0]\n method, T = s[1]\n plt.plot(self.x, U[:self.N], label=rf\"$U$ : {method}, $T = {T}$\")\n if plot_w:\n plt.plot(self.x, U[self.N:], label=rf\"$U_t$ : {method}, $T = {T}$\")\n plt.xlabel(r\"$x$\")\n plt.ylabel(r\"$U, U_t$\")\n plt.title(\"Plot of Various Models at Different Times\")\n plt.legend()\n plt.grid()\n if savefig_filename:\n plt.savefig(savefig_filename) if savefig_filename.endswith(\".png\") else plt.savefig(savefig_filename+\".jpg\")\n if display:\n plt.show()",
"def plot(self, rerun=False, ylabel=\"Time (seconds)\"):\n if self.results is None or rerun is True:\n self.run_methods()\n # an alias\n data = self.results\n\n methods = sorted(data, key=lambda x: pylab.mean(data[x]))\n pylab.boxplot([data[x] for x in methods])\n # pylab.xticks([1+this for this in range(len(methods))], methods)\n pylab.xticks(*zip(*enumerate(methods, start=1)))\n pylab.grid(True)\n pylab.ylabel(ylabel)\n pylab.xlim([0, len(methods)+1])",
"def plot_data(array_list, params):\n\tkey = array_list[0]\n\tvals = array_list[1]\n\tprint key\n\tprint len(vals)\n\tfigure_id = 1\n\tfor item in params:\n\t\tx_axis = get_by_keys(item['x_keys'], key, vals)\n\t\ty_axis = get_by_keys(item['y_keys'], key, vals)\n\t\tplt.figure(figure_id)\n\t\tplt.plot(x_axis, y_axis)\n\t\tfigure_id = figure_id + 1\n\tplt.show()",
"def plot_scores(scores, cfg, plot_agents=True, mas=[100], plot_individual_episodes=False):\n scores = pd.Series(scores.max(axis=1))\n\n if plot_agents:\n scores_all = pd.DataFrame(scores)\n\n # plot the scores\n fig = plt.figure(figsize=(12, 8))\n ax = fig.add_subplot(111)\n\n for ma_i in mas:\n\n if plot_individual_episodes:\n ax.plot(scores.index, scores.values, ls=\"-\", color=\"k\", label=\"Scores\")\n\n ma = scores.rolling(ma_i).mean()\n ax.plot(ma.index, ma.values, ls=\"-\", color=\"r\", lw=2,\n label=\"Moving average scores ({:d} episodes)\".format(ma_i))\n\n # need to define other colors for individual agents\n if plot_agents and scores_all.shape[1] > 1:\n for iagent in range(scores_all.shape[1]):\n scores_i = scores_all[iagent]\n\n if plot_individual_episodes:\n ax.plot(scores_all.index, scores_i, ls=\"-\", color=\"k\",\n label=\"Scores (Agent {:d})\".format(iagent))\n\n ma = scores.rolling(ma_i).mean()\n ax.plot(ma.index, ma.values, ls=\"-\", color=\"r\", lw=2,\n label=\"Moving average scores ({:d} episodes, Agent {:d})\".format(ma_i,\n iagent))\n\n plt.gca().xaxis.set_major_formatter(FormatStrFormatter(\"%.0f\"))\n\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.legend(loc=\"upper left\")\n\n plt.savefig(os.path.join(cfg.experiment_path, \"scores.pdf\"))\n plt.savefig(os.path.join(cfg.experiment_path, \"scores.png\"))",
"def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)",
"def plot(self, ax=None, savefile=None, shells=None, color='b', title=None,\n xlabel=None, ylabel=None, withavg=False):\n import matplotlib.pyplot as plt\n if ax is None:\n plt.figure()\n axset=plt\n else:\n axset=ax\n\n cmax = float(max(self.counts))\n total = sum(self.counts)\n nalpha = 0.85 if cmax/total > 0.33 else 0.65\n maxy = 1.\n for di, df in enumerate(self.dfs):\n alpha=nalpha*self.counts[di]/cmax\n axset.plot(df.x, df.df, color=color, alpha=alpha)\n maxy_ = np.max(df.df)\n if maxy_ > maxy:\n maxy = maxy_\n\n if withavg and len(self) > 0:\n x = self.dfs[0].x\n axset.plot(x, self.average, 'r-')\n maxy_ = np.max(self.average)\n if maxy_ > maxy:# pragma: no cover\n maxy = maxy_\n\n if len(self) > 0:\n dtype = self.dfs[0].dtype\n unit = \"Ang.\" if dtype == \"R\" else \"Rad.\"\n tstr = \"Radial\" if dtype == \"R\" else \"Angular\"\n else:# pragma: no cover\n unit = \"unknown units\"\n tstr = \"\"\n \n if ax is None:\n if title is None:\n plt.title(\"{} Distribution Function of Collection\".format(tstr))\n else:\n plt.title(title)\n if xlabel is None:\n plt.xlabel(\"Distance ({})\".format(unit))\n else:\n plt.xlabel(xlabel)\n if ylabel is None:\n plt.ylabel(\"Accumulated Density\")\n else:\n plt.ylabel(ylabel)\n\n _plot_shells(axset, shells, maxy)\n \n if savefile is not None:\n plt.savefig(savefile)\n\n from gblearn.base import testmode\n if not testmode:# pragma: no cover\n plt.show()\n return axset",
"def _apply_plot(self, *args, cmap=None, values=None, **kwargs):\n # Deprecated functionality\n if cmap is not None:\n warnings._warn_proplot(\n 'Drawing \"parametric\" plots with ax.plot(x, y, values=values, cmap=cmap) '\n 'is deprecated and will be removed in the next major release. Please use '\n 'ax.parametric(x, y, values, cmap=cmap) instead.'\n )\n return self.parametric(*args, cmap=cmap, values=values, **kwargs)\n\n # Plot line(s)\n method = kwargs.pop('_method')\n name = method.__name__\n sx = 'y' if 'x' in name else 'x' # i.e. plotx\n objs = []\n args = list(args)\n while args:\n # Support e.g. x1, y1, fmt, x2, y2, fmt2 input\n # NOTE: Copied from _process_plot_var_args.__call__ to avoid relying\n # on public API. ProPlot already supports passing extra positional\n # arguments beyond x, y so can feed (x1, y1, fmt) through wrappers.\n # Instead represent (x2, y2, fmt, ...) as successive calls to plot().\n iargs, args = args[:2], args[2:]\n if args and isinstance(args[0], str):\n iargs.append(args[0])\n args = args[1:]\n\n # Call function\n iobjs = method(self, *iargs, values=values, **kwargs)\n\n # Add sticky edges\n # NOTE: Skip edges when error bars present or caps are flush against axes edge\n lines = all(isinstance(obj, mlines.Line2D) for obj in iobjs)\n if lines and not getattr(self, '_no_sticky_edges', False):\n for obj in iobjs:\n data = getattr(obj, 'get_' + sx + 'data')()\n if not data.size:\n continue\n convert = getattr(self, 'convert_' + sx + 'units')\n edges = getattr(obj.sticky_edges, sx)\n edges.append(convert(min(data)))\n edges.append(convert(max(data)))\n\n objs.extend(iobjs)\n\n return tuple(objs)",
"def plot(self, *args, **kwargs):\n pass",
"def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))",
"def multiple_simulated_annealing(scores):\n\n\tplt.plot(range(0, len(scores[0])), scores[0], label = \"geman\")\n\tplt.plot(range(0, len(scores[1])), scores[1], label = \"linear\")\n\tplt.plot(range(0, len(scores[2])), scores[2], label = \"sigmoidal\")\n\tplt.plot(range(0, len(scores[3])), scores[3], label = \"exponential\")\n\tplt.ylabel(\"Score\")\n\tplt.xlabel(\"Runs\")\n\tplt.title(\"Simulated annealing\")\n\tplt.legend()\n\tplt.show()",
"def plot_results(epochs: int = 20, segments: int = 5, plot: bool = True):\n \"\"\"\n plt.figure(0)\n plot_approximation(\"product\", modelSetProd, 1, epochs, gpus=0)\n \"\"\"\n\n data = [\n {\n \"title\": \"Piecewise Discontinuous Function Approximation\",\n \"layer\": \"discontinuous\",\n \"model_set\": modelSetD,\n },\n {\n \"title\": \"Piecewise Continuous Function Approximation\",\n \"layer\": \"continuous\",\n \"model_set\": modelSetC,\n },\n {\n \"title\": \"Polynomial function approximation\",\n \"layer\": \"polynomial\",\n \"model_set\": modelSetP,\n },\n {\n \"title\": \"Fourier function approximation\",\n \"layer\": \"fourier\",\n \"model_set\": modelSetF,\n },\n ]\n\n for index, element in enumerate(data):\n if plot is True:\n plt.figure(index)\n plot_approximation(\n element[\"layer\"],\n element[\"model_set\"],\n 5,\n epochs,\n accelerator=\"cpu\",\n periodicity=2,\n )\n\n if plot is True:\n plt.title(\"Piecewise Discontinuous Function Approximation\")\n\n if plot is True:\n plt.show()",
"def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float],\n ):\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.show()",
"def plot_results_matrix(algorithm, classes):\r\n print('Generating result matrix for ' + algorithm)\r\n\r\n blue_coords = extract_lists(classes, 'blue')\r\n red_coords = extract_lists(classes, 'red')\r\n\r\n plt.plot(blue_coords[0], blue_coords[1], 'bd')\r\n plt.plot(red_coords[0], red_coords[1], 'rd')\r\n\r\n plt.savefig(algorithm + '.png')\r\n print('Saving file ' + algorithm + '.png')",
"def plot(self, *args, **kwargs):\n raise NotImplementedError",
"def plot_files(plot_file_name, files):\n curve_names, metric_sets, set_of_number_of_embeddings = _read_result_pickle(files)\n\n _plot_curves(plot_file_name, curve_names, metric_sets, set_of_number_of_embeddings)",
"def plot_live(X, y, evaluator, param_name, param_range, scale='log', ylim=(0,1), ylabel='score'):\n # Plot interactively\n plt.ion()\n plt.ylabel(ylabel)\n plt.xlabel(param_name)\n \n # Make the scale look nice\n plt.xscale(scale)\n plt.xlim(param_range[0],param_range[-1])\n plt.ylim(ylim)\n \n # Start from empty plot, then fill it\n series = {}\n lines = {}\n xvals = []\n for i in param_range:\n scores = evaluator(X, y, i) \n if i == param_range[0]: # initialize series\n for k in scores.keys():\n lines[k], = plt.plot(xvals, [], marker = '.', label = k)\n series[k] = []\n xvals.append(i)\n for k in scores.keys(): # append new data\n series[k].append(scores[k])\n lines[k].set_data(xvals, series[k])\n # refresh plot\n plt.legend(loc='best')\n plt.margins(0.1)\n display.display(plt.gcf())\n display.clear_output(wait=True)",
"def graph_results(loss, acc):\n N = len(loss)\n x = np.linspace(0, N, N)\n plt.subplot(1,2,1)\n plt.plot(x, loss)\n plt.subplot(1,2,2)\n plt.plot(x,acc)\n plt.show()",
"def plot(self, axes=None, seqs=[\"best_feas_seq\"],\n extend_to=None, filename=None, dpi=300, **kwargs):\n if extend_to is None:\n extend_to = self.solver.cpu.total\n for seq in seqs:\n seq = getattr(self, seq)\n tseries = SolutionList.make_tseries(seq(), extend_to=extend_to)\n axes = tseries.plot(axes=axes, xlabel=\"CPU time (s)\", ylabel=\"Objective\",\n filename=filename, dpi=dpi, **kwargs)\n return axes"
] |
[
"0.612018",
"0.60104775",
"0.59162647",
"0.58902407",
"0.5849361",
"0.5841088",
"0.5839306",
"0.5805346",
"0.5803431",
"0.57910824",
"0.57711995",
"0.5769438",
"0.57638323",
"0.57449424",
"0.57376575",
"0.5734237",
"0.5729009",
"0.5682087",
"0.5679241",
"0.5673704",
"0.5671183",
"0.5666197",
"0.56294495",
"0.5613235",
"0.5610584",
"0.55823696",
"0.55775213",
"0.55732757",
"0.5568422",
"0.55644804"
] |
0.7017071
|
0
|
This function returns the elevation based on the .tif files located in the ./data/elevation/ folder, using the rasterio library.
|
import logging

import rasterio as rio

# get_config() and get_file_path() are project-level helpers defined elsewhere.


def get_elevation_data(lat, lon):
    logging.info("Getting elevation data for the coordinate ({}, {}).".format(lat, lon))
    # Initialising function variables
    grid_lat = None
    grid_lon = None
    coord = (lon, lat)
    config_data = get_config()["gis"]
    elev_file_name = config_data["input_file_name"]
    logging.info("Determining the appropriate tif file for the coordinate ({}, {}).".format(lat, lon))
    # Determine location's latitude data from the image
    # grid. Valid values are 1 and 2.
    for key, value in config_data["latitude_condition"].items():
        if value["min_lat"] <= lat <= value["max_lat"]:
            grid_lat = value["grid_lat"]
    # Determine location's longitude data from the image
    # grid. Valid values are A, B, C and D.
    for key, value in config_data["longitude_condition"].items():
        if value["min_lon"] <= lon <= value["max_lon"]:
            grid_lon = value["grid_lon"]
    # Determine that there is a valid grid_lat and grid_lon data.
    if grid_lat is None or grid_lon is None:
        logging.error("Invalid coordinate ({}, {}). Please check the value!".format(lat, lon))
        raise ValueError("Invalid coordinate ({}, {}).".format(lat, lon))
    grid_id = "".join([grid_lon, grid_lat])
    file_name = elev_file_name.format(grid_id=grid_id)
    # Retrieve the elevation tif file path based on grid_id.
    elev_file_path = get_file_path(folder_name="data"
                                   ,subdirectory=config_data["input_subdirectory"]
                                   ,file_name=file_name)
    logging.info("Elevation data for the coordinate ({}, {}) is in file {}.".format(lat, lon, file_name))
    # Retrieve the elevation data found in elev_file_path.
    with rio.open(elev_file_path) as file:
        elevs = file.sample((coord, coord))
        elev = next(elevs)[0]
    logging.info("Completed retrieving elevation data for the coordinate ({}, {}). Elevation value: {}.".format(lat, lon, elev))
    return elev
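
A hedged sketch of the dictionary that get_config()["gis"] is assumed to return for this lookup. The keys mirror the code above; the grid boundaries, grid ids and file-name pattern are placeholders, not real project values.

# Hedged sketch of the assumed "gis" configuration (placeholder values only).
example_gis_config = {
    "input_file_name": "elevation_{grid_id}.tif",
    "input_subdirectory": "elevation",
    "latitude_condition": {
        "north": {"min_lat": 0.0, "max_lat": 45.0, "grid_lat": "1"},
        "south": {"min_lat": -45.0, "max_lat": 0.0, "grid_lat": "2"},
    },
    "longitude_condition": {
        "west": {"min_lon": 100.0, "max_lon": 120.0, "grid_lon": "A"},
        "east": {"min_lon": 120.0, "max_lon": 140.0, "grid_lon": "B"},
    },
}

# elev = get_elevation_data(-6.2, 106.8)  # works only if the grid covers this point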
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def elevation(x, y):\n file = os.path.abspath(\"..\") + \"\\Shape\\Shape.vrt\"\n layer = gdal.Open(file)\n gt = layer.GetGeoTransform()\n rasterx = int((x - gt[0]) / gt[1])\n rastery = int((y - gt[3]) / gt[5])\n print('elevation =', layer.GetRasterBand(1).ReadAsArray(rasterx, rastery, 1, 1)[0][0], 'm above sea level')",
"def get_raster_elevation(dataset, resample=None, **kwargs):\n extent = get_raster_extent(dataset)\n src_ds = wradlib.io.dem.get_srtm(extent, **kwargs)\n\n driver = gdal.GetDriverByName(\"MEM\")\n dst_ds = driver.CreateCopy(\"ds\", dataset)\n\n if resample is None:\n src_gt = src_ds.GetGeoTransform()\n dst_gt = dst_ds.GetGeoTransform()\n src_scale = min(abs(src_gt[1]), abs(src_gt[5]))\n dst_scale = min(abs(dst_gt[1]), abs(dst_gt[5]))\n ratio = dst_scale / src_scale\n\n resample = gdal.GRA_Bilinear\n if ratio > 2:\n resample = gdal.GRA_Average\n if ratio < 0.5:\n resample = gdal.GRA_NearestNeighbour\n\n gdal.ReprojectImage(\n src_ds, dst_ds, src_ds.GetProjection(), dst_ds.GetProjection(), resample\n )\n elevation = read_gdal_values(dst_ds)\n\n return elevation",
"def get_surface_elevation(wind_lat, wind_lon):\n # Load the NetCDF file containing the geopotential of Europe.\n nc = Dataset(path_join(era5_data_dir, geopotential_file_name))\n \n # Read the variables from the netCDF file.\n geopot_lat = nc.variables['latitude'][:]\n geopot_lon = nc.variables['longitude'][:]\n \n \n # Check if wind and geopotential data use same grid.\n assert np.array_equal(geopot_lat, wind_lat) and np.array_equal(geopot_lon, wind_lon), \\\n \"Requested latitudes and/or longitudes do not correspond to those in the NetCDF file.\"\n\n geopot_z = nc.variables['z'][0, :, :]\n nc.close()\n\n surface_elevation = geopot_z/9.81\n print(\"Minimum and maximum elevation found are respectively {:.1f}m and {:.1f}m, removing those below zero.\"\n .format(np.amin(surface_elevation), np.amax(surface_elevation)))\n\n # Get rid of negative elevation values.\n for i, row in enumerate(surface_elevation):\n for j, val in enumerate(row):\n if val < 0.:\n surface_elevation[i, j] = 0.\n\n return surface_elevation",
"def read_elevation(filepath):\n h = 83 #distance between elevation measures\n N = 1201\n theta = np.pi / 6\n elev_array = np.zeros((N, N))\n grad_array = np.zeros((N, N, 2))\n I_array = np.zeros((N, N))\n # Read the elevation data as described in Question 3, and store in the elvation array\n f = open(filepath, \"rb\")\n for i in range(N):\n for j in range(N):\n buf = f.read(2)\n val = struct.unpack(\">h\", buf)[0]\n elev_array[i][j] = val\n f.close()\n # Populate the gradient array\n for i in range(N):\n for j in range(N):\n #This if statements handle the border cases\n if j == 0:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j]) / h\n elif j == N - 1:\n grad_array[i][j][0] = (elev_array[i][j] - elev_array[i][j-1]) / h\n else:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j-1]) / (2 * h)\n \n if i == 0:\n grad_array[i][j][1] = (elev_array[i][j] - elev_array[i-1][j]) / h\n elif i == N - 1:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i][j]) / h\n else:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i+1][j]) / (2 * h)\n \n # Populate intensities\n for i in range(N):\n for j in range(N):\n denom = np.sqrt(grad_array[i][j][0] ** 2 + grad_array[i][j][1] ** 2 + 1)\n numer = np.cos(theta) * grad_array[i][j][0] + np.sin(theta) * grad_array[i][j][1]\n I_array[i][j] = -1 * numer / denom\n \n return elev_array, I_array",
"def imu_get_elevation(self):\n return self.imu.get_elevation()",
"def parse_azimuth_elevation(filename):\n match = REGEX.match(filename)\n return int(match.group(1)), int(match.group(2))",
"def elevation(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[1];",
"def elevation(self):\n return self.container['elevation']",
"def elevation(self):\n\n\t\twidth = self.no_name_level[0]\n\t\theight = self.no_name_level[1]\n\t\ttile = self.no_name_level[2]\n\t\tx = self.no_name_level[3]\n\t\ty = self.no_name_level[4]\n\t\t\n\t\ttiles = []\n\t\tfor i in tile:\n\t\t\ti = i[:-1]\n\t\t\ttiles.append(i)\t\n\t\ttiles_arranged = [tiles[i:i + width] for i in range(0, len(tile), width)]\n\t\n\t\tplanet_co = []\n\t\t\n\t\tfor i in tiles_arranged:\n\t\t\t\n\t\t\tplanet = []\n\t\t\tfor n in i:\n\t\t\t\tn = n.split(',')\n\t\t\t\tif len(n) != 3:\n\t\t\t\t\ta = ['-']\n\t\t\t\t\tn += a\n\t\t\t\t\t\n\t\t\t\t\tplanet.append(n)\n\t\t\t\telse:\n\t\t\t\t\tplanet.append(n)\n\t\t\t\t\t\n\t\t\tplanet_co.append(planet)\n\t\t\t\n\t\n\t\tplanet_map = Planet(planet_co, width, height)\n\t\tcoordinates = Planet(planet_co, width, height)\n\t\tcoordinates = Planet.coordinates(coordinates)\n\t\tplanet_map = Planet.coordinates_dict(planet_map)#this is my map in dictionary format(coordinates : tile)\n\t\t\n\t\tfor y1 in coordinates:\n\t\t\tif coordinates.index(y1) == y:\n\t\t\t\ty_value = coordinates.index(y1)\n\t\t\t\tfor x1 in y1:\n\t\t\t\t\tif x1 == [x, y]:\n\t\t\t\t\t\tx_value = y1.index(x1)\n\t\trover_d = coordinates[y_value][x_value]\n\t\n\t\tx1 = x_value + 1\n\t\tx2 = x_value + 2\n\t\ty1 = y_value + 1\n\t\ty2 = y_value + 2\n\t\n\t\tif x1 == len(coordinates[1]):\n\t\t\tx1 == 0\n\t\tif y1 == len(coordinates):\n\t\t\ty1 == 0\n\t\n\t\tif x2 > len(coordinates[1]):\n\t\t\tx2 = 1\n\t\tif y2 > len(coordinates[1]):\n\t\t\ty2 == 1\n\t\n\t\tfront2 = coordinates[y2][x_value]\n\t\tfront1 = coordinates[y1][x_value]\n\t\tback1 = coordinates[y_value-1][x_value]\n\t\tback2 = coordinates[y_value-2][x_value]\n\t\tright1 = coordinates[y_value][x1]\n\t\tright2 = coordinates[y_value][x2]\n\t\tleft1 = coordinates[y_value][x_value-1]\n\t\tleft2 = coordinates[y_value][x_value-2]\n\t\n\t\n\t\tfront1_right1 = coordinates[y1][x1]\n\t\tfront1_right2 = coordinates[y1][x2]\n\t\tfront2_right1 = coordinates[y2][x1]\n\t\tfront2_right2 = coordinates[y2][x2]\n\t\tfront1_left1 = coordinates[y1][x_value-1]\n\t\tfront1_left2 = coordinates[y1][x_value-2]\n\t\tfront2_left1 = coordinates[y2][x_value-1]\n\t\tfront2_left2 = coordinates[y2][x_value-2]\n\t\n\t\tback1_right1 = coordinates[y_value-1][x1]\n\t\tback1_right2 = coordinates[y_value-1][x2]\n\t\tback2_right1 = coordinates[y_value-2][x1]\n\t\tback2_right2 = coordinates[y_value-2][x2]\n\t\tback1_left1 = coordinates[y_value-1][x_value-1]\n\t\tback1_left2 = coordinates[y_value-1][x_value-2]\n\t\tback2_left1 = coordinates[y_value-2][x_value-1]\n\t\tback2_left2 = coordinates[y_value-2][x_value-2]\n\t\t\n\t\tco_f2r2 = planet_map[str(front2_right2)]\n\t\tco_f2r1 = planet_map[str(front2_right1)]\n\t\tco_f2 = planet_map[str(front2)]\n\t\tco_f2l1 = planet_map[str(front2_left1)]\n\t\tco_f2l2 = planet_map[str(front2_left2)]\n\t\tco_f1r2 = planet_map[str(front1_right2)]\n\t\tco_f1r1 = planet_map[str(front1_right1)]\n\t\tco_f1 = planet_map[str(front1)]\n\t\tco_f1l1 = planet_map[str(front1_left1)]\n\t\tco_f1l2 = planet_map[str(front1_left2)]\n\t\tco_r2 = planet_map[str(right2)]\n\t\tco_r1 = planet_map[str(right1)]\n\t\tco_rover = planet_map[str([x, y])]\n\t\tco_l1 = planet_map[str(left1)]\n\t\tco_l2 = planet_map[str(left2)]\n\t\tco_b1r2 = planet_map[str(back1_right2)]\n\t\tco_b1r1 = planet_map[str(back1_right1)]\n\t\tco_b1 = planet_map[str(back1)]\n\t\tco_b1l1 = planet_map[str(back1_left1)]\n\t\tco_b1l2 = planet_map[str(back1_left2)]\n\t\tco_b2r2 = planet_map[str(back2_right2)]\n\t\tco_b2r1 = planet_map[str(back2_right1)]\n\t\tco_b2 = 
planet_map[str(back2)]\n\t\tco_b2l1 = planet_map[str(back2_left1)]\n\t\tco_b2l2 = planet_map[str(back2_left2)]\n\t\n\t\tfirst_lineco = [co_f2l2, co_f2l1, co_f2, co_f2r1, co_f2r2]\n\t\tsecond_lineco = [co_f1l2, co_f1l1, co_f1, co_f1r1, co_f1r2]\n\t\tthird_lineco = [co_l2, co_l1, co_rover, co_r1, co_r2]\n\t\tfourth_lineco = [co_b1l2, co_b1l1, co_b1, co_b1r1, co_b1r2]\n\t\tfifth_lineco = [co_b2l2, co_b2l1, co_b2, co_b2r1, co_b2r2]\n\n\t\tfirst_line = ['|']\n\t\tsec_line = ['|']\n\t\tthird_line = ['|']\n\t\tfourth_line = ['|']\n\t\tfifth_line = ['|']\n\t\tfor i in first_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfirst_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfirst_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfirst_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfirst_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfirst_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"\\|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfirst_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfirst_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"+|\")\n\n\n\n\t\tfor i in second_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tsec_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tsec_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tsec_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tsec_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tsec_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tsec_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > 
int(i[1]):\n\t\t\t\t\t\tsec_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"+|\")\n\t\n\t\tfor i in third_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tthird_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tthird_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tthird_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tthird_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tthird_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tthird_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tthird_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"+|\")\n\t\n\t\tfor i in fourth_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfourth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfourth_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfourth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfourth_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfourth_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfourth_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfourth_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > 
int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"+|\")\n\t\n\t\tfor i in fifth_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfifth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfifth_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfifth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfifth_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfifth_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfifth_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfifth_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"+|\")\n\t\tthird_line2 = []\n\t\n\t\tfor n, i in enumerate(third_line):\n\t\t\tif n == 3:\n\t\t\t\ta = \"H|\"\n\t\t\t\t \n\t\t\t\tthird_line2.append(a)\n\t\t\telse:\n\t\t\t\tthird_line2.append(i)\n\t\tnumber1_line = \"\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format(\"\".join(fifth_line), \"\".join(fourth_line), \"\".join(third_line2),\"\".join(sec_line) , \"\".join(first_line))\n\t\t\n\t\treturn number1_line\n\n\n\n\n\t\tpass",
"def get_elevation(self):\n return self.elevation",
"def elevation(self, rover):\n\t\tcurrent_tile = rover.planet.tiles[rover.y][rover.x]\n\t\t#current_tile is slope\n\t\tif current_tile.is_slope():\n\t\t\t#self is slope current_tile is slope\n\t\t\tif self.is_slope():\n\t\t\t\tif current_tile.high_elevation == self.low_elevation:\n\t\t\t\t\treturn \"/\"\n\t\t\t\tif current_tile.low_elevation == self.high_elevation:\n\t\t\t\t\treturn \"\\\\\"\n\t\t\t\tif self.high_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\t\tif self.low_elevation > current_tile.high_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.low_elevation == current_tile.low_elevation\\\n\t\t\t\t\tand self.high_elevation == current_tile.high_elevation:\n\t\t\t\t\treturn \" \"\n\t\t\t#self is flat current_tile is slope\n\t\t\telse:\n\t\t\t\tif self.low_elevation > current_tile.high_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.low_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\t\treturn \" \"\n\n\n\t\telse: #current_tile is flat\n\t\t\t#self is slope current_tile is flat\n\t\t\tif self.is_slope():\n\t\t\t\tif self.low_elevation == current_tile.low_elevation:\n\t\t\t\t\treturn \"/\"\n\t\t\t\tif self.high_elevation == current_tile.low_elevation:\n\t\t\t\t\treturn \"\\\\\"\n\t\t\t\tif self.low_elevation > current_tile.low_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.high_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\t#self is flat current_tile is flat\n\t\t\telse:\n\t\t\t\tif self.low_elevation > current_tile.low_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.high_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\treturn \" \"",
"def elevation(latitude, longitude):\n elevation = maps.Elevation()\n request = {'locations': '%f,%f' % (latitude, longitude)}\n results, status = elevation.elevation(request)\n if results:\n # We are only interested in the actual elevation\n return results[0]['elevation']\n else:\n raise UnknownLocationError(_('The location could not be found by the elevation API.'))",
"def elevation(self) -> typing.Union[None, int]:\n elevation = self.data[5]\n elevation = re.findall(r'THR (\\d+) FT', elevation)\n return int(elevation[0]) if elevation else None",
"def view_elevation(self):\n if self.elevation_toggle:\n return rasterize(self.tri_mesh, aggregator=ds.mean('z'), precompute=True)\n else:\n return hv.Curve([])",
"def query_elevation(self, xy_pos=None):\r\n query_pos = xy_pos or self.vehicleNP.get_pos()\r\n \"\"\"\r\n This method is accurate and may be useful for placing \r\n objects on the terrain surface.\r\n \"\"\"\r\n result = self.world.ray_test_closest(\r\n LPoint3(query_pos.x, query_pos.y, -10000),\r\n LPoint3(query_pos.x, query_pos.y, 10000))\r\n if result.has_hit():\r\n hit_pos = result.get_hit_pos()\r\n if not xy_pos:\r\n print(\"Bullet heightfield elevation at \"\r\n \"X {:.2f} | Y {:.2f} is {:.3f}\".format(\r\n hit_pos.x, hit_pos.y, hit_pos.z))\r\n else:\r\n hit_pos = None\r\n if not xy_pos:\r\n print(\"Could not query elevation at {}\".format(xy_pos))\r\n \r\n \"\"\"\r\n This method is less accurate than the one above.\r\n Under heavy ray-testing stress (ray tests are performed for all vehicle\r\n wheels, the above elevation query etc.) Bullet sometimes seems to be a\r\n little unreliable.\r\n \"\"\"\r\n texspace_pos = self.terrain.get_relative_point(render, query_pos)\r\n stm_pos = self.terrain_node.uv_to_world(\r\n LTexCoord(texspace_pos.x, texspace_pos.y))\r\n if not xy_pos:\r\n print(\"ShaderTerrainMesh elevation at \"\r\n \"X {:.2f} | Y {:.2f} is {:.3f}\".format(\r\n stm_pos.x, stm_pos.y, stm_pos.z))\r\n \r\n return hit_pos or stm_pos",
"def get_temperature(elevation, sea_level):\n if elevation <= sea_level:\n return 0.8\n else:\n return (-1.0 / (1.0 - sea_level)) * (elevation - sea_level) + 1.0",
"def _elevation(self, node):\n return self.graph_provider.get_coords(node)['z']",
"def elevation_json(tiff_id, heightmap):\n tiff_base64 = base64.b64encode(heightmap).decode()\n uploader = \"[email protected]\"\n datapackage = [tiff_id, tiff_base64, uploader]\n return datapackage",
"def read_DEM(fn=None, fjord=None):\n # intake.open_rasterio accepts a list of input files and may effectively do what this function does!\n # try using cropped versions of the input files. Doesn't seem to make a difference r.e. crashing\n '''\n cropped_fn = fn.rpartition(\".tif\")[0] + \"_cropped.tif\"\n print(cropped_fn)\n if os._exists(cropped_fn):\n fn = cropped_fn\n elif fjord != None:\n bbox = fjord_props.get_fjord_bounds(fjord)\n ds = rioxarray.open_rasterio(fn)\n trimmed_ds = ds.rio.slice_xy(*bbox)\n trimmed_ds.rio.to_raster(fn.rpartition(\".tif\")[0] + \"_cropped.tif\")\n del ds\n del trimmed_ds\n fn = cropped_fn \n '''\n\n # try bringing in the rasters as virtual rasters (i.e. lazily loading)\n with rasterio.open(fn) as src:\n # print('Source CRS:' +str(src.crs))\n # print(src.is_tiled)\n # print(src.block_shapes)\n with WarpedVRT(src,src_crs=src.crs,crs=src.crs) as vrt:\n # warp_mem_limit=12000,warp_extras={'NUM_THREADS':2}) as vrt:\n # print('Destination CRS:' +str(vrt.crs))\n darr = xr.open_rasterio(vrt)\n # ds = rioxarray.open_rasterio(vrt).chunk({'x':1500,'y':1500,'band':1}).to_dataset(name='HLS_Red')\n\n\n # Rasterio automatically checks that the file exists\n # ultimately switch to using rioxarray, but it causes issues down the pipeline so it will need to be debugged through\n # with rioxarray.open_rasterio(fn) as src:\n # with xr.open_rasterio(fn) as darr:\n # darr = src\n\n # open_rasterio automatically brings the geotiff in as a DataArray with 'band' as a dimensional coordinate\n # we rename it and remove the band as a coordinate, since our DEM only has one dimension\n # squeeze removes dimensions of length 0 or 1, in this case our 'band'\n # Then, drop('band') actually removes the 'band' dimension from the Dataset\n darr = darr.rename('elevation').squeeze().drop_vars('band')\n # darr = darr.rename({'band':'dtime'})\n \n # if we wanted to instead convert it to a dataset\n # attr = darr.attrs\n # darr = darr.to_dataset(name='elevation').squeeze().drop('band')\n # darr.attrs = attr\n # attr=None\n # newest version of xarray (0.16) has promote_attrs=True kwarg. Earlier versions don't...\n # darr = darr.to_dataset(name='elevation', promote_attrs=True).squeeze().drop('band')\n\n # mask out the nodata values, since the nodatavals attribute is wrong\n darr = darr.where(darr != -9999.)\n\n # the gdalwarp geoid files have this extra attribute in the geoTiff, which when brought in\n # ultimately causes a \"__module__\" related error when trying to plot with hvplot\n try:\n del darr.attrs[\"units\"] \n except KeyError:\n pass\n\n if fjord != None:\n # USE RIOXARRAY - specifically, slicexy() which can be fed the bounding box\n # darr = darr.rio.slice_xy(fjord_props.get_fjord_bounds(fjord))\n bbox = fjord_props.get_fjord_bounds(fjord)\n if pd.Series(darr.y).is_monotonic_increasing:\n darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[1], bbox[3]))\n else:\n darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[3], bbox[1]))\n \n return darr",
"def preprocess_elevation(\n src_files, dst_dir, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n log.info(\"Starting preprocessing of elevation data.\")\n dst_dem = os.path.join(dst_dir, \"elevation.tif\")\n dst_slope = os.path.join(dst_dir, \"slope.tif\")\n dst_aspect = os.path.join(dst_dir, \"aspect.tif\")\n all_exists = all([os.path.isfile(f) for f in (dst_dem, dst_slope, dst_aspect)])\n if all_exists and not overwrite:\n log.info(\"All topograpy rasters already exists. Skipping processing.\")\n return\n\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n # unzip all tiles in a temporary directory\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n # merge tiles into a single mosaic if necessary\n tiles = [f.as_posix() for f in tmpdir.glob(\"*.hgt\")]\n if len(tiles) > 1:\n dem = merge_tiles(tiles, os.path.join(tmpdir, \"mosaic.tif\"), nodata=-32768)\n else:\n dem = tiles[0]\n\n # compute slope and aspect before reprojection\n if not os.path.isfile(dst_slope) or overwrite:\n slope = compute_slope(\n dem, os.path.join(tmpdir, \"slope.tif\"), percent=False, scale=111120\n )\n else:\n log.info(\"Slope raster already exists. Skipping processing.\")\n slope = dst_slope\n if not os.path.isfile(dst_aspect) or overwrite:\n aspect = compute_aspect(\n dem, os.path.join(tmpdir, \"aspect.tif\"), trigonometric=True\n )\n else:\n log.info(\"Aspect raster already exists. Skipping processing.\")\n aspect = dst_aspect\n\n for src, dst in zip((dem, slope, aspect), (dst_dem, dst_slope, dst_aspect)):\n\n if os.path.isfile(dst) and not overwrite:\n log.info(\n f\"{os.path.basename(dst)} already exists. Skipping processing.\"\n )\n continue\n\n # dtype is Int16 for elevation, and Float32 for slope & aspect\n nodata = -9999\n dtype = \"Float32\"\n if \"elevation\" in dst:\n nodata = -32768\n dtype = \"Int16\"\n\n dst = reproject(\n src_raster=src,\n dst_raster=dst,\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=nodata,\n dst_nodata=nodata,\n dst_dtype=dtype,\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n if geom:\n mask_raster(dst, geom)",
"def elevation(self):\n return self.altitude - self.heightAboveGround",
"def add_elevation_bands(img,\n dem_img):\n elevation = ee.Image(dem_img)\n slope = ee.Terrain.slope(elevation)\n aspect = ee.Terrain.aspect(elevation)\n topo = elevation.addBands(slope).addBands(aspect)\\\n .select([0, 1, 2], ['elevation', 'slope', 'aspect'])\n return ee.Image(img).addBands(topo)",
"def getCubeElevationEstimate(cubePath, workDir=''):\n\n DEFAULT_MOON_RADIUS = 1737400 # In meters\n\n # TODO: Get these values from the file!\n sample = 2500\n line = 25000\n\n # Make sure the input file exists\n if not os.path.exists(cubePath):\n raise Exception('Cube file ' + cubePath + ' not found!')\n\n # Default working directory is the cubePath folder\n outputFolder = workDir\n if workDir == '':\n outputFolder = os.path.dirname(cubePath)\n \n if not os.path.exists(outputFolder):\n os.mkdir(outputFolder)\n\n # Call ISIS campt function to compute the pixel location\n tempTextPath = os.path.join(outputFolder, 'camptOutput.txt')\n if os.path.exists(tempTextPath):\n os.remove(tempTextPath) # Make sure any existing file is removed!\n \n # Use subprocess to suppress the command output\n cmd = ['campt', 'from=', cubePath, 'to=', tempTextPath, 'sample=', str(sample), 'line=', str(line)]\n FNULL = open(os.devnull, 'w')\n subprocess.call(cmd, stdout=FNULL, stderr=subprocess.STDOUT)\n\n # Check that we created the temporary file\n if not os.path.exists(tempTextPath):\n raise Exception('campt failed to create temporary file ' + tempTextPath)\n \n # Read in the output file to extract the pixel coordinates\n foundLine = ''\n infoFile = open(tempTextPath, 'r')\n for line in infoFile:\n if (line.find('LocalRadius') >= 0):\n foundLine = line\n break\n\n os.remove(tempTextPath) # Remove the file to clean up\n\n # Make sure we found the desired lines\n if (foundLine == ''):\n raise Exception(\"Unable to find LocalRadius in file \" + tempTextPath)\n\n # ExtractfoundLine the desired coordinates\n eqPos = foundLine.find('=')\n endPos = foundLine.find('<')\n numString = foundLine[eqPos+2:endPos-2]\n\n # Convert the absolute radius into a height relative to the mean radius of the moon\n localRadius = float(numString) - DEFAULT_MOON_RADIUS\n print 'found local radius ' + str(localRadius)\n\n return localRadius",
"def set_elevation(tiff_file, api_key, turn=0,\n api_endpoint=(\"https://engine.tygron.com/api/session/\"\n \"event/editorgeotiff/add/?\")):\n with open(tiff_file, 'rb') as f:\n heightmap = f.read()\n # the \"True\" value in below's if statement should be \"start\"\n json = elevation_json(turn, heightmap)\n r = requests.post(url=api_endpoint+api_key, json=json)\n try:\n heightmap_id = r.json()\n except ValueError:\n print(\"UPLOAD FAILED: Received no heightmap id from Tygron.\")\n api_endpoint = (\"https://engine.tygron.com/api/session/event/\"\n \"editormap/set_height_geotiff/?\")\n r = requests.post(url=api_endpoint+api_key, json=[heightmap_id])\n return heightmap_id",
"def solar_elevation(self, dateandtime=None):\n\n if self.astral is None:\n self.astral = Astral()\n\n if dateandtime is None:\n dateandtime = datetime.datetime.now(tz=self.tz)\n\n return self.astral.solar_elevation(dateandtime, self.latitude, self.longitude)",
"def plot_satellite_elevation(\n self,\n figure_name: str=\"plot_satellite_elevation_{system}.{FIGURE_FORMAT}\",\n ) -> List[pathlib.PosixPath]:\n figure_paths = list()\n \n # Convert elevation from radian to degree\n elevation = np.rad2deg(self.dset.site_pos.elevation)\n \n # Limit x-axis range to rundate\n day_start, day_end = self._get_day_limits()\n \n # Generate x- and y-axis data per system\n for sys in sorted(self.dset.unique(\"system\")):\n x_arrays = []\n y_arrays = []\n labels = []\n \n figure_path = self.figure_dir / figure_name.replace(\"{system}\", sys).replace(\"{FIGURE_FORMAT}\", FIGURE_FORMAT)\n figure_paths.append(figure_path)\n \n for sat in sorted(self.dset.unique(\"satellite\")):\n if not sat.startswith(sys):\n continue\n idx = self.dset.filter(satellite=sat)\n x_arrays.append(self.dset.time.gps.datetime[idx])\n y_arrays.append(elevation[idx])\n labels.append(sat)\n \n # Plot with scatter plot\n plot(\n x_arrays=x_arrays,\n y_arrays=y_arrays,\n xlabel=\"Time [GPS]\",\n ylabel=\"Elevation [deg]\",\n y_unit=\"\",\n labels=labels,\n figure_path=figure_path,\n opt_args={\n \"colormap\": \"hsv\",\n \"figsize\": (7, 8),\n \"legend\": True,\n \"legend_ncol\": 6,\n \"legend_location\": \"bottom\",\n \"plot_to\": \"file\",\n \"plot_type\": \"scatter\",\n \"title\": f\"Satellite elevation for {enums.gnss_id_to_name[sys]}\",\n \"xlim\": [day_start, day_end],\n },\n )\n \n return figure_paths",
"def calibrateElevation(self,elevation):\n if len(self.values) == self.values.maxlen:\n self.elevcomp = self.value / ((1.0 - ((elevation + self.heightAboveGround) * 0.3048 / 44330.0)) ** 5.255)\n self.calibrated = True\n else:\n self.calibratedElevation = elevation",
"def elevation_from_depth(depth_origin, depth_cells):\n\n vv = False\n if isinstance(depth_cells, gxvv.GXvv):\n depth_cells = list(depth_cells.np)\n vv = True\n\n # elevation origin is the deepest cell\n elevation_origin = -locations_from_cells(depth_cells, depth_origin)[len(depth_cells) - 1]\n elevation_cells = list(reversed(depth_cells))\n if vv:\n return elevation_origin, gxvv.GXvv(elevation_cells)\n return elevation_origin, list(reversed(depth_cells))",
"def load_gldas_elevation_dataset(gldas_elevation_file): \n d1 = xr.open_dataset(gldas_elevation_file).load()\n return d1",
"def getSlantRangeElevation(self, groundRange, el):\r\n \r\n lat = self.ctrLat * pi / 180.0\r\n theta = el * pi / 180.0\r\n \r\n #figure out earth's radius at radar's lat ... non-spherical earth model\r\n e2 = self.eccen # First eccentricity squared - WGS-84 value = 0.00669437999013\r\n a = self.Requator # Equatorial radius - WGS-84 value = 6378137.0\r\n Rearth = a/sqrt(1-e2*(sin(lat))**2) # radius of curvature\r\n \r\n # Inverse of eq. 2.28b in Doviak and Zrnic 1993\r\n # Inverse of eq. 2.28c in Doviak and Zrnic 1993\r\n\r\n Rprime = self.effectiveRadiusMultiplier * self.Requator\r\n\r\n s = array(groundRange, dtype='float64')\r\n\r\n h = Rprime * ( math.cos(theta) / math.cos( theta + s / Rprime) - 1)\r\n\r\n r = (Rprime + h) * math.sin(s / Rprime) / math.cos(theta);\r\n\r\n # Use law of cosines (Side-Angle-Side triangle theorem) with \r\n # R', R'+h as sides and s/R' as the angle to get slant range\r\n #r = sqrt(Rprime**2.0 + (Rprime+h)**2.0 - 2*(Rprime+h)*Rprime*cos(s/Rprime))\r\n # Will return NaN for r=0\r\n #el = arccos((Rprime+h) * sin(s/Rprime) / r) \r\n #el *= 180.0 / pi\r\n \r\n return r,h"
] |
[
"0.71581376",
"0.7124947",
"0.64889693",
"0.640733",
"0.6338503",
"0.6217897",
"0.6069941",
"0.6061986",
"0.6008313",
"0.5963957",
"0.5874497",
"0.5836849",
"0.5830222",
"0.572488",
"0.570814",
"0.57011336",
"0.5688908",
"0.564884",
"0.56215036",
"0.5600793",
"0.55930495",
"0.5527328",
"0.54693264",
"0.5418363",
"0.53991026",
"0.5342033",
"0.5260453",
"0.52604485",
"0.52390337",
"0.5202644"
] |
0.7472302
|
0
|
This function retrieves baseline historical weather data for the locations supplied in the 'location' key of config.yaml. It uses the Dark Sky API to retrieve historical weather data such as temperature, humidity and pressure.
|
def get_gis_historical_data():
logging.info("Generating baseline reference and historical weather data.")
# Initialising function variables
fake = Faker()
geolocator = Nominatim()
config_data = get_config()
locations = config_data["location"]
# Check if there are no duplicate locations in the config.yaml file.
if len(locations) != len(set(locations)):
logging.error("Duplicate location found. Please check config.yaml file.")
raise ValueError
# Initialise pandas dataframe column name for baseline reference
# and historical data.
df_ref = pd.DataFrame(columns=["Location", "Latitude"
,"Longitude", "Elevation"
,"Timezone"])
df_hist = pd.DataFrame(columns=["Location", "Date"
,"Month", "Temperature_Min"
,"Temperature_Max", "Humidity"
,"Pressure"])
# Generate weather data for each location.
for idx, loc in enumerate(locations):
logging.info("Retrieving geolocation data for {}.".format(loc))
# Retrieving geolocation data from geopy library.
loc_data = geolocator.geocode(loc)
logging.info("Check if the location {} is valid.".format(loc))
if loc_data is None:
logging.error("Invalid location value supplied ({}). Please check config.yaml file.".format(loc))
raise ValueError
logging.info("The location {} is valid.".format(loc))
city = get_city(loc)
lat = loc_data.latitude
lon = loc_data.longitude
# Retrieving elevation data for the location.
elev = get_elevation_data(lat, lon)
for month in range(1, 13):
logging.info("Retrieving {} weather data for month {}.".format(loc, month))
for sample in range(config_data["gis"]["sampling_number"]):
temp_min = None
temp_max = None
humidity = None
pressure = None
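                # Re-draw random dates until the Dark Sky response contains all required fields.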
while temp_min is None or temp_max is None or humidity is None or pressure is None:
year = random.randint(config_data["gis"]["year_start"], config_data["gis"]["year_end"])
_, last_day = calendar.monthrange(year, month)
datetime_start = datetime.datetime(year, month, 1)
datetime_end = datetime.datetime(year, month, last_day)
date_gen = fake.date_time_between_dates(datetime_start=datetime_start
,datetime_end=datetime_end)
forecast = forecastio.load_forecast(config_data["forecastio_api_key"]
,lat
,lon
,time=date_gen
,units="si")
historical_data = forecast.json["daily"]["data"][0]
timezone = forecast.json.get("timezone", None)
temp_min = historical_data.get("temperatureMin", None)
temp_max = historical_data.get("temperatureMax", None)
                    humidity = historical_data.get("humidity", None)
                    if humidity is not None:
                        humidity = humidity * 100
pressure = historical_data.get("pressure", None)
df_temp_hist = pd.Series(dict(zip(df_hist.columns
,[city, date_gen
,date_gen.month, temp_min
,temp_max, humidity
,pressure])))
df_hist = df_hist.append(df_temp_hist, ignore_index=True)
df_temp_ref = pd.Series(dict(zip(df_ref.columns
,[city, lat
,lon, elev
,timezone])))
df_ref = df_ref.append(df_temp_ref, ignore_index=True)
logging.info("Generating position to consolidate latitude, longitude and elevation data")
df_pos = df_ref[["Latitude", "Longitude", "Elevation"]].round(2)
df_pos["Elevation"] = df_pos["Elevation"].astype(int)
df_ref["Position"] = df_pos.astype(str).apply(lambda x: ",".join(x), axis=1)
logging.info("Saving baseline reference data.")
df_ref.to_csv(get_file_path(folder_name="data"
,subdirectory=config_data["gis"]["output_subdirectory"]
,file_name=config_data["gis"]["output_base_reference_file_name"])
,index=False)
logging.info("Completed saving baseline reference data.")
logging.info("Saving baseline historical data.")
df_hist.to_csv(get_file_path(folder_name="data"
,subdirectory=config_data["gis"]["output_subdirectory"]
,file_name=config_data["gis"]["output_base_historical_file_name"])
,index=False)
logging.info("Completed saving baseline historical data.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip",
"def get_hourly(location_list):\n location, human_location = location_list\n query = location\n url = \"http://api.wunderground.com/auto/wui/geo/WXCurrentObXML/index.xml?query=%s\" % query\n f = urllib2.urlopen(url)\n xml = f.read()\n root = ET.XML(xml)\n \n current = {'location': location, 'human_location': human_location}\n current['observation_time'] = parser.parse(root.find('observation_time').text.replace('Last Updated on',''))\n current['temperature'] = root.find('temp_f').text\n current['humidity'] = root.find('relative_humidity').text.strip('%') #Remove %\n current['wind_speed'] = root.find('wind_mph').text\n current['wind_direction'] = root.find('wind_dir').text\n current['icon'] = root.find('icon').text\n current['conditions'] = root.find('weather').text\n try:\n f = Forecast(**current)\n f.save()\n except:\n logging.info(\"Hourly Forecast Data missing or no new data available\")",
"def getting_user_weather_1(location_key):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/forecasts/v1/daily/1day/{location_key}?apikey={API_Key}&language=pt-br&metric=true\"\n\n accu_request = requests.get(http_request)\n\n if accu_request.status_code != 200:\n print(\"It was not possible to stablish connection with the metherological server. Please, try again later!\")\n exit()\n\n else:\n accu_response = accu_request.json()\n\n return accu_response",
"def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x",
"def pull_forecast(city, api_key):\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data",
"def get_weather_data() -> dict:\n # Creating the url for the api call\n api_key = \"96bba64ba34672da132c1a987ad2fee6\"\n lat = 49.24\n long = -123.15\n config = '&units=metric'\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={long}&appid={api_key}{config}'\n\n # Querying and JSON parsing\n api_return = requests.get(url)\n weather_data = api_return.json()\n return weather_data",
"def weather():\r\n def weather_api_call():\r\n with open('config.json', 'r') as conf:\r\n conf = json.load(conf)\r\n # Gets the API key from the config.json file\r\n weather_api_key = conf[\"weather_api_key\"]\r\n weather_city_name = conf['weather_city_name']\r\n response = requests.get(\r\n 'http://api.openweathermap.org/data/2.5/weather?'\r\n 'q=' + weather_city_name + '&units=metric&appid=' + weather_api_key)\r\n resp_json = response.json()\r\n with open('weather.json', 'w') as outfile:\r\n # Uses the data from the API to overwrite the weather data\r\n json.dump(resp_json, outfile)\r\n outfile.close()\r\n\r\n def weather_data_extractor():\r\n with open('weather.json', 'r') as weather_json:\r\n weather_json = json.load(weather_json)\r\n temp = weather_json[\"main\"]\r\n weather_item = weather_json[\"weather\"]\r\n desc = weather_item[0]\r\n current_temperature = \"The current temperature is: \" + \\\r\n str(int(temp[\"temp\"])) + \"C\"\r\n current_feels_like = \"Feels like: \" + \\\r\n str(int(temp[\"feels_like\"])) + \"C\"\r\n forecast = desc[\"main\"]\r\n return current_feels_like, current_temperature, forecast\r\n\r\n weather_api_call()\r\n return weather_data_extractor()",
"def get_current(location_list):\n import re\n import feedparser\n location, human_location = location_list\n city, state = human_location.split(',')\n url = \"http://rss.wunderground.com/auto/rss_full/%s/%s.xml\" % (state.strip(), city.strip())\n feed = feedparser.parse(url)\n s = feed.entries[0].summary\n current = {'location': location, 'human_location': human_location}\n \n current['observation_time'] = parser.parse(feed.entries[0].updated)\n temperature = re.compile('Temperature: ([\\d\\.]+)')\n current['temperature'] = temperature.search(s).group(1)\n humidity = re.compile('Humidity: (\\d+)')\n current['humidity'] = humidity.search(s).group(1)\n conditions = re.compile('Conditions: ([\\w\\s]+)')\n current['conditions'] = conditions.search(s).group(1)\n windspeed = re.compile('Wind Speed: ([\\d\\.]+)')\n current['wind_speed'] = windspeed.search(s).group(1)\n winddirection = re.compile('Wind Direction: (\\w+)')\n current['wind_direction'] = winddirection.search(s).group(1)\n try:\n f = Forecast(**current)\n f.save()\n except:\n logging.info(\"Current Forecast Data missing or no new data available\")",
"def get_weather(lat, lon):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.WEATHER_KEY\r\n\r\n # API endpoint\r\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid={api_key}'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n weather = response.json()\r\n\r\n # Interpret Current Weather\r\n current_weather = weather['current']\r\n\r\n # By default, the API returns all requested times in unix format\r\n current_weather['dt'] = epoch_to_human_readable_date(current_weather['dt'])\r\n current_weather['sunrise'] = epoch_to_human_readable_date(current_weather['sunrise'])\r\n current_weather['sunset'] = epoch_to_human_readable_date(current_weather['sunset'])\r\n\r\n # By default, the API returns all temperature values in Kelvin\r\n current_weather['dew_point'] = {'kelvin': current_weather['dew_point'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(current_weather['dew_point']), 2),\r\n 'celsius': round(kelvin_to_celsius(current_weather['dew_point']), 2)}\r\n\r\n current_weather['feels_like'] = {'kelvin': current_weather['feels_like'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(current_weather['feels_like']), 2),\r\n 'celsius': round(kelvin_to_celsius(current_weather['feels_like']), 2)}\r\n\r\n current_weather['temp'] = {'kelvin': current_weather['temp'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(current_weather['temp']), 2),\r\n 'celsius': round(kelvin_to_celsius(current_weather['temp']), 2)}\r\n\r\n # Change icon value to image url to be used in html img tag as src\r\n current_weather['weather'][0]['icon'] = 'http://openweathermap.org/img/wn/' + current_weather['weather'][0]['icon'] + '@2x.png'\r\n\r\n # Interpret Daily Weather\r\n daily_forcast = weather['daily']\r\n\r\n for day in daily_forcast:\r\n # Get readable dates and times\r\n day['dt'] = epoch_to_human_readable_date(day['dt'])\r\n day['sunrise'] = epoch_to_human_readable_date(day['sunrise'])\r\n day['sunset'] = epoch_to_human_readable_date(day['sunset'])\r\n\r\n # Change icon value to image url to be used in html img tag as src\r\n day['weather'][0]['icon'] = 'http://openweathermap.org/img/wn/' + day['weather'][0]['icon'] + '@2x.png'\r\n\r\n\r\n # Convert temperatures in 'feels_like' dictionary from Kelvin to Fahrenheit and Celsius\r\n\r\n for temp in day['feels_like']:\r\n day['feels_like'][temp] = {'kelvin': day['feels_like'][temp], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(day['feels_like'][temp]), 2),\r\n 'celsius': round(kelvin_to_celsius(day['feels_like'][temp]), 2)}\r\n\r\n\r\n # Convert temperatures in 'temp' dictionary from Kelvin to Fahrenheit\r\n\r\n for temp in day['temp']:\r\n day['temp'][temp] = {'kelvin': day['temp'][temp], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(day['temp'][temp]), 2),\r\n 'celsius': round(kelvin_to_celsius(day['temp'][temp]), 2)}\r\n\r\n # Interpret Hourly Weather\r\n hourly_weather = weather['hourly']\r\n\r\n # Only manipulating data for hours of the current date, rest will be ommitted\r\n\r\n curr_date = epoch_to_human_readable_date(hourly_weather[0]['dt']).split(\",\", 1)[1][:3]\r\n\r\n last_hour = 0\r\n\r\n for index, hour in enumerate(hourly_weather):\r\n # Get date in relation to the hour\r\n date = epoch_to_human_readable_date(hour['dt']).split(\",\", 1)[1][:3]\r\n if date != curr_date:\r\n last_hour = index\r\n break\r\n \r\n # Convert temperatures in 'dew_point' dictionary from Kelvin to Fahrenheit and Celsius\r\n hour['dew_point'] = {'Kelvin':hour['dew_point'],\r\n 
'fahrenheit': round(kelvin_to_fahrenheit(hour['dew_point']), 2),\r\n 'celsius': round(kelvin_to_celsius(hour['dew_point']), 2)}\r\n\r\n # Get readable dates and times\r\n hour['dt'] = epoch_to_human_readable_date(hour['dt'])\r\n\r\n # Convert temperatures in 'feels_like' dictionary from Kelvin to Fahrenheit and Celsius\r\n hour['feels_like'] = {'kelvin': hour['feels_like'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(hour['feels_like']), 2),\r\n 'celsius': round(kelvin_to_celsius(hour['feels_like']), 2)}\r\n\r\n # Convert temperatures in 'temp' dictionary from Kelvin to Fahrenheit\r\n hour['temp'] = {'kelvin': hour['temp'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(hour['temp']), 2),\r\n 'celsius': round(kelvin_to_celsius(hour['temp']), 2)}\r\n\r\n hour['weather'][0]['icon'] = 'http://openweathermap.org/img/wn/' + hour['weather'][0]['icon'] + '@2x.png'\r\n\r\n\r\n return current_weather, daily_forcast, hourly_weather[:last_hour]",
"def get_weather_forecast(address, update=''):\n \n def method_url():\n url_base = visit_forecast_home(address)\n return url_base\n \n def method_dict():\n basedate = datetime.now()\n basemonth = basedate.strftime(\"%B\").lower()\n \n url_stem = weather_profile(address, \"weather_forecast_url\")\n url_stem = str(url_stem).replace(\"weather-forecast\", str(basemonth) + \"-weather\")\n url_base = url_stem\n \n today = date.today()\n date_last1 = somonth(today.year, today.month - 2)\n date_last = somonth(today.year, today.month - 1)\n date_current = somonth(today.year, today.month)\n date_next = somonth(today.year, today.month+1)\n\n url_last1 = str(str(url_base) +\"?monyr=\"+str(date_last1.month)+\"/1/\"+str(date_last1.year)+\"&view=table\")\n url_last = str(str(url_base) +\"?monyr=\"+str(date_last.month)+\"/1/\"+str(date_last.year)+\"&view=table\")\n url_current = str(str(url_base)+\"?monyr=\"+str(date_current.month)+\"/1/\"+str(date_current.year)+\"&view=table\")\n url_next = str(str(url_base) +\"?monyr=\"+str(date_next.month)+\"/1/\"+str(date_next.year)+\"&view=table\")\n \n url_list = [url_current, url_last, url_next, url_last1] #will give priority to first in this list\n #print(url_list)\n combined_dict = {}\n \n for url in url_list:\n forecast_dict = download_forecast(url)\n print(str(url) + \" downloaded\")\n combined_dict = join_dicts(combined_dict, forecast_dict)\n weather_profile(address, \"weather_forecast_dict\", combined_dict, update)\n print(str(url) + \" added\")\n print(combined_dict)\n \n return combined_dict\n \n title = \"weather_forecast_url\"\n if weather_profile(address, title) == None:\n print(\"/t-Adding/refreshing data...\")\n data = method_url()\n print(data)\n weather_profile(address, title, data, update)\n else:\n print(\"There is existing data for: \" + str(title))\n\n\n title = \"weather_forecast_dict\"\n if weather_profile(address, title) == None or update != '':\n print(\"/t-Adding/refreshing data...\")\n data = method_dict()\n print(data)\n weather_profile(address, title, data, update)\n return weather_profile(address, title)\n else:\n return weather_profile(address, title)",
"async def historic(self) -> dict:\n return await self._request(\n \"get\", \"https://www.asthmaforecast.com/api/forecast/historic/asthma\"\n )",
"def LoadingData(self, ticker, FullHistory=False):\r\n if FullHistory == False:\r\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&apikey={}\"\r\n else:\r\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&outputsize=full&apikey={}\"\r\n\r\n try:\r\n response = requests.get(url.format(ticker, self.key))\r\n response.raise_for_status()\r\n except requests.exceptions.RequestException as e:\r\n raise SystemExit(e)\r\n\r\n # The API returns 200 status even after you have a typo\r\n try:\r\n outputjson = response.json()['Time Series (Daily)']\r\n except:\r\n print(\"Please check ticker for typos or mismatches\")\r\n outputjson = None\r\n\r\n return outputjson, ticker",
"def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass",
"async def current(self) -> dict:\n return await self._request(\n \"get\", \"https://www.asthmaforecast.com/api/forecast/current/asthma\"\n )",
"def get_weather(self):\n with urllib.request.urlopen(self.url) as response:\n json_data = response.read().decode('utf-8')\n\n data = json.loads(json_data)\n\n weather = {}\n weather['current'] = {\n 'temp': round(data['current']['temp_f']),\n 'humidity': round(data['current']['humidity']),\n 'summary': data['current']['condition']['text']\n }\n today = data['forecast']['forecastday'][0]['day']\n weather['today'] = {\n 'temp': round(today['maxtemp_f']),\n 'summary': today['condition']['text']\n }\n \n return weather",
"def getting_user_weather_5days(location_key):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/forecasts/v1/daily/5day/{location_key}?apikey={API_Key}&language=pt-br&metric=true\"\n\n accu_request_5 = requests.get(http_request)\n\n if accu_request_5.status_code != 200:\n print(\"It was not possible to stablish connection with the metherological server. Please, try again later!\")\n exit()\n\n else:\n accu_response_5 = accu_request_5.json()\n\n return accu_response_5",
"def GetWeatherByLocation():\n Location = GetLocation()\n WeatherUrl =\"http://api.openweathermap.org/data/2.5/weather?\"+ Location +\"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\"\n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n pprint(WeatherInfo)\n WindSpeed = WeatherInfo['wind']['speed']\n pprint(WindSpeed)\n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n print(type(Humidity))\n return(Temp, Humidity, Description)",
"def weather_fetch(city, weather_key):\n #Allows for customizable API key and weather location.\n base_url = \"http://api.openweathermap.org/data/2.5/weather?q=\"\n city = str(city)\n key = str(\"&appid=\" + weather_key + \"&units=metric\")\n complete_url = base_url + city + key\n #Gets API with requests and convert to .json\n weather_api = requests.get(complete_url)\n weather_json = weather_api.json()\n return weather_json",
"def fetch_weather():\n\n # Fetch the current weather.\n response = requests.get(f'https://api.openweathermap.org/data/2.5/weather?q=Manchester,UK&units=metric&APPID={WEATHER_API_KEY}')\n\n # Return the data.\n return response.json()",
"def __weather_api_call(\n self, time: datetime, location: tuple, index: int,\n ) -> Weather:\n URL = (\n 'https://weather.visualcrossing.com/VisualCrossingWebServices'\n + '/rest/services/weatherdata/history?'\n )\n time_start = time.strftime('%Y-%m-%dT%H:%M:%S')\n # time_end = (time + timedelta(hours=1, seconds=0)\n # ).strftime('%Y-%m-%dT%H:%M:%S')\n location0_str = f'{location[0]:.5f}'\n location1_str = f'{location[1]:.5f}'\n\n PARAMS = {\n 'aggregateHours': 1,\n 'combinationMethod': 'aggregate',\n 'startDateTime': time_start,\n 'endDateTime': time_start,\n 'maxStations': -1,\n 'maxDistance': -1,\n 'contentType': 'json',\n 'unitGroup': self.unit_group,\n 'locationMode': 'single',\n 'key': self.vc_api_key,\n 'dataElements': 'all',\n 'locations': f'{location0_str}, {location1_str}',\n }\n # sending get request and saving the response as response object\n r = requests.get(url=URL, params=PARAMS)\n # extracting data in json format\n response_data = r.json()\n data_values = response_data['location']['values'][0]\n return Weather(\n temperature=data_values['temp'],\n maximum_temperature=data_values['maxt'],\n minimum_temperature=data_values['mint'],\n wind_chill=data_values['windchill'],\n heat_index=data_values['heatindex'],\n precipitation=data_values['precip'],\n snow_depth=data_values['snowdepth'],\n wind_speed=data_values['wspd'],\n wind_direction=data_values['wdir'],\n sea_level_pressure=data_values['sealevelpressure'],\n visibility=data_values['visibility'],\n cloud_cover=data_values['cloudcover'],\n dew_point=data_values['dew'],\n solar_radiation=data_values['solarradiation'],\n relative_humidity=data_values['humidity'],\n weather_type=data_values['weathertype'],\n conditions=data_values['conditions'],\n date=time,\n location=location,\n index=index,\n )",
"def get_weather_forecast(self, base_url):\n url = self._parsing_url(base_url)\n api_response = requests.get(url)\n if not api_response.ok:\n logging.error(\n \"\"\"Error occured while trying to get response from AccuWeather\n API\"\"\")\n weather_forcast = api_response.json()\n return weather_forcast",
"def _get_dict_weather_data(self, weather_current):\n\n returned_dict = dict()\n returned_dict[\"weather_status\"] = weather_current.get_detailed_status()\n\n time_format = '%H:%M'\n if self.am_pm_time:\n time_format = '%I:%M %p'\n\n returned_dict[\"sunset\"] = datetime.fromtimestamp(weather_current.get_sunset_time()).strftime(time_format)\n returned_dict[\"sunrise\"] = datetime.fromtimestamp(weather_current.get_sunrise_time()).strftime(time_format)\n\n returned_dict[\"temperature\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp\"]))\n returned_dict[\"temperature_min\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp_min\"]))\n returned_dict[\"temperature_max\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp_max\"]))\n\n returned_dict[\"pressure\"] = weather_current.get_pressure()[\"press\"]\n returned_dict[\"sea_level_pressure\"] = weather_current.get_pressure()[\"sea_level\"]\n\n returned_dict[\"humidity\"] = weather_current.get_humidity()\n\n wind = weather_current.get_wind()\n wind_deg = wind.get(\"deg\", None)\n wind_speed = wind.get(\"speed\", None)\n returned_dict[\"wind_deg\"] = wind_deg\n returned_dict[\"wind_speed\"] = wind_speed\n\n snow_current = weather_current.get_snow()\n snow_current = snow_current.get('all', None)\n rain_current = weather_current.get_rain()\n rain_current = rain_current.get('all', None)\n returned_dict[\"rainfall\"] = rain_current\n returned_dict[\"snow\"] = snow_current\n\n returned_dict[\"clouds_coverage\"] = weather_current.get_clouds()\n\n return returned_dict",
"def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data",
"def ping_darksky(time, key):\n boston = forecast(key, *BOSTON, time=time.isoformat())\n\n fetch = {\n 'day': time,\n 'tempMin': boston[\"daily\"][\"data\"][0].get('temperatureMin', np.nan),\n 'tempMax': boston[\"daily\"][\"data\"][0].get('temperatureMax', np.nan),\n 'summary': boston[\"daily\"][\"data\"][0].get('summary', np.nan),\n 'desc': boston[\"daily\"][\"data\"][0].get('icon', np.nan),\n 'cloudCover': boston[\"daily\"][\"data\"][0].get('cloudCover', np.nan)}\n return fetch",
"def fetch_production(zone_key='US-NY', session=None, target_datetime=None, logger=None):\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n\n if (arrow.now() - target_datetime).days > 9:\n raise NotImplementedError('you can get data older than 9 days at the '\n 'url http://mis.nyiso.com/public/')\n\n ny_date = target_datetime.format('YYYYMMDD')\n mix_url = 'http://mis.nyiso.com/public/csv/rtfuelmix/{}rtfuelmix.csv'.format(ny_date)\n try:\n raw_data = read_csv_data(mix_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n clean_data = data_parser(raw_data)\n\n production_mix = []\n for datapoint in clean_data:\n data = {\n 'zoneKey': zone_key,\n 'datetime': timestamp_converter(datapoint[0]),\n 'production': datapoint[1],\n 'storage': {},\n 'source': 'nyiso.com'\n }\n\n production_mix.append(data)\n\n return production_mix",
"def main(config, model, stid, forecast_date):\n # Get the API key from the config\n try:\n api_key = config['Models'][model]['api_key']\n except KeyError:\n raise KeyError('wunderground.py: no api_key parameter defined for model %s in config!' % model)\n\n # Get forecast\n forecast = get_twc_forecast(stid, api_key, forecast_date)\n\n return forecast",
"def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather",
"def get_humidity_data(zone):\n\n zone = zone[1:len(zone)-1]\n humidity_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get humidity data\n query = \"Select humidity_date, humidity_relative From humidity Left join fire_danger_zone on humidity.humidity_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and humidity.humidity_date >= date('2010-01-01') Order by humidity.humidity_date;\"\n dataframe = pd.read_sql_query(query, conn) \n humidity = dataframe['humidity_relative'].values.tolist()\n\n # get dates\n dates = dataframe['humidity_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'humidity_'+zone\n humidity_response[data_name] = humidity\n humidity_response['labels'] = dates\n \n # return data\n response = jsonify(humidity_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response",
"def fetch_weather(y):\r\n # request parameter(s): Start with '?'\r\n # separate name and value with '='\r\n # multiple parameter name value pairs are separate with '&'\r\n query_string = \"?id={}&units=imperial&APIKEY={}\".format(y, API_KEY)\r\n request_url = WS_URL + query_string\r\n print(\"Request URL: \", request_url)\r\n response = requests.get(request_url)\r\n if response.status_code == 200:\r\n city_name = response.json()[\"city\"][\"name\"]\r\n lst = response.json()[\"list\"]\r\n tmp_list = []\r\n for i in range(len(lst) // 8):\r\n li = [x for x in range(len(lst)) if x // 8 == i]\r\n tmp_list.append(max([lst[j][\"main\"][\"temp_max\"] for j in li]))\r\n return City(city_name, tmp_list)\r\n else:\r\n print(\"How should I know?\")\r\n return None",
"def update_weather(location_request, db):\n with open(expanduser(\"~/bin/my_utilities/config/darksky-key\")) as f:\n ds_key = f.readline().strip()\n current = []\n current_day = 0\n with forecast(ds_key, *location_request, units=\"uk2\") as location:\n raw = location['hourly']['data'][0]\n current.append(datetime.datetime.now().hour)\n current.append(day_relative_to_absolute(current_day))\n current.append(raw[\"temperature\"])\n current.append(raw[\"apparentTemperature\"])\n current.append(raw[\"precipIntensity\"])\n current.append(raw[\"precipProbability\"] * 100)\n current.append(raw[\"humidity\"] * 100)\n current.append(raw[\"dewPoint\"])\n current.append(raw[\"windSpeed\"])\n current.append(raw[\"windBearing\"])\n current.append(raw[\"windGust\"])\n current.append(raw[\"pressure\"])\n current.append(raw[\"cloudCover\"] * 100)\n current.append(raw[\"uvIndex\"])\n current.append(raw[\"visibility\"])\n current = format_list_for_db(current)\n\n columns = [\"hour\", \"day\", \"temp\", \"apptemp\", \"precipint\", \"precipprob\",\n \"humidity\", \"dewpoint\", \"windspeed\", \"windbearing\",\n \"windgust\", \"pressure\", \"cloudcover\", \"uvindex\", \"visibility\"]\n columns = format_list_for_db(columns)\n statement = f\"INSERT INTO WEATHER {columns} VALUES {current}\"\n print(statement)\n cursor = db.cursor()\n cursor.execute(statement)\n cursor.close()"
] |
[
"0.65205216",
"0.63677955",
"0.6169985",
"0.5902983",
"0.58921283",
"0.58830535",
"0.5816516",
"0.5792356",
"0.577339",
"0.57286644",
"0.57247823",
"0.572364",
"0.56794244",
"0.5657562",
"0.5617185",
"0.55872756",
"0.55848914",
"0.55707026",
"0.5563872",
"0.5557767",
"0.5535043",
"0.5510888",
"0.54791254",
"0.54536414",
"0.54070807",
"0.54068345",
"0.53980285",
"0.5385053",
"0.5384998",
"0.53848636"
] |
0.6846133
|
0
|
This function aggregates the baseline historical weather data by location and month.
|
def aggregate_gis_historical_data():
logging.info("Processing historical weather data aggregation.")
# Initialising function variables
config_data = get_config()
# Initialise pandas dataframe column name for baseline reference
# and historical data.
hist_file_path = get_file_path(folder_name="data"
,subdirectory=config_data["gis"]["output_subdirectory"]
,file_name=config_data["gis"]["output_base_historical_file_name"])
# Define group by columns.
group_by_cols = ["Location", "Month"]
# Define aggregate columns.
aggregate_cols = {"Temperature_Min": "mean"
,"Temperature_Max": "mean"
,"Humidity": ["min", "max"]
,"Pressure": ["min", "max"]}
logging.info("Reading historical weather data.")
# Read baseline historical data.
df = pd.read_csv(hist_file_path)
logging.info("Completed reading historical weather data.")
logging.info("Aggregating historical weather data.")
df_aggregate = df.groupby(group_by_cols, as_index=False).aggregate(aggregate_cols)
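    # Flatten the (column, statistic) MultiIndex produced by the multi-function aggregation.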
df_aggregate.columns = ["".join(name) for name in df_aggregate.columns.ravel()]
df_aggregate.rename(columns={"Temperature_Minmean": "T_avg_min"
,"Temperature_Maxmean": "T_avg_max"
,"Humiditymin": "H_min"
,"Humiditymax": "H_max"
,"Pressuremin": "P_min"
,"Pressuremax": "P_max"}
,inplace=True)
df_aggregate ["T_avg_range"] = df_aggregate ["T_avg_max"] - df_aggregate ["T_avg_min"]
df_aggregate ["H_range"] = df_aggregate ["H_max"] - df_aggregate ["H_min"]
df_aggregate ["P_range"] = df_aggregate ["P_max"] - df_aggregate ["P_min"]
logging.info("Saving baseline aggregate data.")
df_aggregate.to_csv(get_file_path(folder_name="data"
,subdirectory=config_data["gis"]["output_subdirectory"]
,file_name=config_data["gis"]["output_base_aggregate_file_name"])
,index=False)
logging.info("Completed saving baseline aggregate data.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_gis_historical_data():\n logging.info(\"Generating baseline reference and historical weather data.\")\n \n # Initialising function variables\n fake = Faker()\n geolocator = Nominatim()\n config_data = get_config()\n locations = config_data[\"location\"]\n \n # Check if there are no duplicate locations in the config.yaml file.\n if len(locations) != len(set(locations)):\n logging.error(\"Duplicate location found. Please check config.yaml file.\")\n raise ValueError\n \n # Initialise pandas dataframe column name for baseline reference\n # and historical data.\n df_ref = pd.DataFrame(columns=[\"Location\", \"Latitude\"\n ,\"Longitude\", \"Elevation\"\n ,\"Timezone\"])\n df_hist = pd.DataFrame(columns=[\"Location\", \"Date\"\n ,\"Month\", \"Temperature_Min\"\n ,\"Temperature_Max\", \"Humidity\"\n ,\"Pressure\"])\n \n # Generate weather data for each location.\n for idx, loc in enumerate(locations):\n \n logging.info(\"Retrieving geolocation data for {}.\".format(loc))\n \n # Retrieving geolocation data from geopy library.\n loc_data = geolocator.geocode(loc)\n \n logging.info(\"Check if the location {} is valid.\".format(loc))\n if loc_data is None:\n logging.error(\"Invalid location value supplied ({}). Please check config.yaml file.\".format(loc))\n raise ValueError\n logging.info(\"The location {} is valid.\".format(loc))\n \n city = get_city(loc)\n lat = loc_data.latitude\n lon = loc_data.longitude\n \n # Retrieving elevation data for the location.\n elev = get_elevation_data(lat, lon)\n \n for month in range(1, 13):\n \n logging.info(\"Retrieving {} weather data for month {}.\".format(loc, month))\n \n for sample in range(config_data[\"gis\"][\"sampling_number\"]):\n \n temp_min = None\n temp_max = None\n humidity = None\n pressure = None\n \n while temp_min is None or temp_max is None or humidity is None or pressure is None:\n \n year = random.randint(config_data[\"gis\"][\"year_start\"], config_data[\"gis\"][\"year_end\"])\n\n _, last_day = calendar.monthrange(year, month)\n\n datetime_start = datetime.datetime(year, month, 1)\n datetime_end = datetime.datetime(year, month, last_day)\n\n date_gen = fake.date_time_between_dates(datetime_start=datetime_start\n ,datetime_end=datetime_end)\n\n forecast = forecastio.load_forecast(config_data[\"forecastio_api_key\"]\n ,lat\n ,lon\n ,time=date_gen\n ,units=\"si\")\n\n historical_data = forecast.json[\"daily\"][\"data\"][0]\n \n timezone = forecast.json.get(\"timezone\", None)\n temp_min = historical_data.get(\"temperatureMin\", None)\n temp_max = historical_data.get(\"temperatureMax\", None)\n humidity = historical_data.get(\"humidity\", None) * 100\n pressure = historical_data.get(\"pressure\", None)\n \n df_temp_hist = pd.Series(dict(zip(df_hist.columns\n ,[city, date_gen\n ,date_gen.month, temp_min\n ,temp_max, humidity\n ,pressure])))\n \n df_hist = df_hist.append(df_temp_hist, ignore_index=True)\n \n df_temp_ref = pd.Series(dict(zip(df_ref.columns\n ,[city, lat\n ,lon, elev\n ,timezone])))\n df_ref = df_ref.append(df_temp_ref, ignore_index=True)\n \n logging.info(\"Generating position to consolidate latitude, longitude and elevation data\")\n df_pos = df_ref[[\"Latitude\", \"Longitude\", \"Elevation\"]].round(2)\n df_pos[\"Elevation\"] = df_pos[\"Elevation\"].astype(int) \n df_ref[\"Position\"] = df_pos.astype(str).apply(lambda x: \",\".join(x), axis=1)\n \n logging.info(\"Saving baseline reference data.\")\n df_ref.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n 
,file_name=config_data[\"gis\"][\"output_base_reference_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline reference data.\")\n\n logging.info(\"Saving baseline historical data.\")\n df_hist.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_historical_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline historical data.\")",
"def getEPAHistData(month, yr):\n\n # Change this to use the csv file being modified every hour\n try:\n try:\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaRaw/epa_20{}{}.parquet'.format(yr, month)).load()\n pf=ParquetFile('midscapstone-whos-polluting-my-air/EpaRaw/epa_20{}{}.parquet'.format(yr, month), open_with=myopen)\n epa_df=pf.to_pandas()\n except:\n raise CustomError(\"FILE ERROR: Epa Raw Dataframe not found\")\n\n # Add a datekey column based on local date\n epa_df.rename(columns={'Latitude':'lat', 'Longitude':'lon', 'UTC':'utc', 'Parameter':'parameter', 'Unit':'epa_pm25_unit', 'Value':'epa_pm25_value',\n 'RawConcentration':'raw_concentration', 'AQI':'aqi', 'Category':'category', 'SiteName':'site_name', 'AgencyName':'agency_name',\n 'FullAQSCode':'full_aqs_code', 'IntlAQSCode':'intl_aqs_code'}, inplace=True)\n epa_df['created'] = epa_df['utc'].apply(lambda x: int(datetime.datetime.strptime(str(x), '%Y-%m-%d %H:%M:%S').replace(tzinfo=tz.tzutc()).astimezone(timezone('US/Pacific')).strftime(\"%Y%m%d%H%M\")))\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA HIST DATA *** {}\".format(e))\n return epa_df",
"def recalculate_aggregate_table(model_class):\n state_ids = (\n SQLLocation.objects\n .filter(domain='icds-cas', location_type__name='state')\n .values_list('id', flat=True)\n )\n\n for state_id in state_ids:\n for year in (2015, 2016, 2017):\n for month in range(1, 13):\n model_class.aggregate(state_id, date(year, month, 1))\n\n for month in range(1, date.today().month + 1):\n model_class.aggregate(state_id, date(2018, month, 1))",
"def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()",
"def depart_arrive_stats_by_month(flights):\n\n return ...",
"def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]",
"def get_total_data():\n return pd.merge(compute_aggregate_load_data(), compute_aggregate_weather_data(),on=\"Date\")",
"def organise_baseline_data(self):\n self.baseline_data = {}\n for injkey in self.data_sets.keys():\n data = {}\n baseline_result = self.data_sets[injkey].pop('full_syst_baseline')\n datakey = baseline_result.keys()[0]\n baseline_data = self.systtest_fit_extract(\n fit_data=baseline_result[datakey],\n datakey=datakey,\n labels=self.labels[injkey]['full_syst_baseline'].dict\n )\n self.baseline_data[injkey] = baseline_data",
"def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict",
"def gather_data(self, *args, **kwargs):\n instrument_arg = kwargs.get('instrument', 'EUR_USD')\n granularity_arg = kwargs.get('granularity', 'M1')\n candle_format = kwargs.get('candleFormat', 'bidask')\n start_time = kwargs.get('start', '2014-10-01T00:00:00.000000Z')\n count_arg = kwargs.get('count', 5000)\n out_data = []\n data_complete = False\n while(not data_complete):\n response = self.oanda.get_history(instrument=instrument_arg,\n granularity=granularity_arg,\n candleFormat=candle_format,\n start=start_time,\n count=count_arg)\n raw_data = response['candles']\n if (len(out_data) == 0):\n out_data = out_data + raw_data\n elif (len(out_data) > 1):\n # raw_data[0] is already in out_data as raw_data[-1] from last\n # iteration\n out_data = out_data + raw_data[1:]\n start_time = raw_data[-1]['time']\n if (len(raw_data) < 5000):\n data_complete = True\n\n out_data = self._list_to_df(out_data)\n return out_data",
"def all_data(self):\n return pd.concat([self.historic_data, self.dayahead_data])",
"def get_alpaca_data(self,ticker_list,start,end, timeframe = \"1D\"):\n s = pd.Timestamp(start,tz = \"America/New_York\").isoformat()\n e = pd.Timestamp(end,tz = \"America/New_York\").isoformat()\n \n df = api.get_barset(\n ticker_list,\n timeframe,\n start = s,\n end = e\n\n ).df\n return df",
"def breakdown_by_month(\n df,\n start_column,\n end_column,\n key_column,\n value_column,\n output_columns=None,\n aggfunc=\"count\",\n):\n\n def build_df(t):\n start_date = getattr(t, start_column)\n end_date = getattr(t, end_column)\n key = getattr(t, key_column)\n value = getattr(t, value_column)\n\n if end_date is pd.NaT:\n end_date = pd.Timestamp.today()\n\n first_month = (\n start_date.normalize().to_period(\"M\").to_timestamp(\"D\", \"S\")\n )\n last_month = end_date.normalize().to_period(\"M\").to_timestamp(\"D\", \"S\")\n\n index = pd.date_range(first_month, last_month, freq=\"MS\")\n\n return pd.DataFrame(index=index, data=[[key]], columns=[value])\n\n breakdown = (\n pd.concat([build_df(t) for t in df.itertuples()], sort=True)\n .resample(\"MS\")\n .agg(aggfunc)\n )\n\n if output_columns:\n breakdown = breakdown[\n [s for s in output_columns if s in breakdown.columns]\n ]\n\n return breakdown",
"def get_monthly_avg(all_stock_data):\n try:\n monthly_data = {}\n for data in all_stock_data:\n month = data[0][0:7]\n if month not in monthly_data:\n monthly_data[month] = []\n monthly_data[month].append(data)\n monthly_avg_list = []\n for month, stock_data in monthly_data.items():\n monthly_avg_list.append((month, get_avg(stock_data)))\n return monthly_avg_list\n\n except Exception as e:\n print(e)\n exit()",
"def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m",
"def agg_history(self):\n cd_list, cr_list = zip(*self._history)\n return pd.concat(cd_list), pd.concat(cr_list)",
"def monthly_per_min_comparison(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n this_month_start = today - timedelta(days=30)\n last_month_start = today - timedelta(days=60)\n month_per_min = []\n lastmonth_per_min = []\n thismonth_viewed = []\n lastmonth_viewed = []\n for index, row in df.iterrows():\n if row['session_start'].date() >= this_month_start:\n per_min = get_cards_per_min(row)\n month_per_min.append(per_min)\n thismonth_viewed.append(row['total_looked_at'])\n if last_month_start <= row['session_start'].date() < this_month_start:\n per_min = get_cards_per_min(row)\n lastmonth_per_min.append(per_min)\n lastmonth_viewed.append(row['total_looked_at'])\n month_average = 0\n lastmonth_average = 0\n if len(month_per_min) > 0 and len(lastmonth_per_min) > 0:\n month_average = sum(month_per_min) / len(month_per_min)\n lastmonth_average = sum(lastmonth_per_min) / len(lastmonth_per_min)\n elif len(month_per_min) == 0:\n month_average = 0\n elif len(lastmonth_per_min) == 0:\n lastmonth_average = 0\n if month_average > lastmonth_average:\n color_code = \"09B109\"\n arrow = \"\\u2191\"\n elif month_average < lastmonth_average:\n color_code = \"CE2929\"\n arrow = \"\\u2193\"\n else:\n color_code = \"000000\"\n arrow = \"\\u003D\"\n try:\n difference = abs((month_average - lastmonth_average) / lastmonth_average) * 100\n except ZeroDivisionError:\n difference = 100\n # if no sessions last month, difference is up 100%\n # if both averages are zero, this will display '0 100% =' in black\n result = make_results_dict(month_average, difference, color_code, arrow)\n result['monthly_cards_min'] = result.pop('metric')\n return result",
"def monthly_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n this_month_start = today - timedelta(days=30)\n last_month_start = today - timedelta(days=60)\n month_per_min = []\n lastmonth_per_min = []\n thismonth_viewed = []\n lastmonth_viewed = []\n for index, row in df.iterrows():\n if row['session_start'].date() >= this_month_start:\n per_min = get_cards_per_min(row)\n month_per_min.append(per_min)\n thismonth_viewed.append(row['total_looked_at'])\n if last_month_start <= row['session_start'].date() < this_month_start:\n per_min = get_cards_per_min(row)\n lastmonth_per_min.append(per_min)\n lastmonth_viewed.append(row['total_looked_at'])\n month_viewed_result = total_viewed(thismonth_viewed, lastmonth_viewed)\n month_viewed_result['total_viewed_monthly'] = month_viewed_result.pop('total_viewed')\n return month_viewed_result",
"def get_monthly_history_metric(func, site, date_for, months_back,\n include_current_in_history=True): # pylint: disable=unused-argument\n date_for = as_date(date_for)\n history = []\n\n for month in previous_months_iterator(month_for=date_for, months_back=months_back,):\n period = period_str(month)\n value = func(\n site=site,\n start_date=datetime.date(month[0], month[1], 1),\n end_date=datetime.date(month[0], month[1], month[2]),\n )\n history.append(dict(period=period, value=value,))\n\n if history:\n # use the last entry\n current_month = history[-1]['value']\n else:\n # This should work for float too since '0 == 0.0' resolves to True\n current_month = 0\n return dict(\n current_month=current_month,\n history=history,)",
"def monthly_table(self):\n htable = [0 for i in range(12)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[1]\n htable[evtime - 1] += 1\n return htable",
"def compute(self, today, asset_ids, out, high):\n start_month = today.month\n current_month = start_month - 1 if start_month - 1 > 0 else 12\n prev_month = current_month - 1 if current_month - 1 > 0 else 12\n idx = -1\n date_idx = today - 1\n month_idx = date_idx.month\n\n current_end_month_idx = 0\n current_start_month_idx = 0\n\n while idx > -MonthlyHigh.window_length:\n if month_idx == current_month:\n if current_end_month_idx == 0:\n current_end_month_idx = idx\n if month_idx == prev_month:\n if current_start_month_idx == 0:\n current_start_month_idx = idx + 1\n\n date_idx = date_idx - 1\n month_idx = date_idx.month\n idx = idx - 1\n\n if current_end_month_idx == -1:\n current_month_high = high[current_start_month_idx:, :].max(\n axis=0)\n # current_month_low = low[current_start_month_idx:, :].min(axis=0)\n else:\n current_month_high = high[current_start_month_idx:current_end_month_idx + 1, :].max(\n axis=0)\n\n # current_month_low = low[current_start_month_idx:current_end_month_idx + 1, :].min(axis=0)\n out[:] = current_month_high",
"def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numerica for so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df",
"def _calculate_month_roam_distances(df):\n\n month_df = (\n df\n .groupby('month')\n .sum()\n )\n\n return month_df",
"def preprocess_raw_data(df):\n def convert_date_to_datetime(_df):\n return _df.assign(Date=pd.to_datetime(_df['Date']))\n\n def fill_missing_miles_with_zero(_df):\n return _df.fillna({'Miles': 0})\n\n def filter_dates_prior_to_today(_df):\n return _df[_df['Date'] < datetime.datetime.today()]\n\n def calculate_rolling_averages(_df):\n _df['MA_10day'] = _df['Miles'].rolling(window=10).mean().fillna(0)\n _df['MA_30day'] = _df['Miles'].rolling(window=30).mean().fillna(0)\n return _df.sort_values('Date')\n\n pipeline = [\n convert_date_to_datetime,\n fill_missing_miles_with_zero,\n filter_dates_prior_to_today,\n calculate_rolling_averages,\n ]\n for func in pipeline:\n df = func(df)\n\n df['date_str_label'] = df['Date'].dt.strftime('%b-%d')\n\n return df",
"def obtain_monthly_mean(data=pd.DataFrame()):\n return data.resample(\"MS\").mean()",
"def compliance_time_series(county, core_path , patterns_path, backfill = False, GEOID_type = 'CBG'):\n #Load data about places and patterns\n county_places = pd.read_csv(core_path+'places-'+ county +'.csv', index_col='safegraph_place_id') \n \n pattern_dates = [x[5:9] for x in sorted(os.listdir(patterns_path+'main-file-'+ county +'/'))]\n w = 0\n next_date = pattern_dates[w]\n #Create visitor table\n place_cts = pd.DataFrame() #First two months don't have weekly patterns, use empty data.frame\n\n #List files for social distancing metrics in that county\n months_path = '../social_distancing/social_dist_'+ county +'/'\n month_list = sorted(os.listdir(months_path))\n os.makedirs( '../stats/time_series/', exist_ok = True)\n #metrics dictionary to be filled looping through every day\n if not os.path.isfile('../stats/time_series/metrics_{}_CT.csv'.format(county)) or backfill:\n metrics = pd.DataFrame()\n existing_dates = []\n else:\n metrics = pd.read_csv('../stats/time_series/metrics_{}_CT.csv'.format(county), dtype = {'origin_census_tract':str})\n #dates already processed\n existing_dates = metrics['date'].unique() #series of unique dates\n\n #Initialize columns of new data frame\n if GEOID_type == 'CBG':\n columns= {'date':[], 'origin_census_block_group':[], 'low_device_count':[], 'pct_at_home':[], 'pct_within_neighborhood':[], 'median_distance_traveled':[],\n 'median_percentage_time_home':[], 'total_visits_to_places':[], 'normalized_visits_to_places':[], 'total_expected_contacts':[], 'places_visited':[], 'cbgs_visited':[]}\n if GEOID_type == 'CT':\n columns= {'date':[], 'origin_census_tract':[], 'low_device_count':[], 'pct_at_home':[], 'pct_within_neighborhood':[], 'median_distance_traveled':[],\n 'median_percentage_time_home':[], 'total_visits_to_places':[], 'normalized_visits_to_places':[], 'places_visited':[]} \n changed = False\n for month in month_list:\n #Loop through every day\n day_list = sorted(os.listdir(months_path + month))\n for day in day_list: \n date_name = month + '-' + day\n print(date_name)\n if month+'-'+day == next_date:\n w = w+1\n if w < len(pattern_dates):\n next_date = pattern_dates[w]\n if date_name in existing_dates:\n continue\n print(\"--changing to next patterns file\")\n county_patterns = pd.read_csv(patterns_path + 'main-file-{}/2020-{}-weekly-patterns.csv.gz'.format(county, date_name),\n index_col='safegraph_place_id')\n norm_factors = pd.read_csv(\n '../social_distancing/normalization/'+'normalization_{}.csv'.format(county),\n dtype={'origin_census_block_group':str})\n norm_factors.set_index('origin_census_block_group', drop =True, inplace=True)\n\n\n #Establish prior for hourly distribution of visits at the top_category level\n county_patterns= county_patterns.join(county_places[['top_category','sub_category']], how='inner')\n restaurants = county_patterns['top_category'] == 'Restaurants and Other Eating Places'\n county_patterns.loc[restaurants, 'top_category'] = county_patterns.loc[restaurants, 'sub_category']\n norm_factor = norm_factors.loc[norm_factors.date == date_name].norm_factor\n\n prior_dict = {}\n for category in county_patterns.top_category.value_counts().index:\n places_in_cat = county_patterns['top_category'] == category\n dirich_samples = [np.array(json.loads(x)) for x in county_patterns.loc[places_in_cat, 'visits_by_each_hour'] ] \n prior_dict[category] = dirichlet.getInitAlphas(dirich_samples)\n if GEOID_type == 'CBG':\n place_cbgs = ccc.place_cbg_contacts_table(\n county_patterns,\n prior_dict,\n norm_factor,\n GEOID_type)\n place_cbgs = 
place_cbgs.loc[place_cbgs['expected_contacts']>1]\n place_cbgs = place_cbgs.join(county_places[['location_name','latitude','longitude']], how='inner')\n place_cbgs.reset_index(inplace=True, drop=False)\n place_cbgs.set_index('origin_census_block_group', inplace=True, drop=True)\n if GEOID_type == 'CT':\n place_cts = ccc.place_cbg_contacts_table(\n county_patterns,\n prior_dict,\n norm_factor,\n GEOID_type)\n place_cts = place_cts.join(county_places[['location_name','latitude','longitude']], how='inner')\n place_cts.reset_index(inplace=True, drop=False)\n place_cts.set_index('origin_census_tract', inplace=True, drop=True)\n print('--computed bipartite contact network')\n\n if date_name in existing_dates:\n continue\n\n\n file_name = os.listdir(months_path+ '/' + month+'/'+day)[0]\n data_soc_dist = pd.read_csv(months_path+ '/' + month+'/'+day+'/'+file_name, dtype={'origin_census_block_group':str})\n data_soc_dist['median_distance_traveled_from_home'] = [0 if math.isnan(x) else x for x in data_soc_dist['distance_traveled_from_home']] #NOT NEEDED?\n \n if GEOID_type == 'CBG':\n [update_metrics_columns(row, date_name, place_cbgs, county_places, columns) for i, row in data_soc_dist.iterrows()]\n else:\n ####CREATE FUNCTION THAT AGGREGATES TO CENSUS TRACT\n data_soc_dist['origin_census_tract'] = [x[:-1] for x in data_soc_dist['origin_census_block_group']]\n ct_data_soc_dist = data_soc_dist.groupby('origin_census_tract').apply(ddd.aggregate_to_ct).reset_index()\n [update_metrics_columns_CT(row, date_name, place_cts, county_places, columns) for i, row in ct_data_soc_dist.iterrows()]\n changed = True\n\n print(\"--merging rows from new dates\")\n new_metrics = pd.DataFrame.from_dict(columns, orient='index').transpose()\n\n metrics = pd.concat([metrics, new_metrics], ignore_index=True)\n if changed:\n if GEOID_type == 'CT':\n metrics.to_csv('../stats/time_series/metrics_{}_CT.csv'.format(county),index=False)\n else:\n metrics.to_csv('../stats/time_series/metrics_{}_CBG.csv'.format(county),index=False)\n print(\"--finished updating time series for {}\".format(county))\n return(0)",
"def get_metric_history_chart_data(self, slugs, since=None, granularity='daily'):\n slugs = sorted(slugs)\n history = self.get_metric_history(slugs, since, granularity=granularity)\n\n # Convert the history into an intermediate data structure organized\n # by periods. Since the history is sorted by key (which includes both\n # the slug and the date, the values should be ordered correctly.\n periods = []\n data = OrderedDict()\n for k, v in history:\n period = template_tags.strip_metric_prefix(k)\n if period not in periods:\n periods.append(period)\n\n slug = template_tags.metric_slug(k)\n if slug not in data:\n data[slug] = []\n data[slug].append(v)\n\n # Now, reorganize data for our end result.\n metrics = {'periods': periods, 'data': []}\n for slug, values in data.items():\n metrics['data'].append({\n 'slug': slug,\n 'values': values\n })\n\n return metrics # templates still don't like defaultdict's",
"def rapl_timeline():\n\n return [{ \"timestamp\": \"2021-10-05T09:14:58.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5558763520.0, \"time_enabled\": 1000770053.0, \"time_running\": 1000770053.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:14:59.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 4777050112.0, \"time_enabled\": 2001065535.0, \"time_running\": 2001065535.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:00.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 6847987712.0, \"time_enabled\": 3001449088.0, \"time_running\": 3001449088.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:01.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5054922752.0, \"time_enabled\": 4001882359.0, \"time_running\": 4001882359.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:02.228\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5434507264.0, \"time_enabled\": 5002352709.0, \"time_running\": 5002352709.0 } } } } }\n ]",
"def top_down_forecast_data_processing(original_df,predictions_by_month):\n transformed_df=original_df.groupby('Month_Year').sum().reset_index()\\\n [['Month_Year','Eviction_Notice']]\n\n percentage_of_month_df = pd.merge(original_df[['Month_Year','Address_Zipcode',\\\n 'Eviction_Notice']],transformed_df,how = 'left',left_on='Month_Year',\\\n right_on='Month_Year',suffixes=('','_for_month'))\n\n percentage_of_month_df.dropna(inplace=True)\n\n percentage_of_month_df['perc_of_month']=\\\n percentage_of_month_df['Eviction_Notice']/percentage_of_month_df['Eviction_Notice_for_month']\n\n group_by_zip_df = percentage_of_month_df.groupby('Address_Zipcode').mean().reset_index()[['Address_Zipcode','perc_of_month']]\n\n return group_by_zip_df",
"def get_course_mau_history_metrics(site, course_id, date_for, months_back):\n date_for = as_date(date_for)\n history = []\n\n for year, month, _ in previous_months_iterator(month_for=date_for,\n months_back=months_back,):\n\n period = '{year}/{month}'.format(year=year, month=str(month).zfill(2))\n active_users = get_mau_from_site_course(site=site,\n course_id=course_id,\n year=year,\n month=month)\n history.append(dict(period=period, value=active_users.count(),))\n\n if history:\n # use the last entry\n current_month = history[-1]['value']\n else:\n # This should work for float too since '0 == 0.0' resolves to True\n current_month = 0\n return dict(current_month=current_month, history=history)"
] |
[
"0.58381313",
"0.5734232",
"0.56948924",
"0.5653308",
"0.5646137",
"0.554423",
"0.5541146",
"0.5489446",
"0.5416555",
"0.5412332",
"0.5408023",
"0.536637",
"0.53220284",
"0.52999943",
"0.5291678",
"0.5265832",
"0.52515584",
"0.52434707",
"0.523636",
"0.52346766",
"0.5223585",
"0.52215946",
"0.5217667",
"0.52151525",
"0.5202634",
"0.5182495",
"0.51742667",
"0.51630604",
"0.5133433",
"0.5111438"
] |
0.6543381
|
0
|
Constructs a densenet201 model.
|
def densenet201(pretrained=False, **kwargs):
model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
_load_pretrained(model, model_zoo.load_url(model_urls['densenet201']))
return model
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model",
"def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args",
"def build(model_name):\n return pretrain.factory.create(model_name)",
"def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)",
"def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model",
"def build_model():",
"def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m",
"def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector",
"def create_model(self):\n model = solph.Model(self.es)\n return model",
"def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model",
"def MakeModel(self):\n pass",
"def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple",
"def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model",
"def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')",
"def create_model(self):\n pass",
"def create_model(self):\n pass",
"def build_model(config):\n # Load the pretrained model\n detr = get_detr_model(config, include_top=True, weights=\"detr\")\n detr.summary()\n return detr",
"def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model",
"def build_model_mobilenet(num_classes):",
"def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)",
"def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()",
"def create_model(self):\n self.create_model_file()\n self.create_model_unit_test()\n self.add_model_to_list()\n self.readme_reminder()",
"def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model",
"def make_model(self):\n model = Sequential()\n model.add(Embedding(self.vocab, self.embd_size,\n input_length=self.sentence_size))\n model.add(LSTM(self.lstm_size, return_sequences=False))\n if self.den1_size > 0:\n model.add(Dense(self.den1_size, activation='relu'))\n if self.drop_rate > 0:\n model.add(Dropout(self.drop_rate))\n if self.den2_size > 0:\n model.add(Dense(self.den2_size, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Activation(self.activation))\n model.compile(optimizer=self.optimizer,\n loss=self.loss_func,\n metrics=['accuracy'])\n return model",
"def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()",
"def create_detr(num_classes: int, num_queries: int, backbone: str):\n\n model = DETR(num_classes, num_queries, backbone)\n return model",
"def model_creator(config):\n return nn.Linear(1, 1)",
"def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)",
"def build_model():\n # noise for soise sampling in NCE\n noise = build_unigram_noise(\n torch.FloatTensor(corpus.vocab.idx2count)\n )\n\n norm_term = 'auto' if args.norm_term == -1 else args.norm_term\n # setting up NCELoss modules\n if args.index_module == 'linear':\n criterion = IndexLinear(\n args.emsize,\n ntoken,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n loss_type=args.loss,\n reduction='none',\n )\n model = RNNModel(\n ntoken, args.emsize, args.nhid, args.nlayers,\n criterion=criterion, dropout=args.dropout,\n )\n elif args.index_module == 'gru':\n if args.nlayers != 1:\n logger.warning('Falling into one layer GRU due to Index_GRU supporting')\n nce_criterion = IndexGRU(\n ntoken, args.emsize, args.nhid,\n args.dropout,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n )\n model = GenModel(\n criterion=nce_criterion,\n )\n else:\n logger.error('The index module [%s] is not supported yet' % args.index_module)\n raise(NotImplementedError('index module not supported'))\n\n if args.cuda:\n model.cuda()\n\n logger.info('model definition:\\n %s', model)\n return model",
"def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self"
] |
[
"0.74119705",
"0.66543114",
"0.66498333",
"0.6570754",
"0.64200634",
"0.63234144",
"0.62984204",
"0.6281156",
"0.6251362",
"0.6214036",
"0.617731",
"0.61620563",
"0.6139047",
"0.6125915",
"0.6096478",
"0.6096478",
"0.6068243",
"0.60617614",
"0.6011719",
"0.6006324",
"0.59922916",
"0.5977954",
"0.59654146",
"0.59559894",
"0.59529203",
"0.5941722",
"0.5925558",
"0.59176886",
"0.58931506",
"0.5892324"
] |
0.7408212
|
1
|
This method is called whenever data is received from a client. The only message that a client sends to the server is a RPC Request message. If the RPC Request message is valid, then the method is called in a thread
|
def dataReceived(self, data):
if self.__buffer:
# We have some data from the last dataReceived() so lets prepend it
data = self.__buffer + data
self.__buffer = None
while data:
dobj = zlib.decompressobj()
try:
request = rencode.loads(dobj.decompress(data))
except Exception, e:
#log.debug("Received possible invalid message (%r): %s", data, e)
# This could be cut-off data, so we'll save this in the buffer
# and try to prepend it on the next dataReceived()
self.__buffer = data
return
else:
data = dobj.unused_data
if type(request) is not tuple:
log.debug("Received invalid message: type is not tuple")
return
if len(request) < 1:
log.debug("Received invalid message: there are no items")
return
for call in request:
if len(call) != 4:
log.debug("Received invalid rpc request: number of items "
"in request is %s", len(call))
continue
# Format the RPCRequest message for debug printing
try:
s = call[1] + "("
if call[2]:
s += ", ".join([str(x) for x in call[2]])
if call[3]:
if call[2]:
s += ", "
s += ", ".join([key + "=" + str(value) for key, value in
call[3].items()])
s += ")"
except UnicodeEncodeError:
log.debug("RPCRequest had some non-ascii text..")
pass
else:
log.debug("RPCRequest: %s", s)
pass
reactor.callLater(0, self.dispatch, *call)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _dispatch_from_client_request(self):\n # Listen for client connection\n self._from_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._from_client_request], [], [self._from_client_request], 0.1)\n\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n\n client_name_read, _, _ = select([client_conn], [], [client_conn])\n if client_name_read:\n client_name = json.loads(client_name_read[0].recv(cfg.HEADER).decode('utf-8'))\n else:\n print(\"Connection closed\")\n continue\n\n self._thread_lock.acquire()\n self._from_client_connections[client_conn] = client_name\n self._state[client_name] = 0\n self._thread_lock.release()\n\n print(\"Receiving commands from [\" + client_name + \", \" + client_addr[0] + \", \" + str(client_addr[1]) + ']')",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))",
"def data_received(self, data):\n pass",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))",
"def on_message(self, data):\n req = json.loads(data)\n self.serve(req)",
"def on_message(self, data):\n req = json.loads(data)\n self.serve(req)",
"def requestDataFromServer(self):\r\n self.tempVar = None\r\n dataAvailable = threading.Event()\r\n\r\n self.sendMessage(\"requestData\", \"\")\r\n\r\n @self.ursinaClient.event\r\n def receiveData(Content):\r\n self.ursinaClient.lock.acquire()\r\n\r\n self.tempVar = Content\r\n dataAvailable.set()\r\n\r\n self.ursinaClient.lock.release()\r\n\r\n # print(\"Len Data Recvd: \", len(Content))\r\n\r\n dataAvailable.wait()\r\n\r\n tempVar = self.tempVar\r\n\r\n del self.tempVar\r\n return tempVar",
"def dataReceived(self, data):",
"def on_recv(self, callback):\n return self.message_client.on_recv(callback)",
"def manage_read_request(self, client):\n\n # obtain the message\n message = client.recv()\n message = json.loads(message)\n msg = message[\"payload\"].strip()\n if msg.startswith(\"/\"):\n type = \"c2s\"\n elif msg.startswith(\"@\"):\n type = \"c2c\"\n else:\n type = \"c2g\"\n\n func = getattr(self, \"request_\"+type)\n func(client, message)\n # self.msg_map[message['type']](client, message)",
"def handleRecvData(self, data):\n\n\t\t#Look for commands\n\t\tif data == 'Hello':\n\t\t\t#Inform client it is 'connected'\n\t\t\tself.transmit(\"Welcome\")\n\n\t\telif data == 'kill':\t\n\t\t\t#Stop the server running\n\t\t\tself.running = False\n\n\t\telif data == 'control':\n\t\t\t#Print out if in control of car\n\t\t\tprint(\"Control query\")\n\t\t\tif self.arduino:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: True\")\n\t\t\telse:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: False\")\n\n\t\telif data == 'Hello?':\n\t\t\t#The client is still alive therefore set missing count to 0\n\t\t\tself.missingCount = 0\n\n\t\t#Look for Arduino Data\n\t\telif self.arduino:\n\t\t\t#Check if controlling the car and it's a valid car command\n\t\t\tif self.arduino.readPack(data): \n\t\t\t\tprint(self.address)\n\t\t\t\tprint(\"Sent to arduino: %s\" % data)\n\t\t\telse:\n\t\t\t\t#Print out other data\n\t\t\t\tprint(\"Not valid Arduino data\")\n\t\t\t\tprint(self.address)\n\t\t\t\tprint(data)\n\n\t\telse:\n\t\t\t#All other data print out\n\t\t\tprint(self.address)\n\t\t\tprint(data)",
"async def data_received(self, data: bytes) -> None:\n\n self.response_message.set_result(data)",
"def dataReceived(self, data):\r\n try:\r\n address = self.guid\r\n data = json.loads(data)\r\n threads.deferToThread(send_signal, self.dispatcher, data)\r\n\r\n if 'hx_subscribe' in data:\r\n return self.dispatcher.subscribe(self.transport, data)\r\n\r\n if 'address' in data:\r\n address = data['address']\r\n else:\r\n address = self.guid\r\n\r\n self.dispatcher.send(address, data)\r\n\r\n except Exception, exc:\r\n raise\r\n self.dispatcher.send(\r\n self.guid,\r\n {'message': data, 'error': str(exc)}\r\n )",
"def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)",
"def handle(self):\n # Since we only expect small packets, we don't need to read a buffer of\n # size, say, 4096.\n data = self.request.recv(64)\n\n logging.debug(\"Received '%s' from '%s'\", repr(data), self.client_address[0])\n\n if data[0:2] == \"\\x01\\x00\":\n # Authentication attempt. Since this address is whitelisted (as it\n # was successfully verified by the server in `verify_request`), we\n # can safely acknowledge the authentication.\n client = None\n\n try:\n client = self.server.authenticator.add_new_client(\n self.client_address[0],\n data.split(\"\\x00\")[1]\n )\n\n except:\n logging.exception(\"An exception occurred creating a new client\")\n return\n\n\n self.server.storage_manager.add_client(client)\n\n self.request.send(\"\\x01\\x00%s\\x00%s\\x00%s\\x00\" % (\n client.token,\n self.server.receiver_address[0],\n self.server.receiver_address[1]\n )\n )\n\n else:\n logging.info(\"Unknown request '%s' from '%s'\", \n data, self.client_address[0])\n\n self.request.send(\"\")",
"def ServerSyncReceived(self,message):",
"def receive_message(self):\r\n try:\r\n if self.is_connected:\r\n # Receive the messages.\r\n self.message_recv = self.server_connection.recv(1024)\r\n \r\n # Check if the message is not null.\r\n if self.message_recv != b\"\":\r\n\r\n # Decrypt the messages.\r\n self.message_recv = pickle.loads(self.message_recv)\r\n\r\n # Server request to update the online users list.\r\n if self.message_recv[0] == \"Update User\":\r\n self.updt_user = True\r\n self.data_user[\"Online_User\"] = self.message_recv[1]\r\n\r\n # Server request to exit the server.\r\n elif self.message_recv[0] == \"Exit Server\":\r\n self.new_msg = True\r\n self.message_recv[0] = [\"System\", \"Système\"]\r\n \r\n self.is_stopped = True\r\n self.is_connected = False\r\n\r\n else:\r\n self.new_msg = True\r\n\r\n # Avoid an error when shutting down the server.\r\n except ConnectionAbortedError as e:\r\n print(e)",
"def callback_client_receive(data):\n data: GameStateModel = JSONSerializer.deserialize(data)\n logger.debug(f\"Client received {data.__class__.__name__} object from host.\")\n # print(f\"Client received {data.__class__.__name__} object from host.\")\n if isinstance(data, GameStateModel):\n GameStateModel.set_game(data)\n return\n if isinstance(data, TurnEvent) or isinstance(data, ActionEvent):\n exec_thread = threading.Thread(target=data.execute)\n exec_thread.start()",
"def data_received(self, data: str) -> None:\n logger.debug('Received: {}'.format(data))\n try:\n self.buffer += data.decode()\n except:\n logger.exception('Could not decode data from client')\n\n idx = self.buffer.find('\\r\\n')\n\n while idx >= 0: # While there are separators\n frame = self.buffer[:idx + 2].strip() # Extract the JSON object\n self.buffer = self.buffer[idx + 2:] # Removes the JSON object from the buffer\n\n self.on_frame(frame) # Process the frame\n idx = self.buffer.find('\\r\\n')\n\n if len(self.buffer) > 4096 * 1024 * 1024: # If buffer is larger than 4M\n logger.warning('Buffer to large')\n self.buffer = ''\n self.transport.close()",
"def slot_recv(self, dummy_sender, data):\r\n (str_json) = data\r\n handler = None\r\n if type(str_json) == dict:\r\n msg = str_json # was already a dict\r\n else:\r\n msg = json.loads(str_json)\r\n self.msg = msg\r\n\r\n if \"stamp\" in msg:\r\n delay = time.time() * 1e6 - int(msg[\"stamp\"])\r\n self.socket_lag = (self.socket_lag * 29 + delay) / 30\r\n\r\n if \"op\" in msg:\r\n try:\r\n msg_op = msg[\"op\"]\r\n handler = getattr(self, \"_on_op_\" + msg_op)\r\n\r\n except AttributeError:\r\n self.debug(\"slot_recv() ignoring: op=%s\" % msg_op)\r\n else:\r\n self.debug(\"slot_recv() ignoring:\", msg)\r\n\r\n if handler:\r\n handler(msg)",
"def _incoming_read(self, client, data, error):\n\n if error is not None:\n client.close()\n del self._incoming[client]\n return\n\n incoming = self._incoming[client]\n incoming.unpacker.feed(data)\n for req_id, message in incoming.unpacker:\n self._call_handler(\n partial(self._queue_response,\n client, req_id),\n self._call_interface.queue_call,\n message,\n )",
"def receive_data_from_server(self):\n while not self._stop_receive.is_set():\n # seems irrelevant now\n # if not self._pause_receive.is_set():\n try:\n # We are doing handshaking, so this is fine\n _server_reply = self.receive(True)\n if _server_reply:\n self._reply_queue.append(_server_reply)\n self.callback_client_receive(_server_reply)\n except MastermindErrorClient:\n logger.error(\"Mastermind Error:\")\n info = sys.exc_info()\n traceback.print_exception(*info)\n self.callback_disconnect()\n except OSError:\n logger.warning(\"OS ERROR, disconnecting client.\")\n info = sys.exc_info()\n traceback.print_exception(*info)\n self.callback_disconnect()",
"def handle_rpc(self):\n while True: # loop handling\n self.rbuf.seek(0)\n length_prefix = self.rbuf.read(4)\n if len(length_prefix) < 4: # half-package\n break\n\n try:\n length, = struct.unpack(\"I\", length_prefix.encode(\"utf-8\"))\n except Exception as e:\n print(e.__traceback__)\n body = self.rbuf.read(length)\n if len(body) < length: # half-package\n break\n\n request = json.loads(body)\n input = request[\"in\"]\n params = request[\"params\"]\n handler = self.handlers[input]\n handler(params)\n # cut read buffer\n left = self.rbuf.getvalue()[length + 4:]\n self.rbuf = StringIO()\n self.rbuf.write(left)\n # move position to EOF\n self.rbuf.seek(0, 2)",
"async def data_received(self, data: bytes):\n logging.info('received: %s' % data.decode())",
"def data_received(self, transport, line):\n\n if transport not in self.clients:\n return\n\n client = self.clients[transport]\n log.debug(f'{client} << {line!r}')\n\n try:\n func_name, *args = line.split()\n except ValueError:\n return\n func = getattr(IncomingCommand, func_name, None)\n\n # Dispatch and handle errors\n try:\n if func is None:\n raise UnknownCommand(func_name)\n if not client.ident.registered and func_name not in self.allowed_unregistered_cmds:\n raise UnregisteredDisallow()\n func(client, *args)\n\n except UnknownCommand as e:\n log.info(f'{client} *** Unknown command {e} ***')\n client.send_as_server(ERR_UNKNOWNCOMMAND, f'{client.ident.nick} {e} :Unknown command')\n\n except TypeError as e:\n # A TypeError calling func() means the arguments were incorrect\n if str(e).startswith(func_name + '()'):\n client.send_as_server(ERR_NEEDSMOREPARAMS, f'{client.ident.nick} {func_name} :{e}')\n # Or it could be an exception from the function execution itself\n else:\n raise\n\n except UnregisteredDisallow as e:\n client.send_as_server(ERR_NOTREGISTERED, f'* :You have not registered')",
"def dataReceived(self, data):\n print \"received:\", data",
"def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)",
"def handle_client_data(self, data, client_sock):\n prot = data[0].lower()\n if prot == \"n\":\n # Sent by the central server when a new node joins\n peer = json.loads(data[1:])\n self._worker.add_peer(peer)\n client_sock.close()\n elif prot == \"b\":\n self._handle_block(data, client_sock)\n elif prot == \"t\":\n self._handle_transaction(data, client_sock)\n elif prot == \"r\":\n self._handle_transaction_proof(data, client_sock)\n elif prot == \"x\":\n self._handle_balance(data, client_sock)\n else:\n # either header or wrong message format\n client_sock.close()",
"def process(self):\n\n try:\n self._read_buffer += self._socket.recv(4096)\n except socket.error as exc:\n if exc.errno not in [errno.EAGAIN,\n errno.EWOULDBLOCK,\n errno.WSAEWOULDBLOCK]:\n raise\n response, self._read_buffer = Message.decode(self._read_buffer)\n # Check if terminating RESPONSE_VALUE with body 00 01 00 00\n if (response.type == Message.SERVERDATA_RESPONSE_VALUE and\n response.body.encode(\"ascii\") == \"\\x00\\x01\\x00\\x00\"):\n response = Message(self._response[0].id,\n self._response[0].type,\n \"\".join([r.body for r in self._response]))\n self._active_requests[response.id].response = response\n self._response = []\n self._active_requests[response.id]\n elif response.type == Message.SERVERDATA_RESPONSE_VALUE:\n self._response.append(response)\n elif response.type == Message.SERVERDATA_AUTH_RESPONSE:\n self._active_requests[self._response[0].id].response = response\n # Clear empty SERVERDATA_RESPONSE_VALUE sent before\n # SERVERDATA_AUTH_RESPONSE\n self._response = []\n self._active_requests[response.id]"
] |
[
"0.68748134",
"0.6768327",
"0.6757667",
"0.67148477",
"0.6711134",
"0.66720986",
"0.66720986",
"0.66050917",
"0.6565084",
"0.65585506",
"0.6556971",
"0.6539188",
"0.6431191",
"0.64153564",
"0.6412267",
"0.638672",
"0.6384882",
"0.6378129",
"0.6362476",
"0.6343157",
"0.63170666",
"0.6315612",
"0.6312527",
"0.63017666",
"0.62888277",
"0.6279002",
"0.62659955",
"0.62368405",
"0.6230136",
"0.6208725"
] |
0.70811373
|
0
|
Load data from disk. If ``kind`` is `gwosc` assumes input is an strain HDF5 file downloaded
|
def read(cls, path, kind=None, **kws):
kind = (kind or '').lower()
if not kind:
# attempt to guess filetype
ext = os.path.splitext(path)[1].lower().strip('.')
if ext in ['h5', 'hdf5', 'hdf']:
kind = 'hdf'
elif ext in ['txt', 'gz', 'dat', 'csv']:
kind = 'csv'
else:
raise ValueError("unrecognized extension: {}".format(ext))
if kind == 'gwosc':
with h5py.File(path, 'r') as f:
t0 = f['meta/GPSstart'][()]
T = f['meta/Duration'][()]
h = f['strain/Strain'][:]
dt = T/len(h)
time = t0 + dt*arange(len(h))
return cls(h, index=time, **kws)
elif kind in ['hdf', 'csv']:
read_func = getattr(pd, 'read_{}'.format(kind))
# get list of arguments accepted by pandas read function in order
# to filter out extraneous arguments that should go to cls
read_vars = inspect.signature(read_func).parameters.keys()
# define some defaults to ensure we get a Series and not a DataFrame
read_kws = dict(sep=None, index_col=0, squeeze=True)
if 'sep' in kws:
# gymnastics to be able to support `sep = \t` (e.g., when
# reading a config file)
kws['sep'] = kws['sep'].encode('raw_unicode_escape').decode('unicode_escape')
read_kws.update({k: v for k,v in kws.items() if k in read_vars})
squeeze = read_kws.pop('squeeze', False)
cls_kws = {k: v for k,v in kws.items() if k not in read_vars}
if kind == 'csv' and 'float_precision' not in read_kws:
logging.warning("specify `float_precision='round_trip'` or risk "
"strange errors due to precission loss")
# squeeze if needed (since sequeeze argument no longer accepted)
d = read_func(path, **read_kws)
if squeeze:
d = d.squeeze("columns")
return cls(d, **cls_kws)
else:
raise ValueError("unrecognized file kind: {}".format(kind))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def training_data(kind, depth = 5):\n\n if kind == 'unigram':\n return UnigramTrainingData.load(UNIGRAM_DIR + str(depth))\n\n if kind == 'rnn':\n return RNNTrainingData.load(RNN_DIR + str(depth))",
"def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels",
"def load_file(path, data_type=None, *args, **kwargs):\n\n path = os.path.normpath(path)\n if os.path.isdir(path) and path[-1] != os.sep:\n path = path + os.sep\n\n if data_type == None:\n data_type = autodetect(path)\n\n if data_type == \"prospa\":\n return dnpIO.prospa.import_prospa(path, *args, **kwargs)\n\n elif data_type == \"topspin\":\n return dnpIO.topspin.import_topspin(path, *args, **kwargs)\n\n elif data_type == \"topspin dir\":\n return dnpIO.topspin.import_topspin_dir(path, *args, **kwargs)\n\n elif data_type == \"delta\":\n return dnpIO.delta.import_delta(path, *args, **kwargs)\n\n elif data_type == \"vnmrj\":\n return dnpIO.vnmrj.import_vnmrj(path, *args, **kwargs)\n\n elif data_type == \"tnmr\":\n return dnpIO.tnmr.import_tnmr(path, *args, **kwargs)\n\n elif data_type == \"specman\":\n return dnpIO.specman.import_specman(path, *args, **kwargs)\n\n elif data_type == \"xepr\" or data_type == \"xenon\":\n return dnpIO.bes3t.import_bes3t(path, *args, **kwargs)\n\n elif data_type == \"winepr\" or data_type == \"esp\":\n return dnpIO.winepr.import_winepr(path, *args, **kwargs)\n\n elif data_type == \"h5\":\n return dnpIO.h5.load_h5(path, *args, **kwargs)\n\n elif data_type == \"power\":\n return dnpIO.power.importPower(path, *args, **kwargs)\n\n elif data_type == \"vna\":\n return dnpIO.vna.import_vna(path, *args, **kwargs)\n\n elif data_type == \"cnsi_powers\":\n return dnpIO.cnsi.get_powers(path, *args, **kwargs)\n\n else:\n raise ValueError(\"Invalid data type: %s\" % data_type)",
"def load(self):\n self.data = NSPSpecIO().read(self.path)",
"def load(self, fname, snver=1):\n self._data = self._io.load(fname, snver=snver)",
"def read(self, simtype):\n\n if simtype == 'original':\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n with h5py.File(self.filename, \"r\") as data_file:\n #print('treeIndex', data_file[\"treeIndex\"].keys())\n #print('haloTrees', data_file[\"haloTrees\"].keys())\n \n # Find dimensionality of keys\n columns_1dim = [] \n columns_2dim = [] \n for column in self.columns:\n if len(data_file[\"/haloTrees/%s\" % column].shape) == 1:\n columns_1dim.append(column)\n else:\n columns_2dim.append(column)\n \n # 1D keys\n data = pd.DataFrame(\n {\n column: data_file[\"/haloTrees/%s\" % column].value\n for column in columns_1dim\n },\n columns=columns_1dim\n ).set_index(\"nodeIndex\")\n del columns_1dim\n\n # 2D keys\n for column in columns_2dim:\n if column == 'position':\n pos = data_file[\"/haloTrees/%s\" % column].value\n data['X'] = pd.Series(pos[:, 0], index=data.index)\n data['Y'] = pd.Series(pos[:, 1], index=data.index)\n data['Z'] = pd.Series(pos[:, 2], index=data.index)\n del columns_2dim\n\n data.rename(index=str,\n columns={\"snapshotNumber\": \"snapnum\"})\n ## eliminate fake elements with isIntegrated=1\n #data = data[data.isInterpolated != 1]\n\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n if simtype == 'EAGLE':\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n data_file = h5py.File(self.filename, 'r')\n column_mt = []\n column_sh = []\n for column in self.columns:\n if column in data_file['MergerTree']:\n column_mt.append(column)\n else:\n column_sh.append(column)\n\n data = pd.DataFrame(\n {\n column: data_file[\"/MergerTree/%s\" % column].value\n for column in column_mt\n },\n columns=column_mt\n ).set_index(\"HaloID\")\n #.set_index(data_file[\"/Merger/HaloID\"].value)\n\n for column in column_sh:\n data[column] = pd.Series(data_file[\"/Subhalo/%s\" % column].value,\n index=data.index)\n data = data.rename(index=str,\n columns={\"SnapNum\": \"snapnum\", #\"HaloID\": \"nodeIndex\",\n \"DescendantID\" : \"descendantIndex\"})\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n\n return data",
"def load_mnist(path, kind='train'):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels",
"def load_disk_data(args: Dict, env: EvalEnv) -> DriverDataCube:\n kwargs = dict(\n glob_pattern=extract_arg(args, 'glob_pattern'),\n format=extract_arg(args, 'format'),\n options=args.get('options', {}),\n )\n dry_run_tracer: DryRunDataTracer = env.get(ENV_DRY_RUN_TRACER)\n if dry_run_tracer:\n return dry_run_tracer.load_disk_data(**kwargs)\n else:\n source_id = dry_run.DataSource.load_disk_data(**kwargs).get_source_id()\n load_params = _extract_load_parameters(env, source_id=source_id)\n return env.backend_implementation.load_disk_data(**kwargs, load_params=load_params, env=env)",
"def load_fashion_mnist(path, kind='train'):\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)\n return images, labels",
"def load_gal_spec(self, idx):\n # Init\n cgm_abs = self.cgm_abs[idx]\n # Directories\n galdir = os.environ.get('DROPBOX_DIR')+'/coshaloanalysis/'\n fielddir = 'fields/'+cgm_abs.field+'/'\n sysdir = cgm_abs.gal_id+'/spec1d/'\n sysname = cgm_abs.field+'_'+cgm_abs.gal_id\n\n # Find files\n lris_files = glob.glob(galdir+fielddir+sysdir+sysname+'*corr.fits')\n if len(lris_files) == 0:\n raise ValueError('No LRIS files!')\n elif len(lris_files) == 2:\n lris_files.sort()\n specb = lsio.readspec(lris_files[0]) \n specr = lsio.readspec(lris_files[1]) \n spec = specb.splice(specr)\n else:\n raise ValueError('Not sure what happened')\n\n # Return\n return spec",
"def read_meg_rest_data(kind, band, n_labels=448):\n if kind == 'mne_power_diag':\n data = pd.read_hdf(\n op.join(DRAGO_PATH, f'mne_source_power_diag-{band}.h5'),\n key=kind)\n elif kind == 'mne_power_cross':\n # We need the diagonal powers to do tangent mapping.\n # but then we will discard it.\n diag = read_meg_rest_data(kind='mne_power_diag', band=band)\n # undp log10\n diag = diag.transform(lambda x: 10 ** x)\n index = diag.index.copy()\n\n data = pd.read_hdf(\n op.join(DRAGO_PATH, f'mne_source_power_cross-{band}.h5'),\n key=kind)\n covs = make_covs(diag, data, n_labels)\n data = map_tangent(covs, diag=True)\n data = pd.DataFrame(data=data, index=index)\n if kind == 'mne_envelope_diag':\n data = pd.read_hdf(\n op.join(DRAGO_PATH, f'mne_envelopes_diag_{band}.h5'),\n key=kind)\n elif kind == 'mne_envelope_cross':\n # We need the diagonal powers to do tangent mapping.\n # but then we will discard it.\n diag = read_meg_rest_data(kind='mne_envelope_diag', band=band)\n # undp log10\n diag = diag.transform(lambda x: 10 ** x)\n index = diag.index.copy()\n\n data = pd.read_hdf(\n op.join(DRAGO_PATH, f'mne_envelopes_cross_{band}.h5'),\n key=kind)\n covs = make_covs(diag, data, n_labels)\n data = map_tangent(covs, diag=True)\n data = pd.DataFrame(data=data, index=index)\n elif kind == 'mne_envelope_corr':\n # The diagonal is simply one.\n diag = 1.0\n data = pd.read_hdf(\n op.join(DRAGO_PATH, f'mne_envelopes_corr_{band}.h5'),\n key=kind)\n index = data.index.copy()\n\n data = map_tangent(make_covs(diag, data, n_labels),\n diag=True)\n data = pd.DataFrame(data=data, index=index)\n\n elif kind == 'mne_envelope_corr_orth':\n data = pd.read_hdf(\n op.join(DRAGO_PATH, f'mne_envelopes_corr_orth_{band}.h5'), key=kind)\n # The result here is not an SPD matrix.\n # We do do Fisher's Z-transform instead.\n # https://en.wikipedia.org/wiki/Fisher_transformation\n data = data.transform(np.arctanh)\n return data",
"def _load_disk(self):",
"def _load_disk(self):",
"def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels",
"def load_fmnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels",
"def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte'.format(kind))\n images_path = os.path.join(path,'{}-images-idx3-ubyte'.format(kind))\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8).reshape(n)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape((num,1,rows,cols))\n print(kind)\n print(\"label num:\",n)\n print(\"image num:\",num)\n print(\"image rows:\",rows)\n print(\"image cols:\",cols)\n images = images/255\n return images, labels",
"def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte' % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels",
"def import_scatterdata(self):\n\n datalist = [name for name in os.listdir(self.dir_line.text()) if\n os.path.isfile(os.path.join(self.dir_line.text(), name)) and name.endswith('.h5')]\n\n data_os = os.path.join(self.dir_line.text(), random.choice(datalist))\n with h5py.File(data_os, 'r') as f:\n labelset = f['r_vs_thresh'].attrs['best'][0].decode()\n labels = pd.array(f['labels/' + labelset][()]).astype(int)\n columns = f['data'].attrs['datacolumns'] - 1\n columns = columns.tolist()\n dataset = pd.DataFrame(f['data'][()]).iloc[:, columns]\n dataset = dataset.divide(1000)\n dataset['labels'] = labels\n\n return dataset\n\n # TODO: implement a way to rotate over all datafiles",
"def read(filename: str) -> orm.Data:\n return from_bands_inspect(load(hdf5_file=filename))",
"def load_mnist(path, kind='train'):\n\tlabels_path = os.path.join(path,'%s-labels.idx1-ubyte'%kind)\n\timages_path = os.path.join(path,'%s-images.idx3-ubyte'%kind)\n\t\n\twith open(labels_path, 'rb') as lbpath:\n\t\tmagic, n = struct.unpack('>II', lbpath.read(8))\n\t\tlabels = np.fromfile(lbpath, dtype=np.uint8)\n\t\t\n\twith open(images_path, 'rb') as imgpath:\n\t\tmagic, num, row, cols = struct.unpack('>IIII', imgpath.read(16))\n\t\timages = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\t\n\treturn images, labels",
"def load(f, model, ext_unit_dict=None):\n\n if model.verbose:\n sys.stdout.write('loading swt package file...\\n')\n\n if not hasattr(f, 'read'):\n filename = f\n f = open(filename, 'r')\n # dataset 0 -- header\n while True:\n line = f.readline()\n if line[0] != '#':\n break\n # determine problem dimensions\n nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()\n\n # read dataset 1\n if model.verbose:\n sys.stdout.write(' loading swt dataset 1\\n')\n t = line.strip().split()\n ipakcb, iswtoc, nsystm, ithk, ivoid, istpcs, icrcc = int(t[0]), \\\n int(t[1]), \\\n int(t[2]), \\\n int(t[3]), \\\n int(t[4]), \\\n int(t[5]), \\\n int(t[6])\n\n # if ipakcb > 0:\n # ipakcb = 53\n\n # read dataset 2\n lnwt = None\n if nsystm > 0:\n if model.verbose:\n sys.stdout.write(' loading swt dataset 2\\n')\n lnwt = np.empty((nsystm), dtype=np.int32)\n lnwt = read1d(f, lnwt) - 1\n\n # read dataset 3\n if model.verbose:\n sys.stdout.write(' loading swt dataset 3\\n')\n line = f.readline()\n t = line.strip().split()\n iizcfl, izcfm, iglfl, iglfm, iestfl, \\\n iestfm, ipcsfl, ipcsfm, istfl, istfm = int(t[0]), int(t[1]), \\\n int(t[2]), int(t[3]), \\\n int(t[4]), int(t[5]), \\\n int(t[6]), int(t[7]), \\\n int(t[8]), int(t[9])\n\n # read dataset 4\n if model.verbose:\n sys.stdout.write(' loading swt dataset 4')\n gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, 'gl0',\n ext_unit_dict)\n\n # read dataset 5\n if model.verbose:\n sys.stdout.write(' loading swt dataset 5')\n sgm = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgm',\n ext_unit_dict)\n\n # read dataset 6\n if model.verbose:\n sys.stdout.write(' loading swt dataset 6')\n sgs = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgs',\n ext_unit_dict)\n\n # read datasets 7 to 13\n thick = [0] * nsystm\n void = [0] * nsystm\n sub = [0] * nsystm\n if icrcc == 0:\n sse = None\n ssv = None\n cr = [0] * nsystm\n cc = [0] * nsystm\n else:\n sse = [0] * nsystm\n ssv = [0] * nsystm\n cr = None\n cc = None\n\n for k in range(nsystm):\n kk = lnwt[k] + 1\n # thick\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 7 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'thick layer {}'.format(kk),\n ext_unit_dict)\n thick[k] = t\n if icrcc != 0:\n # sse\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 8 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'sse layer {}'.format(kk), ext_unit_dict)\n sse[k] = t\n # ssv\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 9 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'sse layer {}'.format(kk), ext_unit_dict)\n ssv[k] = t\n else:\n # cr\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 10 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'cr layer {}'.format(kk), ext_unit_dict)\n cr[k] = t\n # cc\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 11 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'cc layer {}'.format(kk), ext_unit_dict)\n cc[k] = t\n # void\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 12 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'void layer {}'.format(kk), ext_unit_dict)\n void[k] = t\n # sub\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 13 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'sub layer {}'.format(kk), ext_unit_dict)\n sub[k] = 
t\n\n # dataset 14 and 15\n if istpcs != 0:\n pcsoff = [0] * nlay\n pcs = None\n else:\n pcsoff = None\n pcs = [0] * nlay\n for k in range(nlay):\n if istpcs != 0:\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 14 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'pcsoff layer {}'.format(k + 1), ext_unit_dict)\n pcsoff[k] = t\n else:\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 15 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'pcs layer {}'.format(k + 1), ext_unit_dict)\n pcs[k] = t\n\n ids16 = None\n ids17 = None\n if iswtoc > 0:\n # dataset 16\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 15 for layer {}\\n'.format(kk))\n ids16 = np.empty(26, dtype=np.int32)\n ids16 = read1d(f, ids16)\n #for k in range(1, 26, 2):\n # model.add_pop_key_list(ids16[k])\n # ids16[k] = 2054 # all sub-wt data sent to unit 2054\n # dataset 17\n ids17 = [0] * iswtoc\n for k in range(iswtoc):\n if model.verbose:\n msg = 2 * ' ' + 'loading swt dataset 17 for ' + \\\n 'iswtoc {}\\n'.format(k + 1)\n sys.stdout.write(msg)\n t = np.empty(30, dtype=np.int32)\n t = read1d(f, t)\n t[0:4] -= 1\n ids17[k] = t\n\n # close file\n f.close()\n\n # determine specified unit number\n unitnumber = None\n filenames = [None for x in range(15)]\n if ext_unit_dict is not None:\n unitnumber, filenames[0] = \\\n model.get_ext_dict_attr(ext_unit_dict,\n filetype=ModflowSwt.ftype())\n if ipakcb > 0:\n iu, filenames[1] = \\\n model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)\n\n if iswtoc > 0:\n ipos = 2\n for k in range(1, 26, 2):\n unit = ids16[k]\n if unit > 0:\n iu, filenames[ipos] = \\\n model.get_ext_dict_attr(ext_unit_dict,\n unit=unit)\n model.add_pop_key_list(unit)\n ipos += 1\n\n # create sub-wt instance\n swt = ModflowSwt(model, ipakcb=ipakcb, iswtoc=iswtoc, nsystm=nsystm,\n ithk=ithk, ivoid=ivoid, istpcs=istpcs,\n icrcc=icrcc, lnwt=lnwt, izcfl=iizcfl, izcfm=izcfm,\n iglfl=iglfl, iglfm=iglfm, iestfl=iestfl,\n iestfm=iestfm, ipcsfl=ipcsfl, ipcsfm=ipcsfm,\n istfl=istfl, istfm=istfm, gl0=gl0, sgm=sgm,\n sgs=sgs, thick=thick, sse=sse, ssv=ssv, cr=cr, cc=cc,\n void=void, sub=sub, pcsoff=pcsoff,\n pcs=pcs, ids16=ids16, ids17=ids17,\n unitnumber=unitnumber, filenames=filenames)\n\n # return sut-wt instance\n return swt",
"def load_mnist(path, kind = 'train'):\n label_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n\n with open(label_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype= np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels),784)\n\n\n return images, labels",
"def read_raw_img(kind):\n\n mypath = RAW_DIR_PATH[kind]\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))\n and f[0] != '.']\n random.shuffle(files)\n\n if kind == 'bad':\n files *= 3\n\n for img in files:\n yield Image.open(mypath + img)",
"def open(self,type = 'r'):\n \n self.dataset = Dataset(self.run_filename,'r',format='NETCDF4')",
"def load_ood_dataset(args, data_details, name='voc12', n_img=1000):\n n_channels = data_details['n_channels']\n assert n_channels in [1, 3]\n #TO-DO: Make this work across datasets with multiple channels\n x_out, _ = gen_dataset(name, args.dataset_in, data_details, n_img,\n partition='validation', transform_in_dist=False)\n x_out = np.transpose(x_out, (0, 3, 1, 2)) # NHWC -> NCHW conversion\n loader_ood = torch.utils.data.DataLoader(CustomDatasetFromImages(x_out),\n batch_size=args.test_batch_size,\n shuffle=True)\n return loader_ood",
"def load_data_stream(self, *, scope: Scope, doc_key: str, name: str) -> Union[str, bytes]:",
"def load_hdf5(file_path, object_class_name=None):\n\n # identify object class type\n if object_class_name is None:\n object_class_name = file_path.split(\".\")[-1]\n object_class_name = object_class_name.capitalize()\n\n # load object\n if object_class_name == \"Links\":\n obj = load_links(file_path=file_path)\n\n elif object_class_name == \"Net\":\n obj = load_net(file_path=file_path)\n\n elif object_class_name == \"Oracle\":\n obj = load_oracle(file_path=file_path)\n\n elif object_class_name in [\"Tfinfo\", \"tfinfo\"]:\n obj = load_TFinfo(file_path=file_path)\n\n elif object_class_name == \"Gradient\":\n obj = load_gradient(file_path=file_path)\n\n else:\n print(f\"object_class_name: {object_class_name} was not in the loading option\")\n raise ValueError(\"File type identification failed. Please enter 'object_class_name' manually.\")\n\n return obj",
"def _load_disk(self):\r\n pass",
"def loaddata(path):\n if path.endswith(\".tiff\") or path.endswith(\".tif\"):\n try:\n from vigra.impex import readVolume\n except ImportError:\n raise ImportError(\"Vigra is needed to read/write TIFF volumes, but could not be imported.\")\n\n volume = readVolume(path)\n return volume\n\n elif path.endswith(\".h5\"):\n try:\n from Antipasti.netdatautils import fromh5\n except ImportError:\n raise ImportError(\"h5py is needed to read/write HDF5 volumes, but could not be imported.\")\n\n volume = fromh5(path)\n return volume\n\n else:\n raise NotImplementedError(\"Can't load: unsupported format. Supported formats are .tiff and .h5\")",
"def data_kinds():\n\n return ..."
] |
[
"0.5953198",
"0.5252723",
"0.5231705",
"0.50929207",
"0.50844127",
"0.50709325",
"0.506653",
"0.5061513",
"0.50486684",
"0.5040646",
"0.5024534",
"0.5007193",
"0.5007193",
"0.50033534",
"0.49804032",
"0.4952549",
"0.4922163",
"0.49177516",
"0.4917272",
"0.49139136",
"0.4911804",
"0.49107224",
"0.49102566",
"0.4894449",
"0.48908672",
"0.48719808",
"0.48711607",
"0.4868439",
"0.4845743",
"0.48447973"
] |
0.60233635
|
0
|
Return cyclic ACF corresponding to PSD obtained by inverse Fourier transforming. Returns
|
def to_acf(self):
rho = 0.5*np.fft.irfft(self) / self.delta_t
return AutoCovariance(rho, delta_t=self.delta_t)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def PSD_to_ACF(freq, psd, lags):\n freq_sym = np.append(-freq[::-1], freq) \n psd_sym = np.append(psd[::-1], psd)\n\n steps = freq_sym[1:] - freq_sym[:-1]\n height = psd_sym[1:]\n\n # nd = np.tile(freq_sym[1:], (len(lags), 1)).T\n nd = np.tile(freq_sym, (len(lags), 1)).T\n\n # acf = np.cos(-2*np.pi*nd*lags)*(height*steps)[:, np.newaxis]\n # acf = acf.sum(axis=0)\n\n acf = scipy.integrate.simps(np.cos(-2*np.pi*nd*lags)*psd_sym[:, np.newaxis], freq_sym, axis=0)\n\n # for l in lags:\n # ac = scipy.integrate.simps(np.cos(-2*np*pi*freq_sym*l) * psd_sym, freq_sym)\n return acf",
"def autoc(array):\r\n return ifft2(np.square(np.abs(fft2(array))))",
"def acor_fn(x):\n n = len(x)\n f = np.fft.fft(x-np.mean(x), n=2*n)\n acf = np.fft.ifft(f * np.conjugate(f))[:n].real\n return acf / acf[0]",
"def acor_fn(x):\n n = len(x)\n f = np.fft.fft(x-np.mean(x), n=2*n)\n acf = np.fft.ifft(f * np.conjugate(f))[:n].real\n return acf / acf[0]",
"def get_cfft(self):\n return self.get_rfft().get_cfft()",
"def dan_acf(x, axis=0, fast=False):\n x = np.atleast_1d(x)\n m = [slice(None), ] * len(x.shape)\n\n # For computational efficiency, crop the chain to the largest power of\n # two if requested.\n if fast:\n n = int(2**np.floor(np.log2(x.shape[axis])))\n m[axis] = slice(0, n)\n x = x\n else:\n n = x.shape[axis]\n\n # Compute the FFT and then (from that) the auto-correlation function.\n f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)\n m[axis] = slice(0, n)\n acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[m].real\n m[axis] = 0\n return acf / acf[m]",
"def get_cfft(self):\n fft = np.zeros((self.ny, self.nx), dtype=np.complex)\n fft[:, 0:(self.nx / 2 + 1)] = self.fft[:, :]\n fft[0, (self.nx / 2 + 1):] = np.conj(self.fft[0, 1:self.nx / 2][::-1])\n fft[1:, (self.nx / 2 + 1):] = np.conj(\n self.fft[1:, 1:self.nx / 2][::-1, ::-1])\n\n return cfft(self.nx, self.dx, fft=fft, ny=self.ny, dy=self.dy)",
"def DFT2(image):\n full_dft2 = DFT(DFT(image.transpose()).transpose())\n return full_dft2.astype(np.complex128)",
"def _irfft2d(f_x) :",
"def _ac_fft3 (self,xp,max_lag):\n '''takes xp'''\n f = np.fft.fft(self.xp)\n p = np.array([np.real(v)**2+np.imag(v)**2 for v in f])\n pi = np.fft.ifft(p)\n corr = np.real(pi)[:self.n]/np.sum(self.xp**2)\n return corr[:max_lag]",
"def get_cycle(tas,period=12,return_complex=False):\n L = len(tas)\n freqs = np.fft.fftfreq(L)\n closest = np.abs(freqs-1./period)\n# i = np.where(freqs == 1./period)[0]\n i = np.argmin(closest)\n #print 1/freqs[i]\n tas_fft = np.fft.fft(tas)/L\n R = tas_fft.real\n Im = tas_fft.imag\n if return_complex:\n return R[i],Im[i]\n else:\n mag = 2*np.sqrt(R**2+Im**2)\n phase = np.arctan2(Im,R)\n return mag[i],phase[i]",
"def Fwacdc(X, g, p0, back, alpha):\n Eac, Edc = X\n return p0 * Eac * g + back + alpha * (2.0 * g * Edc * Eac)",
"def _calc_acf(\n cls,\n ts: np.ndarray,\n nlags: t.Optional[int] = None,\n adjusted: bool = True,\n detrend: bool = True,\n detrended_acfs: t.Optional[np.ndarray] = None,\n ts_detrended: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n if detrended_acfs is not None and (\n nlags is None or detrended_acfs.size == nlags\n ):\n return detrended_acfs\n\n if detrend and ts_detrended is None:\n try:\n ts_detrended = _detrend.decompose(ts=ts, ts_period=0)[2]\n\n except ValueError:\n pass\n\n if ts_detrended is None:\n ts_detrended = ts\n\n if nlags is None:\n nlags = ts.size // 2\n\n acf = statsmodels.tsa.stattools.acf(\n ts_detrended, nlags=nlags, adjusted=adjusted, fft=True\n )\n return acf[1:]",
"def VACF(df,conversion = \"x\"):\n #conversion from pixels to micrometers\n if conversion == \"y\":\n df = df/1200*633\n else:\n df = df/1600*844\n #computes the velocity in one direction between the frames\n dif = pd.DataFrame()\n\n for i in range(1,len(df.T)):\n dif[i-1] = velocity(df[i-1],df[i])\n vel = []\n for i in range(len(dif)):\n vel.append(tidynamics.acf(dif.T[i]))\n\n #return the velocities in array\n return np.array(vel)",
"def get_fft(self):\n\t\t# Get the \"ideal\" evenly spaced times\n\t\teven_times = numpy.linspace(self.buf[0][0], self.buf[-1][0], len(self.buf))\n\t\t\n\t\t# Interpolate the data to generate evenly temporally spaced samples\n\t\tinterpolated = numpy.interp(even_times, *zip(*self.buf))\n\t\t\n\t\t# Perform the FFT\n\t\tfft = numpy.fft.rfft(interpolated)\n\t\treturn zip(numpy.abs(fft), numpy.angle(fft))",
"def ft_autocorr_out_dist(\n cls,\n ts: np.ndarray,\n p: float = 0.8,\n max_nlags: t.Optional[int] = None,\n adjusted: bool = True,\n detrended_acfs: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n detrended_acfs = cls._calc_acf(\n ts=ts,\n nlags=max_nlags,\n adjusted=adjusted,\n detrended_acfs=detrended_acfs,\n )\n\n ts_abs = np.abs(ts)\n ts_inliners = ts[ts_abs <= np.quantile(ts_abs, p)]\n\n ts_inliners_acfs = cls._calc_acf(\n ts=ts_inliners, nlags=max_nlags, adjusted=adjusted\n )\n\n dist_acfs = np.abs(\n detrended_acfs[: ts_inliners_acfs.size] - ts_inliners_acfs\n )\n\n return dist_acfs",
"def circulant_ACF(C,do_abs=False):\n M = len(C)\n #cols = np.flipud(sla.circulant(arange(M)[::-1]))\n cols = sla.circulant(arange(M))\n ACF = zeros(M)\n for i in range(M):\n row = C[i,cols[i]]\n if do_abs:\n row = abs(row)\n ACF += row\n # Note: this actually also accesses masked values in C.\n return ACF/M",
"def fce(B):\n return wce(B)/(2.*np.pi)",
"def cdp_backward(data, mask):\n assert mask.size(-1) == 2\n sampling_rate = mask.shape[1]\n Ifft_data = torch.ifft(data, 2, normalized=True)\n backward_data = complex_mul(Ifft_data, conjugate(mask))\n return backward_data.mean(1, keepdim=True)",
"def _faddeeva(z):\n from scipy.special import wofz\n if np.angle(z) > 0:\n return wofz(z)\n else:\n return -np.conj(wofz(z.conjugate()))",
"def position_to_Fourier(self):\n #TODO Try to do it with FFT \n U = self.alphas @ self.positions\n \n return U",
"def _calc_pacf(\n cls,\n ts: np.ndarray,\n nlags: t.Optional[int] = None,\n method: str = \"ols-adjusted\",\n detrend: bool = True,\n ts_detrended: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n if nlags is None:\n nlags = 1 + ts.size // 10\n\n if detrend and ts_detrended is None:\n try:\n ts_detrended = _detrend.decompose(ts=ts, ts_period=0)[2]\n\n except ValueError:\n pass\n\n if ts_detrended is None:\n ts_detrended = ts\n\n pacf = statsmodels.tsa.stattools.pacf(\n ts_detrended, nlags=nlags, method=method\n )\n return pacf[1:]",
"def autocorrelation(x):\n x = np.asarray(x)\n N = len(x)\n x = x-x.mean()\n s = fft.fft(x, N*2-1)\n result = np.real(fft.ifft(s * np.conjugate(s), N*2-1))\n result = result[:N]\n result /= result[0]\n return result",
"def aveCCV2F(self):\n if getattr(self, '_aveCCV2F', None) is None:\n if self.dim == 1:\n self._aveCCV2F = self.aveCC2F\n elif self.dim == 2:\n aveCCV2Fx = sp.kron(speye(self.nCy), av_extrap(self.nCx))\n aveCC2VFy = sp.kron(av_extrap(self.nCy), speye(self.nCx))\n self._aveCCV2F = sp.block_diag((\n aveCCV2Fx, aveCC2VFy\n ), format=\"csr\")\n elif self.dim == 3:\n aveCCV2Fx = kron3(\n speye(self.nCz), speye(self.nCy), av_extrap(self.nCx)\n )\n aveCC2VFy = kron3(\n speye(self.nCz), av_extrap(self.nCy), speye(self.nCx)\n )\n aveCC2BFz = kron3(\n av_extrap(self.nCz), speye(self.nCy), speye(self.nCx)\n )\n self._aveCCV2F = sp.block_diag((\n aveCCV2Fx, aveCC2VFy, aveCC2BFz\n ), format=\"csr\")\n return self._aveCCV2F",
"def aveFz2CC(self):\n if self.dim < 3:\n return None\n if getattr(self, '_aveFz2CC', None) is None:\n n = self.vnC\n if(self.dim == 3):\n self._aveFz2CC = kron3(av(n[2]), speye(n[1]), speye(n[0]))\n return self._aveFz2CC",
"def acf(t, largest_prime=500):\n\n T = np.array(t)\n\n # Don't allow a prime factor larger than 'largest_prime'. Truncate data until that condition is met\n l = 2 * T.shape[0] - 1\n\n while largest_prime_factor(l) >= largest_prime or l % 2 == 0:\n l -= 1\n\n T = T[:(l + 1) // 2, ...] # '...' allows for no second dimension if only a single time series is analysed\n length = T.shape[0] * 2 - 1\n\n T -= np.mean(T, axis=0)\n\n fftx = np.fft.fft(T, n=length, axis=0)\n ret = np.fft.ifft(fftx * np.conjugate(fftx), axis=0)\n ret = np.fft.fftshift(ret, axes=(0,))\n\n autocorr_fxn = ret[length // 2:].real\n\n if len(autocorr_fxn.shape) > 1:\n autocorr_fxn /= np.arange(T.shape[0], 0, -1)[:, None]\n else:\n autocorr_fxn /= np.arange(T.shape[0], 0, -1)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n autocorr_fxn /= np.var(T, axis=0)\n\n return autocorr_fxn # normalized",
"def get_cffts(self):\n return [\n rfft(self.nx, self.dx, fft=self.tfft, ny=self.ny,\n dy=self.dy).get_cfft(),\n rfft(self.nx, self.dx, fft=self.efft, ny=self.ny,\n dy=self.dy).get_cfft(),\n rfft(self.nx, self.dx, fft=self.bfft, ny=self.ny,\n dy=self.dy).get_cfft()\n ]",
"def constract(phase, magnitude):\n new_spectrum = magnitude * np.exp(1j * phase)\n\n # reverse the shift and FFT\n f_ishift = np.fft.ifftshift(new_spectrum)\n img_back = np.fft.ifft2(f_ishift)\n \n return np.abs(img_back)",
"def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T",
"def forward_fft(self, array_in):\r\n # Find side length, as real array may or may not be doubled for\r\n # aliasing control\r\n side = array_in.shape[0]\r\n div_side = 1.0/side**2\r\n\r\n out = np.fft.fft2(self.sign_mat[0:side, 0:side]*array_in)*div_side\r\n return out"
] |
[
"0.66626525",
"0.6300266",
"0.6267516",
"0.6267516",
"0.626026",
"0.61031294",
"0.6091076",
"0.59904426",
"0.5905221",
"0.5882946",
"0.58500314",
"0.5722968",
"0.571717",
"0.57079524",
"0.57078564",
"0.56853944",
"0.56747645",
"0.56401896",
"0.5640028",
"0.56299365",
"0.55929446",
"0.5575047",
"0.5572908",
"0.55651945",
"0.5552728",
"0.5547437",
"0.5539653",
"0.55394566",
"0.55335945",
"0.55101186"
] |
0.640065
|
1
|
Get an output/input variable.
|
def __getitem__(self, name):
if self.outputs is not None:
try:
return self.outputs[name]
except KeyError:
if name in self._auto_ivc_map:
return self.inputs[self._auto_ivc_map[name]]
if self.inputs is not None:
return self.inputs[name]
elif self.inputs is not None:
return self.inputs[name]
raise KeyError('Variable name "%s" not found.' % name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_output(self, **kwargs):\n with tf.variable_scope(self.layer_scope):\n return self.out",
"def get_variable(self, name):\n if self._scalamagic:\n intp = self.scala_interpreter\n intp.interpret(name)\n return intp.last_result()",
"def inputValue(self):\n return self.variable",
"def get_output_by_name(self, name):\n for var in self.outputs:\n if var.get_object().name == name:\n return var\n logger.exception(\"Output variable with name {0} not found\".format(name))\n return None",
"def get_variable(self, variable_name):\n with self._graph.as_default():\n return self._sess.run(self._get_tensor_by_name(variable_name))",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def get_variable(self, request, context):\n response = GetVariableResponse()\n value = self._delegator.get_variable(request.component, request.variable)\n response.value = encode(value)\n return response",
"def get(self, var):\n s = self.eval('{0}'.format(var))\n return self.strip_answer(s)",
"def get_result(hf, var=None):\n if '/output/data' not in hf:\n return []\n\n output_variables = get_output_names(hf)\n if len(output_variables) == 0:\n return []\n\n if var and var not in output_variables:\n print(\"Variable %s not found in output data\" % var)\n raise ValueError\n if not var:\n if len(output_variables) > 1:\n print(\"Output data contains multiple variables.\")\n print(\"You must indicate which you want.\")\n raise ValueError\n var = output_variables[0]\n\n return hf['/output/data/%s' % var].value",
"def get_output(self, **kwargs):\n return self.out",
"def get_variable(x):\n return x.cuda() #if use_cuda else x",
"def get_value(Runner, input_str):\n Runner.stdin.write(input_str)\n output = Runner.stdout.readline() \n return output",
"def _get_input(self):\n return self.__input",
"def _get_input(self):\n return self.__input",
"def _get_input(self):\n return self.__input",
"def _get_input(self):\n return self.__input",
"def _get_input(self):\n return self.__input",
"def _get_input(self):\n return self.__input",
"def _get_input(self):\n return self.__input",
"def get_variable_value(variable):\n def pipeline_from_info(variableinfo):\n controller = variableinfo._controller\n version = controller.vistrail.get_version_number(\n 'dat-var-%s' % variable.name)\n return controller.vistrail.getPipeline(version), version\n\n def pipeline_from_generator(variable_gen):\n # Get the original OutputPort module\n orig_controller = variable_gen._generator.controller\n base_pipeline = orig_controller.vistrail.getPipeline('dat-vars')\n if len(base_pipeline.module_list) != 1:\n raise ValueError(\"dat-vars version is invalid\")\n output_port = base_pipeline.module_list[0]\n\n controller = VistrailController(Vistrail())\n # OutputPort\n operations = [('add', output_port)]\n # Rest of the pipeline\n operations += variable_gen._generator.operations\n # Connection\n connection = controller.create_connection(\n variable_gen._output_module,\n variable_gen._outputport_name,\n output_port,\n 'InternalPipe')\n operations.append(('add', connection))\n # Materialize this\n action = create_action(operations)\n controller.add_new_action(action)\n version = controller.perform_action(action)\n controller.change_selected_version(version)\n assert version == controller.current_version == 1\n return controller.current_pipeline, 1\n\n # Obtain 'pipeline' and 'version' from 'variable'\n if isinstance(variable, Variable.VariableInformation):\n # Pipeline already exists\n pipeline, version = pipeline_from_info(variable)\n elif isinstance(variable, Variable):\n if variable._materialized is not None:\n # Pipeline already exists\n pipeline, version = pipeline_from_info(variable._materialized)\n else:\n # Pipeline doesn't exist\n # We need to make one from the operations\n pipeline, version = pipeline_from_generator(variable)\n else:\n raise TypeError\n\n # Setup the interpreter for execution\n interpreter = get_default_interpreter()\n interpreter.clean_non_cacheable_modules()\n interpreter.parent_execs = [None]\n res = interpreter.setup_pipeline(pipeline)\n if len(res[5]) > 0:\n raise ValueError(\"Variable pipeline has errors:\\n%s\" %\n '\\n'.join(me.msg for me in res[5].itervalues()))\n tmp_id_to_module_map = res[0]\n\n # Execute\n res = interpreter.execute_pipeline(\n pipeline,\n res[0], # tmp_id_to_module_map\n res[1], # persistent_to_tmp_id_map\n current_version=version,\n reason=\"getting variable value\")\n if len(res[2]) > 0:\n raise ValueError(\"Error while executing variable pipeline:\\n%s\" %\n '\\n'.join('%s: %s' % (me.module.__class__.__name__,\n me.msg)\n for me in res[2].itervalues()))\n if len(res[4]) > 0:\n # extract messages and previous ModuleSuspended exceptions\n raise ValueError(\"Module got suspended while executing variable \"\n \"pipeline:\\n%s\" %\n '\\n'.join(msg for msg in res[4].itervalues()))\n\n # Get the result\n outputport_desc = get_module_registry().get_descriptor_by_name(\n 'org.vistrails.vistrails.basic', 'OutputPort')\n for module in pipeline.module_list:\n if module.module_descriptor is outputport_desc:\n if get_function(module, 'name') == 'value':\n module_obj = tmp_id_to_module_map[module.id]\n result = module_obj.get_output('ExternalPipe')\n break\n else:\n result = None\n\n interpreter.finalize_pipeline(pipeline, *res[:-1])\n interpreter.parent_execs = [None]\n return result",
"def getvalue(self):\n return self.out.getvalue()",
"def get(self, name: str) -> Value:\n if name in self.read_hooks:\n func = self.read_hooks[name]\n log.debug(\"Will use function {} to read input\".format(func))\n val = func(name)\n return val\n if name in self._map:\n return self._map[name]\n log.debug(\"Did not find a mapping for variable '{}' in {}\".format(name, self._map))\n return self.default_value",
"def get_input():\r\n operation = input()\r\n\r\n return operation",
"def get_assignment(self, var):\n return self.variable_to_value.get(var)",
"def get_variable(self, svc, var):\n action = \"variableget\"\n path = \"data_request?id=%s&DeviceNum=%d&serviceId=%s&Variable=%s\" \\\n % (action, self.id, svc, var)\n return self.vera.get(path)"
] |
[
"0.6752224",
"0.6591138",
"0.6557258",
"0.6555575",
"0.6506442",
"0.63788795",
"0.63788795",
"0.63788795",
"0.63788795",
"0.63788795",
"0.63788795",
"0.6339062",
"0.63368",
"0.632077",
"0.6307426",
"0.6247665",
"0.62285554",
"0.619877",
"0.619877",
"0.619877",
"0.619877",
"0.619877",
"0.619877",
"0.619877",
"0.61976784",
"0.61670375",
"0.61273897",
"0.6084314",
"0.6076038",
"0.60436964"
] |
0.6600384
|
1
|
Get the units for a variable name.
|
def _get_units(self, name):
meta = self._abs2meta
if name in meta:
return meta[name]['units']
proms = self._prom2abs
if name in proms['output']:
abs_name = proms['output'][name][0]
return meta[abs_name]['units']
elif name in proms['input']:
if len(proms['input'][name]) > 1:
# The promoted name maps to multiple absolute names, require absolute name.
msg = "Can't get units for the promoted name '%s' because it refers to " + \
"multiple inputs: %s. Access the units using an absolute path name."
raise RuntimeError(msg % (name, str(proms['input'][name])))
abs_name = proms['input'][name][0]
return meta[abs_name]['units']
raise KeyError('Variable name "{}" not found.'.format(name))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_units(self, variable):\n try:\n units = self.dataset[variable].units\n return units\n except:\n return None",
"def getResRatioVarUnit( self, name ):\n\n if not self.resNames:\n self.updateAdb( )\n\n if name not in self.resNames:\n for k, v in self.resNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"all\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def get_unit(shared, unit_name):\n if (shared.config.get_safe('data', 'use_units') != 'off'):\n unit_val, unit_str = shared.config.get_safe_literal('units', unit_name,\n default=(1.0, ''))\n if unit_str:\n unit_str = ' [' + unit_str + ']'\n else:\n unit_val = 1.0\n unit_str = ''\n \n return unit_val, unit_str",
"def get_var_units(self, var_name, var_val):\n var_val = self._var_units[var_name]\n return True",
"def getSolRatioVarUnit( self, name ):\n\n if not self.solNames:\n self.updateAdb( )\n\n if name not in self.solNames:\n for k, v in self.solNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"all\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier",
"def get_units(self, output_name: str):\n return self._output_units[output_name]",
"def get_units(self, names):\n # Make sure names is a list\n if isinstance(names, str) or isinstance(names, unicode):\n names = [names]\n \n # Return the list of units\n ans = []\n for name in names:\n if name in self.interp_ds:\n ans.append(self.interp_ds[name].attrs['units'])\n else:\n ans.append('Not Available in Dataset')\n \n return ans",
"def _getunits(x):\n if pb.units.has_units(x):\n \n units = x.units\n \n else:\n \n units = None\n \n return units",
"def get_variable_units(self, i):\n if i >= self.ndim:\n warnings.warn(\"Variable \" + str(i) + \" doesn't exist, cannot return its units.\")\n return None\n else:\n if i < self.variables_range[0]:\n return self._components_units[0]\n if self.variables_range[0] <= i < self.variables_range[1]:\n return self._components_units[1]\n if self.oceanic_basis is not None:\n if self.variables_range[1] <= i < self.variables_range[2]:\n return self._components_units[2]\n if self.variables_range[2] <= i < self.variables_range[3]:\n return self._components_units[3]\n if self.ground_basis is not None:\n if self.variables_range[1] <= i < self.variables_range[2]:\n return self._components_units[3]",
"def get_time_unit(self, variables):\n if len(self.TIME_VARIABLE):\n # times = self._get_variable(variables, self.TIME_VARIABLE)[:]\n units = variables['time'].units\n return units\n else:\n return \"\"",
"def get_units(cls, wkt):\n if HAS_GDAL:\n return SpatialReference(wkt).units\n else:\n m = cls.units_regex.match(wkt)\n return m.group('unit'), m.group('unit_name')",
"def get_mds_units(node):\n try:\n units=str(node.units)\n except:\n units=node.units_of()\n if not type(units)==type(\"\"):\n try:\n units=units.value_of()\n except:\n units=\"-\"\n return units",
"def unit_of_measurement(self):\n return self.var_units",
"def units(self):\n return self.__class__.get_setting_units(self.key, **self.get_kwargs())",
"def get_parameter_unit(self, parameter_name):\n parameter_units = {\n 'tsky': units.Unit(\"Kelvin\"),\n 'kelvin': self.data_unit\n }\n return parameter_units.get(parameter_name)",
"def getOsiVarUnit( self, name ):\n\n if not self.osiVarNames:\n self.getOsiVarNames( )\n\n if name not in self.osiVarNames:\n for k, v in self.osiVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"osi\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def get_setting_units(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('units', '')",
"def getOthVarUnit( self, name ):\n\n if not self.othVarNames:\n self.getOthVarNames( )\n\n if name not in self.othVarNames:\n for k, v in self.othVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"oth\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def getVarUnit( self, name, adbOut ):\n\n if name not in _adbUnit: return None\n\n unit = None\n for item in _adbUnit[name]:\n if item[1] == 'all' or adbOut.lower() in item[1].split(','):\n if item[0] == \"None\":\n unit= \"nondim\"\n else:\n unit= acuUnit.getDefUnit( item[0] )\n \n break\n return unit",
"def units(self, *args):\n u = self.parent.unit\n return tuple('%s%s' % (a, u) for a in args)",
"def getOhcVarUnit( self, name ):\n\n if not self.ohcVarNames:\n self.getOhcVarNames( )\n\n if name not in self.ohcVarNames:\n for k, v in self.ohcVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"ohc\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)",
"def getOqiVarUnit( self, name ):\n\n if not self.oqiVarNames:\n self.getOqiVarNames( )\n\n if name not in self.oqiVarNames:\n for k, v in self.oqiVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"oqi\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def getOfcVarUnit( self, name ):\n\n if not self.ofcVarNames:\n self.getOfcVarNames( )\n\n if name not in self.ofcVarNames:\n for k, v in self.ofcVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"ofc\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def getOriVarUnit( self, name ):\n\n if not self.oriVarNames:\n self.getOriVarNames( )\n\n if name not in self.oriVarNames:\n for k, v in self.oriVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"ori\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def get_units(self, obj: Dimension) -> [Unit]:\n try:\n return obj.units()\n except KeyError as e:\n logging.error(str(e))\n return []",
"def getFormulaUnitsDataForVariable(self, *args):\n return _libsbml.Model_getFormulaUnitsDataForVariable(self, *args)",
"def getOeiVarUnit( self, name ):\n\n if not self.oeiVarNames:\n self.getOeiVarNames( )\n\n if name not in self.oeiVarNames:\n for k, v in self.oeiVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"oei\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def units(self, x):\n u = ''.join([chr(d) for d in self[x]['units'][:]])\n if (u in ['n/a']) and (x in ['latitude', 'longitude']):\n u = 'radian' # assume radians\n return u"
] |
[
"0.74081725",
"0.7261851",
"0.71404684",
"0.71044797",
"0.7081885",
"0.7070995",
"0.69846416",
"0.6925272",
"0.6917649",
"0.6912531",
"0.69123954",
"0.6894977",
"0.6651058",
"0.659811",
"0.65960217",
"0.65913147",
"0.6581691",
"0.6579393",
"0.6524793",
"0.6490647",
"0.6450717",
"0.6448538",
"0.6435908",
"0.6381116",
"0.6348555",
"0.6325244",
"0.6289535",
"0.62883264",
"0.62879956",
"0.62852937"
] |
0.8026463
|
0
|
Get the values of the design variables, as seen by the driver, for this case.
|
def get_design_vars(self, scaled=True, use_indices=True):
return self._get_variables_of_type('desvar', scaled, use_indices)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def printDesignVariables(self):\n print(\"-\" * 85)\n print(\"{:>30}{:>20}{:>20}\".format(\"CSM Design Parameter\", \"Name\", \"Value\"))\n print(\"-\" * 85)\n for dvName in self.DVs:\n DV = self.DVs[dvName]\n print(f\"{DV.csmDesPmtr:>30}{DV.name:>20}{DV.value:>20}\")",
"def get_variables(self):\n\t\treturn self.variables",
"def get_variables(self) -> np.array:\n pass",
"def get_variable_values(self, vars):\n raise NotImplementedError()",
"def getChemCompVars(self):\n dataDict = self.__dict__\n result = self.specificChemCompVars\n if not result:\n result = self.getByNavigation('namingSystem', 'chemComp', 'chemCompVars')\n return result",
"def vars(self):\n return self.v",
"def variables(self):\n return self._variablesDef",
"def variables(self):\r\n return self.get_field('variable')",
"def get_design_parameters(self):\n return self.__get_one_type_params(DesignParameter)",
"def get_variables(self):\n\n self._enforce_coupling()\n\n dv = []\n for scenario in self.scenarios:\n if scenario.group_master:\n dv.extend(scenario.active_variables())\n else:\n dv.extend(scenario.uncoupled_variables())\n\n for body in self.bodies:\n if body.group_master:\n dv.extend(body.active_variables())\n else:\n dv.extend(body.uncoupled_variables())\n\n return dv",
"def get_variables(self):\n local_variables = self._design.GetVariables(\n )+self._design.GetPostProcessingVariables()\n return {lv: self.get_variable_value(lv) for lv in local_variables}",
"def values(self):\n\t\treturn self.myVals",
"def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names",
"def get_variables(self):\n return self.variables",
"def vars(cls):\n raise NotImplementedError(\"This is an abstract function that needs to be implemented for each value function\")",
"def values():",
"def variables(self):\n return self.dataset.data_vars",
"def draw_variables(self): \n z = self.q[0].draw_variable_local(self.sims)\n for i in range(1,len(self.q)):\n z = np.vstack((z,self.q[i].draw_variable_local(self.sims)))\n return z",
"def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]",
"def get_constants(self):\n return self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12",
"def get_variables(self):\n return [self.g_t, self.m_t]",
"def get(self):\n return self.vars",
"def get_vars(self):\n return [self.mu, self.var]",
"def calculate_vars(self):\n pass",
"def getVars(self):\n return self.__vars",
"def variables(self):\n return self._.vars",
"def data(self):\n\t\treturn vars(self)",
"def export_values(self):\n return (self.c0, self.rho0, self.p_ref, self.Ux, self.turb_intensity,\n self.length_scale, self.z_sl, self.Mach, self.beta,\n self.flow_param, self.dipole_axis)",
"def get_variables(self) -> np.array:\n return np.array([self.m, self.c])",
"def getGridVarInfo(self):\n return self.gridVars"
] |
[
"0.68563646",
"0.6626772",
"0.6534895",
"0.6532961",
"0.651272",
"0.6484132",
"0.64549094",
"0.6451239",
"0.6384466",
"0.633503",
"0.6333191",
"0.63260627",
"0.6310112",
"0.6295249",
"0.628637",
"0.62795824",
"0.62777877",
"0.6180956",
"0.6172419",
"0.61514163",
"0.61485827",
"0.61461854",
"0.61401",
"0.61325234",
"0.61314553",
"0.60951114",
"0.60946685",
"0.60762",
"0.60581577",
"0.60553074"
] |
0.7145759
|
0
|
Write table of variable names, values, residuals, and metadata to out_stream.
|
def _write_table(self, var_type, var_data, hierarchical, print_arrays, out_stream):
if out_stream is None:
return
# Make a dict of variables. Makes it easier to work with in this method
var_dict = OrderedDict()
for name, vals in var_data:
var_dict[name] = vals
# determine pathname of the system
if self.source in ('root', 'driver', 'problem', 'root.nonlinear_solver'):
pathname = ''
elif '|' in self.source:
pathname = get_source_system(self.source)
else:
pathname = self.source.replace('root.', '')
if pathname.endswith('.nonlinear_solver'):
pathname = pathname[:-17] # len('.nonlinear_solver') == 17
# vars should be in execution order
if 'execution_order' in self._var_info:
var_order = self._var_info['execution_order']
var_list = [var_name for var_name in var_order if var_name in var_dict]
else:
# don't have execution order, just sort for determinism
var_list = sorted(var_dict.keys())
top_name = pathname if pathname else 'model'
write_var_table(pathname, var_list, var_type, var_dict,
hierarchical=hierarchical, top_name=top_name,
print_arrays=print_arrays, out_stream=out_stream)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_table_vehicles(io_stream, vnumber, vcapacity):\n io_stream.write('table vehicle\\n')\n io_stream.write('id;volume;weight;fixed_cost;variable_cost\\n')\n for i in range(0, vnumber):\n row = [i, vcapacity, vcapacity, 0.0, 1.0]\n row = [str(e) for e in row]\n io_stream.write(';'.join(row) + '\\n')",
"def export(self, out=sys.stdout):\n\n tablemodel = None\n for x in self.tables:\n if x.name == self.config.table:\n tablemodel = x\n \n if tablemodel is None:\n return\n \n # output the header\n tableinstance = tablemodel(self.dbpath)\n fieldnames = list(tableinstance.fieldnames()) \n out.write(\"\\t\".join(fieldnames) + \"\\n\") \n # output the table contents\n generator = DBGenerator(tablemodel(self.dbpath))\n for row in generator.next():\n temp = [str(row[_]) for _ in fieldnames]\n out.write(\"\\t\".join(temp) + \"\\n\")",
"def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)",
"def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')",
"def _writeOutput(self):\n head = \"Station\\tX\\tY\\tZ\\tUEast\\tUNorth\\tUUp\\tSigEast\\tSigNorth\\tSigUp\\n\"\n outFmt = \"%s\" + 9 * \"\\t%g\" + \"\\n\"\n\n f = open(self.outputFile, 'w')\n f.write(head)\n\n for stationNum in range(self.numStations):\n outLine = outFmt % (self.stations[stationNum],\n self.coords[stationNum, 0], self.coords[stationNum, 1],\n self.coords[stationNum, 2],\n self.dispNoise[stationNum, 0],\n self.dispNoise[stationNum, 1],\n self.dispNoise[stationNum, 2],\n self.sigmaEast, self.sigmaNorth, self.sigmaUp)\n f.write(outLine)\n\n f.close()\n\n return",
"def _write_outpu(parameters):\n # Load data\n from ._common import output\n\n data = deepcopy(output)\n data.update(parameters[\"output\"])\n\n # Format\n fmt = block_to_format[\"OUTPU\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n fmt3 = str2format(fmt[3])\n\n out = []\n\n # Output format\n out += write_record([data[\"format\"].upper()], fmt1) if data[\"format\"] else []\n\n # Variables\n if data[\"variables\"]:\n buffer = []\n num_vars = 0\n for k, v in data[\"variables\"].items():\n values = [k.upper()]\n\n if numpy.ndim(v) == 0:\n values += [v]\n buffer += write_record(values, fmt3)\n num_vars += 1\n else:\n if numpy.ndim(v[0]) == 0:\n values += list(v)\n buffer += write_record(values, fmt3)\n num_vars += 1\n else:\n for vv in v:\n values_in = values + list(vv)\n buffer += write_record(values_in, fmt3)\n num_vars += len(v)\n\n out += write_record([str(num_vars)], fmt2)\n out += buffer\n\n return out",
"def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)",
"def save_table(data, out_file):\n logging.info(\"Saving table\")\n #header, data = data\n #out = pd.DataFrame(data=data, columns = header.keys())\n joblib.dump(data, out_file)",
"def write_out(self, niter, locuslen):\n try:\n handle = open(self.output, 'w')\n except OSError:\n print 'Error, you do not have permission to write files here.'\n extit(1)\n # First, write the pop names\n handle.write('#Pop 1: ' + self.popnames[0] + '\\n')\n handle.write('#Pop 2: ' + self.popnames[1] + '\\n')\n # Then write the run parameters\n handle.write('#Model: ' + self.modelname + '\\n')\n handle.write('#Max iterations: ' + str(niter) + '\\n')\n # Then write some model summaries\n handle.write('#Data Likelihoods: ' + ' '.join([str(s) for s in self.mod_like]) + '\\n')\n handle.write('#Optimized Likelihoods: ' + ' '.join([str(s) for s in self.opt_like]) + '\\n')\n handle.write('#AIC: ' + ' '.join([str(s) for s in self.aic]) + '\\n')\n handle.write('#LocusLem: ' + str(locuslen) + '\\n')\n handle.write('#4*Na*u*L: ' + str(self.theta_mean) + '\\n')\n handle.write('#Na: ' + str(self.Na) + '\\n')\n for name, val in zip(self.params['Names'], self.scaled_params):\n towrite = '#' + name + ': ' + str(val) + '\\n'\n handle.write(towrite)\n # Then a table of the parameters that were found\n handle.write('Iteration\\t' + '\\t'.join(self.params['Names']) + '\\n')\n handle.write('Initial\\t' + '\\t'.join([str(s) for s in self.params['Values']]) + '\\n')\n # Write the perturbed parameters\n for index, vals in enumerate(self.p_init):\n name = 'Perturbed_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the hot annealed values\n for index, vals in enumerate(self.hot_params):\n name = 'Hot_Anneal_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the cold annealed values\n for index, vals in enumerate(self.cold_params):\n name = 'Cold_Anneal_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the BFGS parameters\n for index, vals in enumerate(self.opt_params):\n name = 'BFGS_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the final params\n handle.write('Hot_Mean\\t' + '\\t'.join([str(s) for s in self.hot_mean]) + '\\n')\n handle.write('Cold_Mean\\t' + '\\t'.join([str(s) for s in self.cold_mean]) + '\\n')\n handle.write('BFGS_Mean\\t' + '\\t'.join([str(s) for s in self.bfgs_mean]) + '\\n')\n handle.flush()\n handle.close()\n return",
"def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)",
"def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)",
"def PrintOutput(self):\n self.file_settings[\"file_name\"].SetString(self.file_name)\n file = TimeBasedAsciiFileWriterUtility(self.model_part, self.file_settings, self._GetHeader()).file\n for point, var_values in zip(self.found_positions, self.values):\n file.write(self._DataToString(point, var_values))\n file.close()",
"def dump(self, output_stream):\n raise NotImplementedError",
"def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()",
"def writeout(self):\n out_file = ''.join(['theta_w_t', str(self.t), '.dat'])\n data_list = [] \n\n for i in xrange(self.n_params): \n data_list.append( self.theta_t[i,:] ) \n\n data_list.append(self.w_t)\n\n np.savetxt(\n out_file, \n (np.vstack(np.array(data_list))).T, \n delimiter='\\t'\n )\n\n return None",
"def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")",
"def write(self, out):",
"def output(self):\n # store tables\n # protocol PK keys table for measurements under a protocol\n # 'line' keys table for line-only section (if enabled)\n # 'all' keys table including everything.\n tables = OrderedDict()\n if self.options.line_section:\n tables[\"line\"] = OrderedDict()\n tables[\"line\"][\"header\"] = self._output_line_header()\n if not self.options.protocol_section:\n tables[\"all\"] = OrderedDict()\n tables[\"all\"][\"header\"] = self._output_header()\n self._do_export(tables)\n return self._build_output(tables)",
"def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")",
"def export(self, outpath):\n fout = open(outpath, \"w\")\n\n # Header takes the guesswork out of loading by recording how many lines, vector dims\n fout.write(str(self.n_words) + \" \" + str(self.n_dim) + \"\\n\")\n for token in self.id2word:\n vector_components = [\"%.6f\" % number for number in self[token]]\n vector_as_string = \" \".join(vector_components)\n\n out_line = token + \" \" + vector_as_string + \"\\n\"\n fout.write(out_line)\n\n fout.close()",
"def write(self, stream, root, order):\n\n if root:\n tree = ET.Element('table')\n for row in root:\n tree_row = ET.SubElement(tree, 'row')\n if isinstance(row, list):\n for (pos, col) in enumerate(row):\n tree_col = ET.SubElement(tree_row, 'col{pos:>08}'.format(**locals()))\n tree_col.text = col\n else:\n for (key, col) in row.items():\n tree_col = ET.SubElement(tree_row, self.normalize(key))\n tree_col.text = col\n\n stream.write(ET.tostring(tree).decode('utf-8') + '\\n')",
"def writerow(self, outdict):\r\n row = []\r\n for field in FatWriter.FIELDS:\r\n col = outdict.get(field, '')\r\n col = col.replace('\\t', ' ')\r\n col = col.replace('\\n', '\\\\n')\r\n row.append(col)\r\n self.outfile.write('\\t'.join(row) + '\\n')",
"def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()",
"def write(self, output_stream=sys.stdout):\n for model in self.models:\n if len(model.chains) == 0:\n continue\n if len(self.models) > 1:\n print(\"MODEL %4d\" % (model.number), file=output_stream)\n model.write(output_stream)\n if len(self.models) > 1:\n print(\"ENDMDL\", file=output_stream)\n print(\"END\", file=output_stream)",
"def Write(self):\n table_data = self._TABLE.build(self._offsets)\n self._zip_file.writestr(self._stream_name, table_data)",
"def writeAllOutput(self, proteins):\n self.outputFh.write(\"%s\\n\" % self.pcssRunner.pfa.getOutputColumnHeaderString())\n for protein in proteins:\n self.writeProteinOutputLines(protein)\n self.outputFh.close()",
"def write_output(self):",
"def Write(self):\n table_data = self._TABLE.build(self._timestamps)\n self._zip_file.writestr(self._stream_name, table_data)",
"def outfile(self):\n return os.path.join(self.outfile_dir, constant.OUTFILE_TABLE + self.table_name)",
"def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)"
] |
[
"0.6095418",
"0.6072536",
"0.5994893",
"0.5944242",
"0.59246516",
"0.5886715",
"0.5735065",
"0.5709688",
"0.5703924",
"0.56810933",
"0.56685746",
"0.5661571",
"0.56249696",
"0.5596444",
"0.5583554",
"0.5554277",
"0.55315995",
"0.55291486",
"0.54729015",
"0.5462207",
"0.54551446",
"0.54458463",
"0.54421574",
"0.54349494",
"0.54339445",
"0.5433441",
"0.5378895",
"0.5360397",
"0.5324446",
"0.53233796"
] |
0.66337454
|
0
|
Get the absolute and promoted name versions of the provided derivative key.
|
def _deriv_keys(self, key):
prom2abs = self._prom2abs
abs2prom = self._abs2prom
DERIV_KEY_SEP = self._DERIV_KEY_SEP
# derivative could be tuple or string, using absolute or promoted names
if isinstance(key, tuple):
of, wrt = key
else:
of, wrt = key.split(DERIV_KEY_SEP)
# if promoted, will map to all connected absolute names
abs_of = [of] if of in abs2prom else prom2abs[of]
if wrt in prom2abs:
abs_wrt = [prom2abs[wrt]][0]
else:
abs_wrt = [wrt]
abs_keys = ['%s%s%s' % (o, DERIV_KEY_SEP, w) for o, w in itertools.product(abs_of, abs_wrt)]
prom_of = of if of in prom2abs else abs2prom[of]
if wrt in abs2prom:
prom_wrt = abs2prom[wrt]
else:
prom_wrt = wrt
prom_key = (prom_of, prom_wrt)
return abs_keys, prom_key
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def absolute_names(self):\n DERIV_KEY_SEP = self._DERIV_KEY_SEP\n\n for key in self._keys:\n if DERIV_KEY_SEP in key:\n # return derivative keys as tuples instead of strings\n of, wrt = key.split(DERIV_KEY_SEP)\n yield (of, wrt)\n else:\n yield key",
"def get_all_childname(self, key):\n return [x.split(\"/\")[1] for x in self.get_all_keys() if x.split(\"/\")[0] == key]",
"def find_derivative(name: str):\n return _derivatives[name]",
"def _get_key_aliases(key: str) -> list[str]:\n return [key] + KEY_ALIASES.get(key, [])",
"def _get_variants(name):\n names = [name]\n oldname = name\n # Map greek words to unicode characters\n if DOT_GREEK_RE.search(name):\n wordname = name\n while True:\n m = DOT_GREEK_RE.search(wordname)\n if m:\n wordname = wordname[:m.start(1)-1] + m.group(1) + wordname[m.end(1)+1:]\n else:\n break\n symbolname = name\n while True:\n m = DOT_GREEK_RE.search(symbolname)\n if m:\n symbolname = symbolname[:m.start(1)-1] + GREEK_WORDS[m.group(1)] + symbolname[m.end(1)+1:]\n else:\n break\n names = [wordname, symbolname]\n else:\n while True:\n m = GREEK_RE.search(name)\n if m:\n name = name[:m.start(2)] + GREEK_WORDS[m.group(2)] + name[m.end(2):]\n else:\n break\n while True:\n m = UNAMBIGUOUS_GREEK_RE.search(name)\n if m:\n name = name[:m.start(1)] + GREEK_WORDS[m.group(1)] + name[m.end(1):]\n else:\n break\n if not name == oldname:\n names.append(name)\n newnames = []\n for name in names:\n # If last word \\d+, add variants with hyphen and no space preceding\n if NUM_END_RE.search(name):\n newnames.append(NUM_END_RE.sub('-\\g<1>', name))\n newnames.append(NUM_END_RE.sub('\\g<1>', name))\n # If last word [A-Za-z]\\d* add variants with hyphen preceding.\n if ALPHANUM_END_RE.search(name):\n newnames.append(ALPHANUM_END_RE.sub('-\\g<1>', name))\n names.extend(newnames)\n return names",
"def __getitem__(self, key):\n if key in self._keys:\n # absolute name\n return self._values[key]\n\n elif key in self._auto_ivc_map:\n # We allow the user to query with auto_ivc varname.\n src_key = self._auto_ivc_map[key]\n if src_key in self._keys:\n return self._values[self._auto_ivc_map[key]]\n\n elif key in self:\n # promoted name\n val = super().__getitem__(key)\n if val is _AMBIGOUS_PROM_NAME:\n msg = \"The promoted name '%s' is invalid because it refers to multiple \" + \\\n \"inputs: %s. Access the value using an absolute path name or the \" + \\\n \"connected output variable instead.\"\n raise RuntimeError(msg % (key, str(self._prom2abs[key])))\n else:\n return val\n\n elif isinstance(key, tuple) or self._DERIV_KEY_SEP in key:\n # derivative keys can be either (of, wrt) or 'of!wrt'\n abs_keys, prom_key = self._deriv_keys(key)\n return super().__getitem__(prom_key)\n\n raise KeyError('Variable name \"%s\" not found.' % key)",
"def get_descendants(self, key: str) -> Sequence[str]:\n raise NotImplementedError",
"def shorter_name(key):\n key_short = key\n for sep in ['#', '/']:\n ind = key_short.rfind(sep)\n if ind is not None:\n key_short = key_short[ind+1:]\n else:\n key_short = key_short\n return key_short.replace('-', '_').replace('.', '_')",
"def get_field_names_root(self, adapter: str, key: str = \"name_qual\") -> List[str]:\n schemas = self.get_field_schemas_root(adapter=adapter)\n names = [x[key] for x in schemas]\n return names",
"def _cache_key_from_kvs_key(self, key):\r\n if key.scope == Scope.user_state:\r\n return (key.scope, key.block_scope_id)\r\n elif key.scope == Scope.user_state_summary:\r\n return (key.scope, key.block_scope_id, key.field_name)\r\n elif key.scope == Scope.preferences:\r\n return (key.scope, key.block_scope_id, key.field_name)\r\n elif key.scope == Scope.user_info:\r\n return (key.scope, key.field_name)",
"def get_revisions(self, key):\n c = pysvn.Client()\n revs = c.log(settings.SVN_WC_PATH, discover_changed_paths=True)\n crevs = []\n for r in revs:\n if '/'+key in [p.path for p in r.changed_paths]:\n crevs.append(r.revision.number)\n crevs.sort(reverse=True)\n return crevs[1:] # cut of the head revision-number",
"def list_versions(quartus_versions):\n for key in quartus_versions.keys():\n print(key)",
"def key_to_name_mods(key):\n global KEY_MAP\n global MOD_SHIFT\n\n try:\n found = KEY_MAP[key]\n except KeyError:\n if len(key) > 1:\n return (key, [])\n\n mod = NO_MOD\n if key.lower() != key.upper():\n if key != key.lower():\n mod = MOD_SHIFT\n\n return (key.lower(), mod)\n\n return found",
"def get_adjacent_keys(self, key: str) -> List[str]:\n return [k for k in self.get_adjacent(key)]",
"def _get_kdl_link_names(self):\n num_links = self._urdf_chain.getNrOfSegments()\n link_names = []\n for i in range(num_links):\n link_names.append(self._urdf_chain.getSegment(i).getName())\n return copy.deepcopy(link_names)",
"def get(self, key):\n parts = key.split('.')\n pointer = self\n for p in parts:\n pointer = getattr(pointer, p)\n return pointer",
"def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"",
"def _descend(obj, key):\n tokens = key.split('.')\n if len(tokens) < 2:\n raise ValueError(key)\n value = obj\n for token in tokens[:-1]:\n value = _get(value, token)\n return value, tokens[-1]",
"def split_scope_key(key: str) -> Tuple[Optional[str], str]:\n split_index = key.find('.')\n if split_index != -1:\n return key[:split_index], key[split_index + 1:]\n else:\n return None, key",
"def get_img_name(dict_needed):\r\n \r\n new_list = []\r\n for i in dict_needed:\r\n new_name = i.split('_')[0]\r\n new_list.append(new_name)\r\n \r\n return new_list",
"def GetSubkeyByPath(self, key_path):",
"def subdomain_sorting_key(hostname):\n parts = hostname.split('.')[::-1]\n if parts[-1] == 'www':\n return parts[:-1], 1\n return parts, 0",
"def _resolve_subkeys(key, separator='.'):\n subkey = None\n if separator in key:\n index = key.index(separator)\n subkey = key[index + 1:]\n key = key[:index]\n return key, subkey",
"def get_names(dep):\n res = [dep.name]\n return res",
"def actual_key(self, key):\n key_list = []\n if key.scope == Scope.children:\n key_list.append('children')\n elif key.scope == Scope.parent:\n key_list.append('parent')\n else:\n key_list.append([\"usage\", \"definition\", \"type\", \"all\"][key.scope.block])\n\n if key.block_scope_id is not None:\n key_list.append(key.block_scope_id)\n if key.student_id:\n key_list.append(key.student_id)\n return \".\".join(key_list)",
"def key2basename(self, key):\n for char, replacement in self.dangerous_chars.items():\n key = key.replace(char, replacement)\n return key",
"def extract_key_name(self):\n # quick and dirty regex parsing..\n # consider using gnupg.\n _, out, _ = self.as_user('/usr/bin/gpg --list-keys')\n patterns = [\n 'pub\\s+.*?uid\\s+debrepo.*?sub\\s+\\w+/(\\w+)\\s+[\\w-]+$',\n '^pub.*?\\n\\s+(.*?)\\nuid',\n ]\n keyname = None\n out_str = out.decode('utf8')\n for pattern in patterns:\n m=re.search(pattern, out_str, flags=re.M|re.DOTALL)\n if m:\n keyname=m.group(1)\n break\n return keyname",
"def GetSubkeyByName(self, name):",
"def showDepend(self,childName):\n\tdList,idList,dict,dictId,graph=self.getAllParents(childName)\n# print dict\n# print dictId\n\tif dList:\n\t print \"\\nFor '%s' we found the following versions:\"%childName\n\t space = \"\"\n for item in dList:\n\t print item\n# if not len(space):\n# print \"%s %s\"%(space,item)\n# else:\n# print \"%s |-> %s\"%(space,item)\n# space=\" \"+space\n\telse:\n\t print \"No such data version found\",childName\n\treturn",
"def update_derivation(new_path, old_basename=None):\n new = rmap.fetch_mapping(new_path)\n if old_basename is None: # assume new is a copy of old, with old's name in header\n derived_from = new.name\n else:\n derived_from = old_basename\n new.header[\"derived_from\"] = str(derived_from)\n new.header[\"name\"] = str(os.path.basename(new_path))\n new.write(new_path)\n return str(derived_from)"
] |
[
"0.62433356",
"0.5751408",
"0.56088847",
"0.52073133",
"0.50863856",
"0.49969488",
"0.49366152",
"0.49324927",
"0.4900794",
"0.48926795",
"0.4873144",
"0.48243654",
"0.4822273",
"0.47954556",
"0.47884956",
"0.47645155",
"0.47629976",
"0.47306743",
"0.46946052",
"0.46438673",
"0.46428713",
"0.46352962",
"0.4627377",
"0.46206465",
"0.46176466",
"0.4616761",
"0.45894533",
"0.458401",
"0.45661238",
"0.45642522"
] |
0.740441
|
0
|
Yield absolute names (str) for variables contained in this dictionary. Similar to keys() but with absolute variable names instead of promoted names.
|
def absolute_names(self):
DERIV_KEY_SEP = self._DERIV_KEY_SEP
for key in self._keys:
if DERIV_KEY_SEP in key:
# return derivative keys as tuples instead of strings
of, wrt = key.split(DERIV_KEY_SEP)
yield (of, wrt)
else:
yield key
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]",
"def _var_name_generator():\n count = itertools.count()\n while True:\n yield '_var_' + str(count.next())",
"def items(self, deep=False):\n for var in self.vars(deep):\n yield var, self[var]",
"def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v",
"def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]",
"def unique_var_names(vars):\n count = {}\n for var in vars:\n var_name = var.name.replace(':', '_')\n if var_name in count:\n count[var_name] += 1\n var_name += \"_\" + str(count[var_name])\n else:\n count[var_name] = 0\n yield var_name",
"def all_vars(b):\n for obj in b.component_objects(Var, active=True, descend_into=True):\n name = obj.parent_component().getname(fully_qualified=True, relative_to=b)\n yield (name, obj)\n #\n # Look through parent blocks\n #\n b = b.parent_block()\n while not b is None:\n for obj in b.component_objects(Var, active=True, descend_into=False):\n name = obj.parent_component().name\n yield (name, obj)\n b = b.parent_block()",
"def iterate(cls):\n for name, value in vars(cls).iteritems():\n if name.startswith('__'):\n continue\n yield (name, value)",
"def __iter__(self) -> Iterator[str]:\n for fixup in self._fixup.values():\n yield fixup.var",
"def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name",
"def variableIter(self):\n for (para, start), variable in self.variables.iteritems():\n yield para, start, variable",
"def stats_variable_names(res):\n def varname(s):\n pos = s.find(':')\n return s if pos==-1 else s[0:pos]\n return set( [ varname(key) for key in res.keys()] )",
"def __iter__(self): # pragma: no cover\r\n return ((k, v) for k, v in vars(self).items() if not k.startswith(\"_\"))",
"def get_variable_names(self):\n return [var[1] for var in self.variables]",
"def get_all_variables_names(self):\n return self.project.get_variable_names() + self.design.get_variable_names()",
"def names(self):\n return [x for x in self._dict.keys()]",
"def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()",
"def allkeys(self, as_str=False):\n for key in self.__allkeys((\"__ROOT__\",), {\"__ROOT__\": self}):\n yield \".\".join(key) if as_str else key",
"def get_variable_names(self):\n return [VariableString(s) for s in\n self._design.GetVariables()+self._design.GetPostProcessingVariables()]",
"def __iter__(self) -> Iterator[Tuple[str, str]]:\n for fixup in self._mapping._fixup.values():\n yield fixup.var, fixup.value",
"def keys(self):\n return list(s.name.lower() for s in self.attributes)",
"def variable_names(self):\n\n status, stdout, stderr = self.__xcall__(['--print-variables'])\n\n if status != 0:\n raise RuntimeError(\"error querying --print-variables for package `%s': %s\" % (self.name, stderr))\n\n return stdout.strip().split()",
"def AllKeys(self) -> _n_0_t_1[str]:",
"def __setVarNames(self):\n result = set()\n\n # detecting variables\n for templatePart in self.inputString().split(\"{\"):\n if templatePart is '' or \"}\" not in templatePart:\n continue\n\n endIndex = templatePart.find('}')\n result.add(templatePart[:endIndex])\n\n self.__varNames = list(result)",
"def get_strings(self, prefix=''):\n yield prefix + str(self.value) + ':'\n\n for child in self.children:\n for s in child.get_strings(prefix + ' '):\n yield s",
"def vars(self, deep=False, with_name=None, hidden=True):\n\n # Only the variables of the main group:\n if with_name is None:\n if hidden or self.hidden_prefix is None:\n yield from self._vars\n else:\n yield from filter(\n lambda x: not x.startswith(self.hidden_prefix), self._vars)\n elif with_name in self._vars:\n yield with_name\n\n if deep:\n for group in self._groups:\n yield from (\n group + \"/\" + sub_var\n for sub_var in self[group].vars(\n deep, with_name, hidden)\n )",
"def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)",
"def getNameIter(self):\n return iter(self._nameDetectorDict.keys())",
"def getOqiVarNames( self ):\n\n if self.oqiVarNames:\n return self.oqiVarNames.keys()\n\n n = self.adb.get( \"nOqiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oqiVarName\",\n indx ) \n self.oqiVarNames[name] = indx\n\n return self.oqiVarNames.keys()",
"def __iter__(self):\n return (x for x in vars(self))"
] |
[
"0.6477693",
"0.6199682",
"0.6083263",
"0.6010082",
"0.5916821",
"0.58778125",
"0.5876126",
"0.586796",
"0.5736524",
"0.57143927",
"0.56993145",
"0.5647373",
"0.5647242",
"0.5587756",
"0.5552013",
"0.553918",
"0.55298007",
"0.5525652",
"0.5451237",
"0.5448552",
"0.542522",
"0.5408903",
"0.53993493",
"0.5397019",
"0.5383892",
"0.5363802",
"0.53527135",
"0.5309855",
"0.52991605",
"0.52982086"
] |
0.67446005
|
0
|
Get audit logs, sorted by time in reverse chronological order. This API returns the first 10,000 results only. Please use filters in the API for more relevant results. MSP customers would see logs of MSPs and tenants as well.
|
def get_traillogs(self, conn, limit=100, offset=0, username=None, start_time=None,
end_time=None, description=None, target=None, classification=None,
customer_name=None, ip_address=None, app_id=None):
path = urls.TRAIL_LOG["GET_ALL"]
params = {
"limit": limit,
"offset": offset
}
if username:
params["username"] = username
if start_time:
params["start_time"] = start_time
if end_time:
params["end_time"] = end_time
if description:
params["description"] = description
if target:
params["target"] = target
if classification:
params["classification"] = classification
if customer_name:
params["customer_name"] = customer_name
if ip_address:
params["ip_address"] = ip_address
if app_id:
params["app_id"] = app_id
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GetLogs(self):\n utcnow = datetime.datetime.utcnow()\n lower_filter = self.log_position.GetFilterLowerBound()\n upper_filter = self.log_position.GetFilterUpperBound(utcnow)\n new_filter = self.base_filters + [lower_filter, upper_filter]\n entries = logging_common.FetchLogs(\n log_filter=' AND '.join(new_filter),\n order_by='ASC',\n limit=self.LOG_BATCH_SIZE)\n return [entry for entry in entries if\n self.log_position.Update(entry.timestamp, entry.insertId)]",
"def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])",
"def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs",
"async def logs(\n self,\n *,\n latest: Optional[int] = None,\n oldest: Optional[int] = None,\n limit: Optional[int] = None,\n action: Optional[str] = None,\n actor: Optional[str] = None,\n entity: Optional[str] = None,\n additional_query_params: Optional[Dict[str, any]] = None,\n headers: Optional[Dict[str, str]] = None,\n ) -> AuditLogsResponse:\n query_params = {\n \"latest\": latest,\n \"oldest\": oldest,\n \"limit\": limit,\n \"action\": action,\n \"actor\": actor,\n \"entity\": entity,\n }\n if additional_query_params is not None:\n query_params.update(additional_query_params)\n query_params = {k: v for k, v in query_params.items() if v is not None}\n return await self.api_call(\n path=\"logs\",\n query_params=query_params,\n headers=headers,\n )",
"def getAllActivityLog(self):\n url=self._v2BaseURL + \"/api/v2/activity/activityLog\"\n headers = {'Content-Type': \"application/json\", 'Accept': \"application/json\",\"icSessionID\":self._v2icSessionID}\n infapy.log.info(\"GetAllActivityLog URL - \" + url)\n infapy.log.info(\"API Headers: \" + str(headers))\n infapy.log.info(\"Body: \" + \"This API requires no body\")\n # The below format is for post\n # bodyV3={\"username\": userName,\"password\": password}\n # r3 = re.post(url=urlV3, json=bodyV3, headers=headers)\n try:\n response = re.get(url=url, headers=headers)\n infapy.log.debug(str(response.json()))\n except Exception as e:\n infapy.log.exception(e)\n raise\n infapy.log.info(\"Fetched the all the Activity log from IICS\")\n data = response.json()\n return data",
"def api_list_logs():\n if 'POST' == request.method:\n per_page = get_safe_int(request.form.get('per_page'))\n page_num = get_safe_int(request.form.get('page_num'))\n else:\n per_page = get_safe_int(request.args.get('per_page'))\n page_num = get_safe_int(request.args.get('page_num'))\n\n \"\"\"\n pagination = LogEntity.query.paginate(page_num, per_page, False)\n items = [i.serialize() for i in pagination.items]\n app.logger.debug(\"per_page: {}, page_num: {}\".format(per_page, page_num))\n return jsonify_success(dict(total_pages=pagination.pages,\n list_of_events=items))\n \"\"\"\n logs, total_pages = log_manager.get_logs(per_page, page_num)\n # logs_list = [x.to_visible() for x in logs]\n return jsonify_success(dict(list_of_events=logs, total_pages=total_pages))",
"async def read_logs(\n self,\n log_filter: LogFilter = None,\n limit: int = None,\n offset: int = None,\n sort: LogSort = LogSort.TIMESTAMP_ASC,\n ) -> None:\n body = {\n \"logs\": log_filter.dict(json_compatible=True) if log_filter else None,\n \"limit\": limit,\n \"offset\": offset,\n \"sort\": sort,\n }\n\n response = await self._client.post(\"/logs/filter\", json=body)\n return pydantic.parse_obj_as(List[Log], response.json())",
"def fetch_log_entries(owner_account_id):\n batch_size = 500\n log_entries = []\n\n i = 0\n while True:\n i += 1\n skip = batch_size * (i - 1)\n top = batch_size\n\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/LogEntries?\"\n f\"$skip={skip}&$top={top}&\"\n f\"$filter=logType eq 'Command' and command eq 'TriggerLock'&\"\n f\"$select=id,entryNo,lockTimestamp,receivedAt,boundLockId,boundCardId,contactId&\")\n\n if resp.status_code != 200:\n abort(500)\n\n next_log_entries = resp.json()\n\n if not isinstance(next_log_entries, list):\n abort(500)\n\n log_entries.extend(next_log_entries)\n\n if len(next_log_entries) < batch_size:\n break\n\n return log_entries",
"def get_logs(element, element_id, limit=None, user=None):\n\n\tfrom log.models import Log\n\n\tlogs = Log.objects.filter(element=element, element_id=element_id).order_by('-at')\n\n\tif user is not None:\n\t\tlogs = logs.filter(user=user)\n\n\tif limit:\n\t\tlogs = logs[:limit]\n\n\treturn logs",
"def getLogs():",
"def getLogs():",
"def query_development_audit_logs_v1(self, get_audit_logs_request, **kwargs):\n # type: (AuditLogsRequest_13316e3e, **Any) -> Union[ApiResponse, object, Error_fbe913d9, AuditLogsResponse_bbbe1918, BadRequestError_f854b05]\n operation_name = \"query_development_audit_logs_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'get_audit_logs_request' is set\n if ('get_audit_logs_request' not in params) or (params['get_audit_logs_request'] is None):\n raise ValueError(\n \"Missing the required parameter `get_audit_logs_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/developmentAuditLogs/query'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'get_audit_logs_request' in params:\n body_params = params['get_audit_logs_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.audit_logs.audit_logs_response.AuditLogsResponse\", status_code=200, message=\"Returns a list of audit logs for the given vendor.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=400, message=\"Invalid request\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.error.Error\", status_code=401, message=\"Unauthorized\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.error.Error\", status_code=403, message=\"Forbidden\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.error.Error\", status_code=404, message=\"Not Found\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.error.Error\", status_code=429, message=\"Too Many Requests\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.error.Error\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.error.Error\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=\"ask_smapi_model.v1.audit_logs.audit_logs_response.AuditLogsResponse\")\n\n if full_response:\n return api_response\n return api_response.body",
"def get_log_events(client, log_group):\n\n\tresp = client.filter_log_events(logGroupName=log_group, limit=10000)\n\treturn resp['events']",
"def getAuditListPage(self, user_id=None, name=None, updated_at__gte=None, updated_at__lte=None, ip_address=None, device_name=None, folder=None, folder_id=None, sub_folder_file=None, action_type=None, recipient=None, permissions=None, per_page = 10, **kwargs): \n # Queryset\n audit_log_list = self.getAuditList(\n user_id=user_id,\n name=name,\n updated_at__gte=updated_at__gte,\n updated_at__lte=updated_at__lte,\n ip_address=ip_address,\n device_name=device_name,\n folder=folder,\n folder_id=folder_id,\n sub_folder_file=sub_folder_file,\n action_type=action_type,\n recipient=recipient,\n permissions=permissions)\n # Pagination\n paginator = Paginator(audit_log_list, per_page)\n\n # return page object_list\n return paginator",
"def get_rolling_log_history():\n current_tag = get_current_tag()\n return get_log_history(current_tag)",
"def getAuditList(self, user_id=None, name=None, updated_at__gte=None, updated_at__lte=None, ip_address=None, device_name=None, folder=None, folder_id=None, sub_folder_file=None, action_type=None, recipient=None, permissions=None): \n # Queryset\n return self.handler.getAuditList(\n user_id=user_id,\n name__icontains=name,\n updated_at__date__gte=updated_at__gte,\n updated_at__date__lte=updated_at__lte,\n ip_address__icontains=ip_address,\n device_name__icontains=device_name,\n folder__icontains=folder,\n folder_id=folder_id,\n sub_folder_file__icontains=sub_folder_file,\n action_type=action_type,\n recipient__icontains=recipient,\n permissions=permissions).order_by('-updated_at')",
"def logs(self, **kwargs):\n return self.client.api.logs(self.id, **kwargs)",
"def test_getRecentAuditTrailsByUsername(self):\n\n c = suds.client.Client(\n self.wsdl, username=self.username, password=self.password)\n result = c.service.getRecentAuditTrailsByUsername(self.user.username)\n self.assertTrue(len(result.AuditTrailComplexType) >= 2)\n\n [self.assertEqual(trail.user_id, self.user.id)\n for trail in result.AuditTrailComplexType]",
"def get_logs_and_parts(self):\n result =[]\n query_params = {'key':'1715230983110018712', 'parameter_name':'imprId','et_log_date':'2017-06-01'}\n query = \"\"\"select a.key,a.uuid,a.page_url,a.domain_name,a.app_visitor_cookie,a.referral_domain\n from wt_logs a, wt_log_parts b\n where a.key = b.key\n and a.et_log_date = :et_log_date\n and a.key = :key\n and b.parameter_name = :parameter_name\"\"\".replace('\\n',' ')\n with vertica_python.connect(**conn_info) as connection:\n #print(\"Connected to {} on host{} \".format(conn_info['database'],conn_info['host']))\n cur = connection.cursor()\n cur.execute(query,query_params)\n for row in cur.iterate():\n result.append(row)\n return(result)",
"def get_entries(audit_id=None, start_time=None):\n al = []\n try:\n if start_time and audit_id:\n raise Exception('Incompatible parameters passed')\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if audit_id:\n query = 'select * from audit where audit_id=\"%d\" order by audit_id desc' % int(\n audit_id)\n else:\n if not start_time:\n query = 'select * from audit order by audit_id desc'\n else:\n query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(\n start_time)\n rows, err = db.get_multiple_rows(db_path, query)\n if err:\n raise Exception(err)\n if rows:\n for row in rows:\n audit_entry, err = _parse_audit_entry(row)\n if err:\n raise Exception(err)\n al.append(audit_entry)\n except Exception, e:\n return None, 'Error loading audit entries : %s' % str(e)\n else:\n return al, None",
"def get_traillogs_detail(self, conn, id):\n path = urlJoin(urls.TRAIL_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp",
"def get_logs_list():\n # reads the session\n session = request.args.get('session', type=str)\n\n available_keys = []\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n\n all_keys = lh.get_handlers().keys()\n\n for key in all_keys:\n if lh.check_user_log_visibility(user, key):\n available_keys.append(key)\n\n return jsonify({\"logs\": available_keys})",
"def loginAudit(self, params):\n\n sortLimitParams = self.setSortLimitParameters(params)\n \n filterObj = Q()\n\n if params.get('searchEmail'):\n user_ids = []\n users = WebUsers.objects.filter(mail__icontains=params.get('searchEmail'))\n for user in users:\n user_ids.append(user.uid)\n \n filterObj = filterObj & Q(created_by_id__in=user_ids)\n if params.get('searchIpAddress'):\n filterObj = filterObj & Q(ip_address__icontains=params.get('searchIpAddress'))\n if params.get('searchStartLoginDate'):\n filterObj = filterObj & Q(date_created__gte=params.get('searchStartLoginDate'))\n if params.get('searchEndLoginDate'):\n filterObj = filterObj & Q(date_created__lte=params.get('searchEndLoginDate'))\n if params.get('searchIds'):\n filterObj = filterObj & Q(id__in=params.get('searchIds').split(\",\"))\n\n result = LoginAudit.objects.filter(filterObj).order_by(sortLimitParams['dir'] + sortLimitParams['sort']) [sortLimitParams['start']: sortLimitParams['limit']]\n count = LoginAudit.objects.filter(filterObj).count()\n\n cursor = connection.cursor()\n records = []\n for item in result:\n record = {}\n \n record['id'] = item.id\n record['ip_address'] = item.ip_address\n record['login_date'] = item.date_created\n record['logout_date'] = item.logout_date\n #get the details of this user\n user = WebUsers.objects.get(uid=item.created_by_id)\n record['email'] = user.mail\n \n records.append(record)\n\n cursor.close()\n \n return {'totalCount': count, 'records': records}",
"def get_eventlogs(self, conn, limit=100, offset=0, group_name=None, device_id=None,\n classification=None, start_time=None, end_time=None):\n path = urls.EVENT_LOG[\"GET_ALL\"]\n params = {\n \"limit\": limit,\n \"offset\": offset\n }\n if group_name:\n params[\"group_name\"] = group_name\n if device_id:\n params[\"device_id\"] = device_id\n if classification:\n params[\"classification\"] = classification\n if start_time:\n params[\"start_time\"] = start_time\n if end_time:\n params[\"end_time\"] = end_time\n resp = conn.command(apiMethod=\"GET\", apiPath=path, apiParams=params)\n return resp",
"def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp",
"def get_critical_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:\n logs_amount = args.get('limit')\n query_start_time, query_end_time = query_timestamp(args)\n query = 'SELECT * FROM `firewall.threat` WHERE severity = \"Critical\" ' # guardrails-disable-line\n query += f'AND time_generated BETWEEN TIMESTAMP(\"{query_start_time}\") AND ' \\\n f'TIMESTAMP(\"{query_end_time}\") LIMIT {logs_amount}'\n\n records, raw_results = client.query_loggings(query)\n\n transformed_results = [threat_context_transformer(record) for record in records]\n\n human_readable = tableToMarkdown('Logs threat table', transformed_results, removeNull=True)\n ec = {\n 'CDL.Logging.Threat': transformed_results\n }\n return human_readable, ec, raw_results",
"def list_of_logs(self):\n log_line_obj = self.env['common.log.lines.ept']\n model_id = log_line_obj.get_model_id('amazon.vcs.tax.report.ept')\n records = log_line_obj.search(\n [('model_id', '=', model_id), ('res_id', '=', self.id)])\n action = {\n 'domain': \"[('id', 'in', \" + str(records.ids) + \" )]\",\n 'name': 'Mismatch Details',\n 'view_mode': 'tree',\n 'res_model': 'common.log.lines.ept',\n 'type': 'ir.actions.act_window',\n }\n return action",
"def query_threat_logs_command(args: dict, client: Client) -> Tuple[str, dict, List[Dict[str, Any]]]:\n query_table_name: str = 'threat'\n context_transformer_function = threat_context_transformer\n table_context_path: str = 'CDL.Logging.Threat'\n return query_table_logs(args, client, query_table_name, context_transformer_function, table_context_path)",
"def test_getAuditTrailsByDate(self):\n\n c = suds.client.Client(\n self.wsdl, username=self.username, password=self.password)\n\n # create a bunch of old Audit Trails, older than 1 hour\n event1_date = timezone.localtime(\n timezone.now()) - timedelta(days=3 * 365)\n self.createAuditTrail(date=event1_date)\n\n event2_date = timezone.localtime(timezone.now()) - timedelta(days=25)\n self.createAuditTrail(date=event2_date)\n\n event3_date = timezone.localtime(timezone.now()) - timedelta(days=151)\n self.createAuditTrail(date=event3_date)\n\n # take the date and time of today\n date = datetime.today().date()\n\n # and create an Audit Trail of right now\n event4_date = date - timedelta(hours=5)\n self.createAuditTrail(date=event4_date)\n\n # okay we got 4 ATs in the past, and 2 in the future.\n # If we now ask for ATs not older than 1 hour in the future, we\n # should only get the ones +1hr in the future.\n\n # login AT is created NOW, but we're asking for the future!\n soap_result = c.service.getAuditTrailsByDate(date)\n self.assertTrue(len(soap_result.AuditTrailComplexType) >= 3)\n\n # check results for correct user are returned\n [self.assertEqual(trail.user_id, self.user.id)\n for trail in soap_result.AuditTrailComplexType]\n # check ordering\n self.assertEqual(soap_result.AuditTrailComplexType[\n 0].date, soap_result.AuditTrailComplexType[1].date)\n # check max entries\n self.assertTrue(\n len(soap_result.AuditTrailComplexType) <= settings.SOAP_MAX_ENTRIES)",
"def get_logs(self,fields=['key','app_visitor_cookie','page_url','et_log_date'],date='2017-06-10'):\n result =[]\n query_params = {'uuid':'0000000000000000001', 'param_value':'Default'}\n logs_query = \"select uuid,param_name,param_value from log_parts_backup where uuid = :uuid and param_value =:param_value\"\n with vertica_python.connect(**conn_info) as connection:\n #print(\"Connected to {} on host{} \".format(conn_info['database'],conn_info['host']))\n cur = connection.cursor()\n cur.execute(logs_query,query_params)\n for row in cur.iterate():\n result.append(row)\n return(result)"
] |
[
"0.7129476",
"0.6680837",
"0.66445136",
"0.6628818",
"0.6558355",
"0.6427984",
"0.6388969",
"0.63801205",
"0.6173607",
"0.6146164",
"0.6146164",
"0.6137496",
"0.6091237",
"0.6076141",
"0.60328996",
"0.60099995",
"0.59999657",
"0.5991797",
"0.59658474",
"0.5960705",
"0.5949625",
"0.5948865",
"0.5943256",
"0.59309494",
"0.5930776",
"0.589526",
"0.5889222",
"0.58755594",
"0.5864235",
"0.5863648"
] |
0.67995703
|
1
|
Get audit events for all groups, sorted by time in reverse chronological order. This API returns the first 10,000 results only. Please use filters in the API for more relevant results.
|
def get_eventlogs(self, conn, limit=100, offset=0, group_name=None, device_id=None,
classification=None, start_time=None, end_time=None):
path = urls.EVENT_LOG["GET_ALL"]
params = {
"limit": limit,
"offset": offset
}
if group_name:
params["group_name"] = group_name
if device_id:
params["device_id"] = device_id
if classification:
params["classification"] = classification
if start_time:
params["start_time"] = start_time
if end_time:
params["end_time"] = end_time
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
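# Illustrative sketch, assuming `client` is an instance of the class that
# defines get_eventlogs above and `conn` exposes the .command() helper it
# relies on; the "events" key read from the response is also an assumption.
def fetch_group_eventlogs(client, conn, group):
    # Page through a group's audit events 100 at a time; the API caps the
    # overall result set at 10,000 entries.
    events, offset = [], 0
    while True:
        resp = client.get_eventlogs(conn, limit=100, offset=offset, group_name=group)
        batch = resp.get("events", []) if isinstance(resp, dict) else []
        if not batch:
            break
        events.extend(batch)
        offset += len(batch)
    return events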
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_log_events(client, log_group):\n\n\tresp = client.filter_log_events(logGroupName=log_group, limit=10000)\n\treturn resp['events']",
"def list_events(\n self,\n project_name,\n group_id,\n service_filter=None,\n time_range=None,\n page_size=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"list_events\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"list_events\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.list_events,\n default_retry=self._method_configs[\"ListEvents\"].retry,\n default_timeout=self._method_configs[\"ListEvents\"].timeout,\n client_info=self._client_info,\n )\n\n request = error_stats_service_pb2.ListEventsRequest(\n project_name=project_name,\n group_id=group_id,\n service_filter=service_filter,\n time_range=time_range,\n page_size=page_size,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"project_name\", project_name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n iterator = google.api_core.page_iterator.GRPCIterator(\n client=None,\n method=functools.partial(\n self._inner_api_calls[\"list_events\"],\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n ),\n request=request,\n items_field=\"error_events\",\n request_token_field=\"page_token\",\n response_token_field=\"next_page_token\",\n )\n return iterator",
"def get_all_events_by_group() -> Group:\n level = Event.objects.filter(level__contains='information').values_list('agent')\n agent = Agent.objects.filter(pk__in=level).values_list('user')\n user = User.objects.filter(pk__in=agent).values_list('group')\n return Group.objects.filter(pk__in=user)",
"def get_events(self, group_ids, combine=False):\n # TODO: This (and more is very similar to GLM class. Possible refactor?\n\n evs = get_children(group_ids, self.events)\n\n if combine:\n evs = pd.concat(evs)\n\n return evs",
"def get(self, request, group):\n event = group.get_latest_event()\n\n try:\n return client.get('/events/{}/'.format(event.id), request.user, request.auth)\n except client.ApiError as e:\n return Response(e.body, status=e.status)",
"def get_events(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n\r\n query = \"\"\"\r\n SELECT DISTINCT E.eid, E1.ename, E1.description,\r\n E.category, E1.start_date, E1.end_date, E1.num_cap,\r\n E1.num_attending, L.lname, L.address_1, E.tag, L.lat, L.lon\r\n FROM {}.EventTags AS E, {}.UserTags AS U, {}.Events as E1, {}.Locations as L\r\n WHERE U.username='{}' AND\r\n E.tag = U.tag AND\r\n E1.eid = E.eid AND\r\n E1.lid = L.lid AND\r\n E1.start_date >= {}\r\n ORDER by E1.start_date\r\n \"\"\".format(\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n self.user.username,\r\n str(datetime.date.today())\r\n )\r\n\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n\r\n return [i for i in data]",
"def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]",
"def list_events(self, name):\n return self._get_events(name)",
"def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc',\n goal=None, strategy=None):\n\n context = pecan.request.context\n policy.enforce(context, 'audit:get_all',\n action='audit:get_all')\n\n return self._get_audits_collection(marker, limit, sort_key,\n sort_dir, goal=goal,\n strategy=strategy)",
"def getAuditList(self, user_id=None, name=None, updated_at__gte=None, updated_at__lte=None, ip_address=None, device_name=None, folder=None, folder_id=None, sub_folder_file=None, action_type=None, recipient=None, permissions=None): \n # Queryset\n return self.handler.getAuditList(\n user_id=user_id,\n name__icontains=name,\n updated_at__date__gte=updated_at__gte,\n updated_at__date__lte=updated_at__lte,\n ip_address__icontains=ip_address,\n device_name__icontains=device_name,\n folder__icontains=folder,\n folder_id=folder_id,\n sub_folder_file__icontains=sub_folder_file,\n action_type=action_type,\n recipient__icontains=recipient,\n permissions=permissions).order_by('-updated_at')",
"def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_groups_responder(self):\n pass",
"def get_events(self):\n return self.s.query(Event).all()",
"def get_events(self, lambda_event):\n \n body = lambda_event['body']\n body = json.loads(body)\n\n required_fields = ['group_id']\n for f in required_fields:\n if f not in body:\n return get_bad_request('POST body missing field {}'.format(f))\n\n group_id = body['group_id']\n limit = 10\n if 'limit' in body:\n limit = body['limit']\n limit = int(limit)\n \n user = self.mealShareUsers.get_user_cognito_data(lambda_event)\n current_user = user['user_id']\n \n # Requesting user must already be a member\n if not self.mealShareGroups.is_user_in_group(current_user, str(group_id)):\n return {\n 'statusCode': 401,\n 'statusMessage': 'User {} is not a member of {}'.format(current_user, group_id),\n 'group_id': group_id,\n 'user_id': current_user\n }\n \n events = self.mealShareGroups.get_events(group_id, limit)\n return {\n 'statusCode': 200,\n 'events': events,\n 'group_id': group_id,\n 'user_id': current_user\n }",
"def audit_actions_and_groups(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"audit_actions_and_groups\")",
"def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')",
"def get_all_events(cls):\n try:\n events = list(events_coll.find())\n events_list = []\n if events is not None:\n for event in events:\n one_event = cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)",
"def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])",
"def get_events():\n # reads the session\n session = request.args.get('session', type=str)\n process = request.args.get('process', default='receipt', type=str)\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n caseid = request.args.get('caseid', type=str)\n events = lh.get_handler_for_process_and_session(process, session).get_events(caseid)\n i = 0\n while i < len(events):\n keys = list(events[i].keys())\n for key in keys:\n if str(events[i][key]).lower() == \"nan\" or str(events[i][key]).lower() == \"nat\":\n del events[i][key]\n i = i + 1\n dictio = {\"events\": events}\n ret = jsonify(dictio)\n return ret",
"def get_group_restricted_events(user, all_events=False):\n types_allowed = get_types_allowed(user)\n\n if all_events:\n return Event.objects.filter(event_type__in=types_allowed)\n else:\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)",
"def _get_all_event_history(device_event_file_path, event_labels, timeout=10.0):\n result = []\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n if event_labels is None:\n tac_cmd = [\"timeout\", timeout_str, \"tac\", device_event_file_path]\n out = \"\"\n try:\n out = subprocess.check_output(tac_cmd).decode(\"utf-8\", \"replace\")\n except subprocess.CalledProcessError as err:\n if err.returncode == 124:\n timedout = True\n json_events = out.splitlines()\n else:\n grep_cmd = [\"timeout\", timeout_str, \"grep\", \"-w\"]\n for event_label in event_labels:\n if event_label:\n grep_cmd.append(\"-e\")\n grep_cmd.append(event_label)\n grep_cmd.append(device_event_file_path)\n grep_proc = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n json_events = out.splitlines()\n json_events.reverse()\n\n return _get_events_from_json_output(json_events, event_labels), timedout",
"def get_events(self):\n\n url = '/v2.4/'+self.page_id+'/events'\n data = self.graph.request(url)\n\n while 'next' in data['paging'].keys():\n print data['paging']['next']\n data = self.graph.request(url, args={\n 'limit' : 100,\n 'after' : data['paging']['cursors']['after']\n })\n\n return data",
"def list_event(self, start_time=0, end_time=sys.maxsize):\n entities = []\n entities_j = self._get('events?startTime={}&endTime={}'.format(start_time, end_time))\n if entities_j:\n for entity_j in entities_j:\n entity = Event(entity_j['id'], entity_j['eventType'], entity_j['ctime'],\n entity_j['dataSource'], entity_j.get('dataId', None),\n entity_j['category'], entity_j['text'], entity_j.get('tags', None),\n entity_j.get('tenantId', None), entity_j.get('context', None))\n entities.append(entity)\n return entities",
"def get_events(self, limit=10, query=None):\n\n conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)\n request = self.OPENFDA_API_EVENT + \"?limit=\" + str(limit)\n if query is not None:\n request += \"&\" + query\n conn.request(\"GET\", request)\n events_search = conn.getresponse()\n raw_data = events_search.read()\n events_str = raw_data.decode(\"utf8\")\n events = json.loads(events_str)\n events = events['results']\n\n return events",
"def get_events():\n\n all_calendar_events = {}\n\n # Suppress warning in logs\n # https://github.com/googleapis/google-api-python-client/issues/299\n service = build('calendar', 'v3', credentials=google_auth.creds, cache_discovery=False)\n\n now = datetime.datetime.utcnow().today().isoformat() + 'Z' # 'Z' indicates UTC time\n\n for calendar_name, calendar_id in config.GOOGLE_CALENDARS.items():\n all_events = []\n events_result = service.events().list(calendarId=calendar_id, timeMin=now,\n maxResults=10, singleEvents=True, orderBy='startTime').execute()\n events = events_result.get('items', [])\n if not events:\n all_events.append(['Ei tulevia tapahtumia'])\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))[:10]\n all_events.append([start, event[\"summary\"], event[\"htmlLink\"]])\n all_calendar_events[calendar_name] = all_events\n\n return all_calendar_events",
"def get_events_list(self):\n # REMARK: Maybe possible to optimize ?\n # - Not using a for loop ?\n # - Storing the ds to avoid reading all events when using the function a second time\n\n # Gather events in ds\n events_df = pd.DataFrame(columns=['name', 'date_start', 'date_end', 'duration', 'type_event', 'is_atypical'])\n for event in self.set_atypical_events:\n events_df = events_df.append(event.get_info(), ignore_index=True)\n\n # Sort ds according to date_start\n events_df = events_df.sort_values('date_start')\n events_df = events_df.reset_index(drop=True)\n\n return events_df",
"def visit_group(self, group):\n for obj in self.event_json['events']:\n event_id = obj['id']\n event = self.world.events[event_id]\n group.add(event)",
"def get_all(self, q=[], marker=None, limit=None, sort_key='timestamp',\n sort_dir='desc', alarms=False, logs=False,\n include_suppress=False, expand=False):\n return self._get_eventlog_collection(marker, limit, sort_key,\n sort_dir, expand=expand, q=q,\n alarms=alarms, logs=logs,\n include_suppress=include_suppress)",
"def get_infra_log(self, max_items=30):\n events = []\n paginator = self.cfn.meta.client.get_paginator(\"describe_stack_events\")\n status = paginator.paginate(StackName=self.stack,\n PaginationConfig={\n 'MaxItems': max_items\n })\n for event in status.search(\"StackEvents[*].[Timestamp, LogicalResourceId, ResourceStatus]\"):\n events.append([str(e) for e in event])\n return events",
"def get_all(self, q=None):\r\n q = q or []\r\n event_filter = _event_query_to_event_filter(q)\r\n return [Event(message_id=event.message_id,\r\n event_type=event.event_type,\r\n generated=event.generated,\r\n traits=event.traits)\r\n for event in\r\n pecan.request.storage_conn.get_events(event_filter)]",
"def process_groups(groups, logs):\n events = list()\n \n for group in groups:\n tag = group[2]\n target = group[3]\n msg_type = group[-1].lower()\n if tag == ACTIVITY_TAG or tag == DIALOG_TAG or tag == VIEW_TAG:\n\n if group[0] == group[1]:\n if msg_type == 'touchevent':\n events.append(touch_processor.create_touch_event(msg_type, target, logs[group[0]], group[0], tag))\n elif msg_type == 'keyevent':\n events.append(key_processor.create_key_event(msg_type, target, logs[group[0]], group[0]))\n continue\n\n # Activity & Dialig\n if msg_type == 'touchevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG, VIEW_TAG])\n ev = touch_processor.parse_touch_event(msg_type, target, event_logs, group[0], tag)\n elif msg_type == 'keyevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == POPUPWINDOW_TAG:\n # PopupWindow, process view onTouchEvent\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[0]], group[0]))\n view_groups = group[4]\n view_events = process_groups(view_groups, logs)\n if len(view_events) != 0:\n events += view_events\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[1]], group[1]))\n elif tag == EDITABLE_INPUT_CONNECTION_TAG:\n # Input Event\n nested_groups = group[4]\n # Process nested events\n nested_events = process_groups(nested_groups, logs)\n evs = input_processor.parse_input_event(msg_type, target, logs[group[0]:group[1]+1], nested_events, group[0])\n events += evs\n elif tag == TEXT_VIEW_KEY_TAG:\n # Keyboard event caught by TextView onKeyPreIme\n event_logs = clear_logs(logs[group[0]:group[1]+1], [TEXT_VIEW_KEY_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n ev.intent = event.KeyEvent.HIDE_KEYBOARD_INTENT\n events.append(ev)\n elif tag == WEBVIEW_KEY_EVENT_TAG:\n # WebView KeyBoard event\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == WEBVIEW_CLIENT_TAG:\n # WebView page loaded\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_page_loaded_processor.parse_page_loaded(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == SENSOR_LISTENER_TAG:\n # Low level sensor\n event_logs = logs[group[0]:group[1]+1]\n ev = low_level_sensor_processor.parse_low_level_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == LOCATION_MANAGER_TAG or tag == LOCATION_LISTENER_TAG:\n event_logs = logs[group[0]:group[1]+1]\n ev = location_processor.parse_location_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n\n return events"
] |
[
"0.70322144",
"0.61524975",
"0.604581",
"0.5978046",
"0.5915792",
"0.5767054",
"0.5700942",
"0.5694042",
"0.5663286",
"0.5598019",
"0.55365473",
"0.55105114",
"0.54919237",
"0.54886174",
"0.54715794",
"0.5468754",
"0.5455551",
"0.54503524",
"0.5448905",
"0.5447147",
"0.54007334",
"0.538896",
"0.5385415",
"0.5362074",
"0.5341687",
"0.5341212",
"0.53308547",
"0.533062",
"0.53239524",
"0.531452"
] |
0.65729
|
1
|
Get details of an audit event/log
|
def get_eventlogs_detail(self, conn, id):
path = urlJoin(urls.EVENT_LOG["GET"], id)
resp = conn.command(apiMethod="GET", apiPath=path)
return resp
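# Illustrative sketch, assuming `client` is an instance of the class that
# defines get_eventlogs_detail above and `conn` exposes the .command()
# helper it relies on; `event_id` is whatever identifier the event log API expects.
def describe_event(client, conn, event_id):
    # Fetch the full detail record for a single audit event by its id.
    return client.get_eventlogs_detail(conn, event_id)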
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def async_describe_logbook_event(event): # type: ignore\n data = event.data\n message = \"has been triggered\"\n if ATTR_SOURCE in data:\n message = f\"{message} by {data[ATTR_SOURCE]}\"\n return {\n \"name\": data.get(ATTR_NAME),\n \"message\": message,\n \"source\": data.get(ATTR_SOURCE),\n \"entity_id\": data.get(ATTR_ENTITY_ID),\n }",
"def get_audit(self, query, session):\n raise NotImplementedError()",
"def get_auditlog_entry_report_status(session):\n\n url = session.get_url('audit', 'main')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Obtained audit log entry report status.')",
"def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc',\n alarms=False, logs=False):\n # /detail should only work against collections\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != \"event_log\":\n raise exceptions.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['event_log', 'detail'])\n return self._get_eventlog_collection(marker, limit, sort_key, sort_dir,\n expand, resource_url, None,\n alarms, logs)",
"def getLog(self):\n \n return self.resp[\"log\"]",
"def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')",
"def event_log(self):\n pass",
"def test_get_event_log(event_log_api_setup):\n api_response = event_log_api_setup.get_event_log(\n event_log_id=1,\n )\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")",
"def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]",
"def log_event(event):\n LOGGER.info(\"====================================================\")\n LOGGER.info(event)\n LOGGER.info(\"====================================================\")",
"async def getAudit(self, auditid) -> GetAuditResponse:\n\n print(\"get audit 1\" + auditid)\n res = await self.stub.GetAudit(\n GetAuditRequest(_id=auditid\n ))\n print(res.status, res.message, res.audit)\n return res",
"def get_event_details(eventId):\n response = client.query(\n TableName=\"EventsSingleTable\",\n # IndexName='',\n Select=\"ALL_ATTRIBUTES\",\n KeyConditionExpression=\"pk = :pk\",\n ExpressionAttributeValues={\":pk\": eventId},\n )\n\n items = response[\"Items\"]\n\n # Try serializing multiple entities from a single request\n for item in items:\n if item[\"sk\"] == item[\"pk\"]:\n e = Event(**item)\n pprint.pprint(str(e))\n else:\n c = Comment(**item)\n pprint.pprint(str(c))",
"def getLog(self):\n return self.session.request('diag/log/')",
"def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])",
"def getLogs():",
"def getLogs():",
"def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs",
"def get_test_audit(context, **kw):\n obj_cls = objects.Audit\n db_data = db_utils.get_test_audit(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)",
"def get_order_audit_trail(order_guid):\n return linnapi.orders.get_processed_order_audit_trail(order_guid)",
"def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")",
"def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")",
"def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")",
"def get_log()->dict:\n return execute_command(\"SELECT log FROM log\").fetchall()[0][0]",
"def parse_event_attlog(self):\n uid = ''\n ver_type = -1\n date_str = ''\n if self.last_event_code == DEFS.EF_ATTLOG:\n uid = self.last_payload_data[0:9].decode('ascii').\\\n replace('\\x00', '')\n ver_type = struct.unpack('<H', self.last_payload_data[24:26])[0]\n date_str = \"20%i/%i/%i %i:%i:%i\" %\\\n tuple(self.last_payload_data[26:32])\n\n return [uid, ver_type, date_str]",
"def get_event():\n data = _get_process_detail_expanded_data()[\"event\"]\n return data",
"def source_audit(self) -> SourceAudit:\n return self._source_audit",
"def get_one(self, audit):\n if self.from_audits:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_audit = api_utils.get_resource('Audit', audit)\n policy.enforce(context, 'audit:get', rpc_audit, action='audit:get')\n\n return Audit.convert_with_links(rpc_audit)",
"def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances",
"def get_one(self, id):\n rpc_ilog = objects.event_log.get_by_uuid(\n pecan.request.context, id)\n\n return EventLog.convert_with_links(rpc_ilog)",
"def get_log_events(client, log_group):\n\n\tresp = client.filter_log_events(logGroupName=log_group, limit=10000)\n\treturn resp['events']"
] |
[
"0.6821845",
"0.6652355",
"0.6543476",
"0.6522972",
"0.64650506",
"0.6413855",
"0.6246624",
"0.62311846",
"0.62180877",
"0.6201794",
"0.61936474",
"0.6179406",
"0.617823",
"0.61002064",
"0.60831964",
"0.60831964",
"0.6037114",
"0.60080445",
"0.5976022",
"0.59753305",
"0.59753305",
"0.59753305",
"0.59369195",
"0.5927277",
"0.59212714",
"0.59127355",
"0.59121966",
"0.58947575",
"0.5869426",
"0.5848566"
] |
0.7285483
|
0
|
Checking with Microsoft Font Validator.
|
def com_google_fonts_check_fontvalidator(font):
# In some cases we want to override the severity level of
# certain checks in FontValidator:
downgrade_to_warn = [
# There are reports that this fontval check has an out-of-date
# understanding of valid bits in fsSelection.
# More info at:
# https://github.com/googlei18n/fontmake/issues/414#issuecomment-379408127
"There are undefined bits set in fsSelection field",
# FIX-ME: Why did we downgrade this one to WARN?
"Misoriented contour"
]
# Some other checks we want to completely disable:
disabled_fval_checks = [
# FontVal E4012 thinks that
# "Versions 0x00010000 and 0x0001002 are currently
# the only defined versions of the GDEF table."
# but the GDEF chapter of the OpenType specification at
# https://docs.microsoft.com/en-us/typography/opentype/spec/gdef
# describes GDEF header version 1.3, which is not yet recognized
# by FontVal, thus resulting in this spurious false-FAIL:
"The version number is neither 0x00010000 nor 0x0001002",
        # These messages below are simply FontVal giving the user feedback
        # on the progress of running it. They have nothing to do with
# actual issues on the font files:
"Validating glyph with index",
"Table Test:",
# No software is affected by Mac strings nowadays.
# More info at: googlei18n/fontmake#414
"The table doesn't contain strings for Mac platform",
"The PostScript string is not present for both required platforms",
# Font Bakery has got a native check for the xAvgCharWidth field
# which is: com.google.fonts/check/xavgcharwidth
"The xAvgCharWidth field does not equal the calculated value",
# The optimal ordering suggested by FVal check W0020 seems to only be
# relevant to performance optimizations on old versions of Windows
# running on old hardware. Since such performance considerations
# are most likely negligible, we're not going to bother users with
# this check's table ordering requirements.
# More info at:
# https://github.com/googlefonts/fontbakery/issues/2105
"Tables are not in optimal order",
# Font Bakery has its own check for required/optional tables:
# com.google.fonts/check/required_tables
"Recommended table is missing"
]
# There are also some checks that do not make
# sense when we're dealing with variable fonts:
VARFONT_disabled_fval_checks = [
# Variable fonts typically do have lots of self-intersecting
# contours because they are used to draw each portion
# of variable glyph features.
"Intersecting contours",
"Intersecting components of composite glyph",
# DeltaFormat = 32768 (same as 0x8000) means VARIATION_INDEX,
# according to https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2
# The FontVal problem description for this check (E5200) only mentions
# the other values as possible valid ones. So apparently this means FontVal
# implementation is not up-to-date with more recent versions of the OpenType spec
# and that's why these spurious FAILs are being emitted.
# That's good enough reason to mute it.
# More info at:
# https://github.com/googlefonts/fontbakery/issues/2109
"The device table's DeltaFormat value is invalid"
]
from fontTools.ttLib import TTFont
if is_variable_font(TTFont(font)):
disabled_fval_checks.extend(VARFONT_disabled_fval_checks)
try:
import subprocess
fval_cmd = [
"FontValidator", "-file", font, "-all-tables",
"-report-in-font-dir", "-no-raster-tests"
]
subprocess.check_output(fval_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
filtered_msgs = ""
for line in e.output.decode().split("\n"):
disable_it = False
for substring in disabled_fval_checks:
if substring in line:
disable_it = True
if not disable_it:
filtered_msgs += line + "\n"
yield INFO, \
Message("fontval-returned-error",
("Microsoft Font Validator returned an error code."
" Output follows :\n\n{}\n").format(filtered_msgs))
except (OSError, IOError) as error:
yield ERROR, \
Message("fontval-not-available",
"Mono runtime and/or Microsoft Font Validator"
" are not available!")
raise error
def report_message(msg, details):
if details:
if isinstance(details, list) and len(details) > 1:
# We'll print lists with one item per line for
# improved readability.
if None in details:
details.remove(None)
# A designer will likely not need the full list
# in order to fix a problem.
# Showing only the 10 first ones is more than enough
# and helps avoid flooding the report.
if len(details) > 25:
num_similar = len(details) - 10
details = details[:10]
details.append(f"NOTE: {num_similar} other similar"
" results were hidden!")
details = '\n\t- ' + '\n\t- '.join(details)
return f"MS-FonVal: {msg} DETAILS: {details}"
else:
return f"MS-FonVal: {msg}"
xml_report_file = f"{font}.report.xml"
html_report_file = f"{font}.report.html"
fval_file = os.path.join(os.path.dirname(font), 'fval.xsl')
grouped_msgs = {}
with open(xml_report_file, "rb") as xml_report:
from lxml import etree
doc = etree.fromstring(xml_report.read())
for report in doc.iterfind('.//Report'):
msg = report.get("Message")
details = report.get("Details")
disable_it = False
for substring in disabled_fval_checks:
if substring in msg:
disable_it = True
if disable_it:
continue
if msg not in grouped_msgs:
grouped_msgs[msg] = {"errortype": report.get("ErrorType"),
"details": [details]}
else:
if details not in grouped_msgs[msg]["details"]:
# avoid cluttering the output with tons of identical reports
# yield INFO, 'grouped_msgs[msg]["details"]: {}'.format(grouped_msgs[msg]["details"])
grouped_msgs[msg]["details"].append(details)
# ---------------------------
# Clean-up generated files...
os.remove(xml_report_file)
# FontVal internal detail: HTML report generated only on non-Windows due to
# Mono or the used HTML renderer not being able to render XML with a
# stylesheet directly. https://github.com/googlefonts/fontbakery/issues/1747
if os.path.exists(html_report_file):
os.remove(html_report_file)
os.remove(fval_file)
# ---------------------------
# Here we start emitting the grouped log messages
for msg, data in grouped_msgs.items():
# But before printing we try to make the "details" more
# readable. Otherwise the user would get the text terminal
# flooded with messy data.
        # No need to print it as a list if we really only
# got one log message of this kind:
if len(data["details"]) == 1:
data["details"] = data["details"][0]
# Simplify the list of glyph indices by only displaying
# their numerical values in a list:
for glyph_index in ["Glyph index ", "glyph# "]:
if data["details"] and \
data["details"][0] and \
glyph_index in data["details"][0]:
try:
data["details"] = {'Glyph index': [int(x.split(glyph_index)[1])
for x in data["details"]]}
break
except ValueError:
pass
# And, finally, the log messages are emitted:
if data["errortype"] == "P":
yield PASS, report_message(msg, data["details"])
elif data["errortype"] == "E":
status = FAIL
for substring in downgrade_to_warn:
if substring in msg:
status = WARN
yield status, Message("fontval-error", report_message(msg, data["details"]))
elif data["errortype"] == "W":
yield WARN, Message("fontval-warn", report_message(msg, data["details"]))
else:
yield INFO, Message("fontval-info", report_message(msg, data["details"]))
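# Illustrative sketch of consuming the check above; the generator yields
# (status, message) pairs, and the status constants (PASS, WARN, FAIL, INFO,
# ERROR) plus the Message class are assumed to come from the surrounding
# checker framework, as used in the code above.
def run_fontval_check(font_path):
    # Walk every yielded result and print a plain-text summary line.
    for status, message in com_google_fonts_check_fontvalidator(font_path):
        print(f"[{status}] {message}")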
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_check_opentype_familyname(self):\n font = Font.get_ttfont(self.operator.path)\n self.assertEqual(font.ot_family_name, font.familyname)",
"def test_check_opentype_stylename(self):\n stylename_mapping = {\n 'Regular': ['Thin', 'Light', 'Extra Light', 'Regular',\n 'Medium', 'SemiBold', 'Extra Bold', 'Black'],\n 'Italic': ['Thin Italic', 'Extra Light Italic', 'Italic',\n 'Medium Italic', 'SemiBold Italic', 'Extra Bold Italic',\n 'Black Italic'],\n 'Bold': ['Bold'],\n 'Bold Italic': ['Bold Italic']\n }\n\n font = Font.get_ttfont(self.operator.path)\n self.assertIn(font.stylename, stylename_mapping)\n self.assertIn(font.ot_style_name, stylename_mapping[font.stylename])",
"def com_google_fonts_check_058(ttFont):\n if ttFont.sfntVersion == b'\\x00\\x01\\x00\\x00' and ttFont.get(\n \"post\") and ttFont[\"post\"].formatType == 3.0:\n yield SKIP, (\"TrueType fonts with a format 3.0 post table contain no\"\n \" glyph names.\")\n else:\n import re\n bad_names = []\n for _, glyphName in enumerate(ttFont.getGlyphOrder()):\n if glyphName in [\".null\", \".notdef\", \".ttfautohint\"]:\n # These 2 names are explicit exceptions\n # in the glyph naming rules\n continue\n if not re.match(r'^(?![.0-9])[a-zA-Z._0-9]{1,31}$', glyphName):\n bad_names.append(glyphName)\n\n if len(bad_names) == 0:\n yield PASS, \"Glyph names are all valid.\"\n else:\n yield FAIL, (\"The following glyph names do not comply\"\n \" with naming conventions: {}\"\n \" A glyph name may be up to 31 characters in length,\"\n \" must be entirely comprised of characters from\"\n \" the following set:\"\n \" A-Z a-z 0-9 .(period) _(underscore). and must not\"\n \" start with a digit or period.\"\n \" There are a few exceptions\"\n \" such as the special character \\\".notdef\\\".\"\n \" The glyph names \\\"twocents\\\", \\\"a1\\\", and \\\"_\\\"\"\n \" are all valid, while \\\"2cents\\\"\"\n \" and \\\".twocents\\\" are not.\").format(bad_names)",
"def com_google_fonts_check_058(ttFont):\n if ttFont.sfntVersion == b'\\x00\\x01\\x00\\x00' and ttFont.get(\n \"post\") and ttFont[\"post\"].formatType == 3.0:\n yield SKIP, (\"TrueType fonts with a format 3.0 post table contain no\"\n \" glyph names.\")\n else:\n import re\n bad_names = []\n for _, glyphName in enumerate(ttFont.getGlyphOrder()):\n if glyphName in [\".null\", \".notdef\", \".ttfautohint\"]:\n # These 2 names are explicit exceptions\n # in the glyph naming rules\n continue\n if not re.match(r'^(?![.0-9])[a-zA-Z._0-9]{1,31}$', glyphName):\n bad_names.append(glyphName)\n\n if len(bad_names) == 0:\n yield PASS, \"Glyph names are all valid.\"\n else:\n yield FAIL, (\"The following glyph names do not comply\"\n \" with naming conventions: {}\"\n \" A glyph name may be up to 31 characters in length,\"\n \" must be entirely comprised of characters from\"\n \" the following set:\"\n \" A-Z a-z 0-9 .(period) _(underscore). and must not\"\n \" start with a digit or period.\"\n \" There are a few exceptions\"\n \" such as the special character \\\".notdef\\\".\"\n \" The glyph names \\\"twocents\\\", \\\"a1\\\", and \\\"_\\\"\"\n \" are all valid, while \\\"2cents\\\"\"\n \" and \\\".twocents\\\" are not.\").format(bad_names)",
"def validate ( self, object, name, value ):\n if isinstance( value, wx.FontPtr ):\n return wx.Font( value.GetPointSize(), value.GetFamily(), \n value.GetStyle(), value.GetWeight(), \n value.GetUnderlined(), value.GetFaceName() )\n if isinstance( value, wx.Font ):\n return value\n try:\n point_size = 10\n family = wx.DEFAULT\n style = wx.NORMAL\n weight = wx.NORMAL\n underline = 0\n facename = []\n for word in value.split():\n lword = word.lower()\n if font_families.has_key( lword ):\n family = font_families[ lword ]\n elif font_styles.has_key( lword ):\n style = font_styles[ lword ]\n elif font_weights.has_key( lword ):\n weight = font_weights[ lword ]\n elif lword == 'underline':\n underline = 1\n elif lword not in font_noise:\n try:\n point_size = int( lword )\n except:\n facename.append( word )\n return wx.Font( point_size, family, style, weight, underline,\n ' '.join( facename ) )\n except:\n pass\n raise TraitError, ( object, name, 'a font descriptor string',\n repr( value ) )",
"def test_metadata_regular_is_normal(self):\n have = False\n for x in self.metadata.fonts:\n if x.full_name.endswith('Regular') and x.style == 'normal':\n have = True\n self.assertTrue(have)",
"def com_google_fonts_check_ftxvalidator_is_available(ftxvalidator_is_available):\n if ftxvalidator_is_available:\n return PASS, \"ftxvalidator is available.\"\n else:\n return WARN, \"ftxvalidator is not available.\"",
"def test_metadata_regular_is_400(self):\n have = False\n for i in self.metadata.fonts:\n if i.filename.endswith('Regular.ttf') and i.weight == 400:\n have = True\n if not have:\n self.fail(('METADATA.json does not contain Regular font. At least'\n ' one font must be Regular and its weight must be 400'))",
"def available_text_fonts():\n bad = [u'acalc',\n u'acb',\n u'aco',\n u'acp']\n all = available_fonts()\n fonts = []\n for f in all:\n if (f == u'Series 60 ZDigi'):\n continue\n for b in bad:\n try:\n if (f.lower().startswith(b) and f[len(b)].isdigit()):\n break\n except IndexError:\n pass\n else:\n fonts.append(f)\n\n\n\n def compare(a, b):\n return -(a.lower() < b.lower())\n\n\n fonts.sort(compare)\n return fonts",
"def test_font_name_matches_family(self):\n\n for font_metadata in self.metadata.fonts:\n font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n if font_metadata.name != font.familyname:\n msg = '\"fonts.name\" property is not the same as TTF familyname'\n self.fail(msg)",
"def com_google_fonts_check_037(font):\n\n # In some cases we want to override the severity level of\n # certain checks in FontValidator:\n downgrade_to_warn = [\n # There are reports that this fontval check has an out-of-date\n # understanding of valid bits in fsSelection.\n # More info at:\n # https://github.com/googlei18n/fontmake/issues/414#issuecomment-379408127\n \"There are undefined bits set in fsSelection field\",\n\n # FIX-ME: Why did we downgrade this one to WARN?\n \"Misoriented contour\"\n ]\n\n # Some other checks we want to completely disable:\n disabled_fval_checks = [\n # FontVal E4012 thinks that\n # \"Versions 0x00010000 and 0x0001002 are currently\n # the only defined versions of the GDEF table.\"\n # but the GDEF chapter of the OpenType specification at\n # https://docs.microsoft.com/en-us/typography/opentype/spec/gdef\n # describes GDEF header version 1.3, which is not yet recognized\n # by FontVal, thus resulting in this spurious false-FAIL:\n \"The version number is neither 0x00010000 nor 0x0001002\",\n\n # These messages below are simply fontval given user feedback\n # on the progress of runnint it. It has nothing to do with\n # actual issues on the font files:\n \"Validating glyph with index\",\n \"Table Test:\",\n\n # No software is affected by Mac strings nowadays.\n # More info at: googlei18n/fontmake#414\n \"The table doesn't contain strings for Mac platform\",\n \"The PostScript string is not present for both required platforms\",\n\n # Font Bakery has got a native check for the xAvgCharWidth field\n # which is: com.google.fonts/check/034\n \"The xAvgCharWidth field does not equal the calculated value\",\n\n # The optimal ordering suggested by FVal check W0020 seems to only be\n # relevant to performance optimizations on old versions of Windows\n # running on old hardware. Since such performance considerations\n # are most likely negligible, we're not going to bother users with\n # this check's table ordering requirements.\n # More info at:\n # https://github.com/googlefonts/fontbakery/issues/2105\n \"Tables are not in optimal order\",\n\n # Font Bakery has its own check for required/optional tables:\n # com.google.fonts/check/052 - \"Font contains all required tables?\"\n \"Recommended table is missing\"\n ]\n\n # There are also some checks that do not make\n # sense when we're dealing with variable fonts:\n VARFONT_disabled_fval_checks = [\n # Variable fonts typically do have lots of self-intersecting\n # contours because they are used to draw each portion\n # of variable glyph features.\n \"Intersecting contours\",\n \"Intersecting components of composite glyph\",\n\n # DeltaFormat = 32768 (same as 0x8000) means VARIATION_INDEX,\n # according to https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2\n # The FontVal problem description for this check (E5200) only mentions\n # the other values as possible valid ones. 
So apparently this means FontVal\n # implementation is not up-to-date with more recent versions of the OpenType spec\n # and that's why these spurious FAILs are being emitted.\n # That's good enough reason to mute it.\n # More info at:\n # https://github.com/googlefonts/fontbakery/issues/2109\n \"The device table's DeltaFormat value is invalid\"\n ]\n\n from fontTools.ttLib import TTFont\n if is_variable_font(TTFont(font)):\n disabled_fval_checks.extend(VARFONT_disabled_fval_checks)\n\n try:\n import subprocess\n fval_cmd = [\n \"FontValidator\", \"-file\", font, \"-all-tables\",\n \"-report-in-font-dir\", \"-no-raster-tests\"\n ]\n subprocess.check_output(fval_cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n filtered_msgs = \"\"\n for line in e.output.decode().split(\"\\n\"):\n disable_it = False\n for substring in disabled_fval_checks:\n if substring in line:\n disable_it = True\n if not disable_it:\n filtered_msgs += line + \"\\n\"\n yield INFO, (\"Microsoft Font Validator returned an error code.\"\n \" Output follows :\\n\\n{}\\n\").format(filtered_msgs)\n except (OSError, IOError) as error:\n yield ERROR, (\"Mono runtime and/or \"\n \"Microsoft Font Validator are not available!\")\n raise error\n\n def report_message(msg, details):\n if details:\n if isinstance(details, list) and len(details) > 1:\n # We'll print lists with one item per line for\n # improved readability.\n if None in details:\n details.remove(None)\n\n # A designer will likely not need the full list\n # in order to fix a problem.\n # Showing only the 10 first ones is more than enough\n # and helps avoid flooding the report.\n if len(details) > 25:\n num_similar = len(details) - 10\n details = details[:10]\n details.append(f\"NOTE: {num_similar} other similar\"\n \" results were hidden!\")\n details = '\\n\\t- ' + '\\n\\t- '.join(details)\n return f\"MS-FonVal: {msg} DETAILS: {details}\"\n else:\n return f\"MS-FonVal: {msg}\"\n\n xml_report_file = f\"{font}.report.xml\"\n html_report_file = f\"{font}.report.html\"\n fval_file = os.path.join(os.path.dirname(font), 'fval.xsl')\n\n grouped_msgs = {}\n with open(xml_report_file, \"rb\") as xml_report:\n import defusedxml.lxml\n doc = defusedxml.lxml.parse(xml_report)\n\n for report in doc.iter('Report'):\n msg = report.get(\"Message\")\n details = report.get(\"Details\")\n\n disable_it = False\n for substring in disabled_fval_checks:\n if substring in msg:\n disable_it = True\n if disable_it:\n continue\n\n if msg not in grouped_msgs:\n grouped_msgs[msg] = {\"errortype\": report.get(\"ErrorType\"),\n \"details\": [details]}\n else:\n if details not in grouped_msgs[msg][\"details\"]:\n # avoid cluttering the output with tons of identical reports\n # yield INFO, 'grouped_msgs[msg][\"details\"]: {}'.format(grouped_msgs[msg][\"details\"])\n grouped_msgs[msg][\"details\"].append(details)\n\n # ---------------------------\n # Clean-up generated files...\n os.remove(xml_report_file)\n # FontVal internal detail: HTML report generated only on non-Windows due to\n # Mono or the used HTML renderer not being able to render XML with a\n # stylesheet directly. https://github.com/googlefonts/fontbakery/issues/1747\n if os.path.exists(html_report_file):\n os.remove(html_report_file)\n os.remove(fval_file)\n\n # ---------------------------\n # Here we start emitting the grouped log messages\n for msg, data in grouped_msgs.items():\n # But before printing we try to make the \"details\" more\n # readable. 
Otherwise the user would get the text terminal\n # flooded with messy data.\n\n # No need to print is as a list if wereally only\n # got one log message of this kind:\n if len(data[\"details\"]) == 1:\n data[\"details\"] = data[\"details\"][0]\n\n # Simplify the list of glyph indices by only displaying\n # their numerical values in a list:\n for glyph_index in [\"Glyph index \", \"glyph# \"]:\n if data[\"details\"] and \\\n data[\"details\"][0] and \\\n glyph_index in data[\"details\"][0]:\n try:\n data[\"details\"] = {'Glyph index': [int(x.split(glyph_index)[1])\n for x in data[\"details\"]]}\n break\n except ValueError:\n pass\n\n # And, finally, the log messages are emitted:\n if data[\"errortype\"] == \"P\":\n yield PASS, report_message(msg, data[\"details\"])\n\n elif data[\"errortype\"] == \"E\":\n status = FAIL\n for substring in downgrade_to_warn:\n if substring in msg:\n status = WARN\n yield status, report_message(msg, data[\"details\"])\n\n elif data[\"errortype\"] == \"W\":\n yield WARN, report_message(msg, data[\"details\"])\n\n else:\n yield INFO, report_message(msg, data[\"details\"])",
"def test_metadata_font_have_regular(self):\n # this tests will appear in each font\n have = False\n for i in self.metadata.fonts:\n if i.weight == 400 and i.style == 'normal':\n have = True\n\n self.assertTrue(have)",
"def test_metadata_fonts_fields_have_fontname(self):\n for x in self.metadata.fonts:\n font = Font.get_ttfont_from_metadata(self.operator.path, x)\n\n self.assertIn(font.familyname, x.name)\n self.assertIn(font.familyname, x.full_name)\n self.assertIn(\"\".join(str(font.familyname).split()),\n x.filename)\n self.assertIn(\"\".join(str(font.familyname).split()),\n x.post_script_name)",
"def IsOk(*args, **kwargs):\n return _gdi_.Font_IsOk(*args, **kwargs)",
"def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f",
"def test_check_familyname_matches_fontnames(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n _ = '%s: Family name \"%s\" does not match font name: \"%s\"'\n _ = _ % (font_metadata.filename, fm.name, font_metadata.name)\n self.assertEqual(font_metadata.name, fm.name, _)",
"def fontforge_skip_checks():\n return None",
"def fontforge_skip_checks():\n return None",
"def TestFontEncoding(*args, **kwargs):\n return _gdi_.TestFontEncoding(*args, **kwargs)",
"def test_metadata_contains_current_font(self):\n\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n\n is_canonical = False\n for font_metadata in fm.fonts:\n font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n\n _weights = []\n for value, intvalue in weights.items():\n if intvalue == font.OS2_usWeightClass:\n _weights.append(value)\n\n for w in _weights:\n current_font = \"%s %s\" % (font.familyname, w)\n if font_metadata.full_name != current_font:\n is_canonical = True\n\n if not is_canonical:\n v = map(lambda x: font.familyname + ' ' + x, _weights)\n msg = 'Canonical name in font expected: [%s] but %s'\n self.fail(msg % (v, font_metadata.full_name))",
"def com_google_fonts_check_078(ttFont):\n if ttFont.sfntVersion == b'\\x00\\x01\\x00\\x00' and ttFont.get(\n \"post\") and ttFont[\"post\"].formatType == 3.0:\n yield PASS, (\"TrueType fonts with a format 3.0 post table contain no \"\n \"glyph names.\")\n else:\n failed = False\n for name in ttFont.getGlyphOrder():\n if len(name) > 109:\n failed = True\n yield FAIL, (\"Glyph name is too long:\" \" '{}'\").format(name)\n if not failed:\n yield PASS, \"No glyph names exceed max allowed length.\"",
"def com_google_fonts_check_078(ttFont):\n if ttFont.sfntVersion == b'\\x00\\x01\\x00\\x00' and ttFont.get(\n \"post\") and ttFont[\"post\"].formatType == 3.0:\n yield PASS, (\"TrueType fonts with a format 3.0 post table contain no \"\n \"glyph names.\")\n else:\n failed = False\n for name in ttFont.getGlyphOrder():\n if len(name) > 109:\n failed = True\n yield FAIL, (\"Glyph name is too long:\" \" '{}'\").format(name)\n if not failed:\n yield PASS, \"No glyph names exceed max allowed length.\"",
"def test_check_canonical_styles(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n self.assertIn(font_metadata.style, self.CANONICAL_STYLE_VALUES)\n if self.is_italic(font_metadata):\n if font_metadata.style != 'italic':\n _ = \"%s: The font style is %s but it should be italic\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))\n else:\n if font_metadata.style != 'normal':\n _ = \"%s: The font style is %s but it should be normal\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))",
"def HasFont(self):\r\n\r\n return self._font != wx.NullFont",
"def test_configs_font(\n self):\n root = Tk()\n custom = font.Font(root, family='Helvetica', size=12)\n self.assertEqual(custom.cget('family'), 'Helvetica')\n fontSelect.font_style(custom, 'Times')\n self.assertEqual(custom.cget('family'), 'Times')\n fontSelect.font_size(custom, 18)\n self.assertEqual(custom.cget('size'), 18)",
"def test_check_more_than_one_fontName(self):\n fonts = []\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if isinstance(font,list):\n result = font\n break\n #here we are checking if fonts in pdf-doc contain a font-name list\n self.assertIsInstance(result,list)",
"def test_the_same_names_of_glyphs_across_family(self):\n glyphs = None\n for font_metadata in self.familymetadata.fonts:\n ttfont = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n if not glyphs:\n glyphs = len(ttfont.glyphs)\n\n if glyphs != len(ttfont.glyphs):\n self.fail('Family has a different glyphs\\'s names in fonts')",
"def selectfont(self, char):\n\n charcode = ord(char)\n for font in fontchecksequence:\n for fontrange in fontmapping[font]:\n if charcode in xrange(fontrange[0], fontrange[1]):\n return font\n return \"Helvetica\" # fallback, if no thirdparty font is installed",
"def test_check_only_one_fontName(self):\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)",
"def com_google_fonts_check_052(ttFont):\n REQUIRED_TABLES = {\n \"cmap\", \"head\", \"hhea\", \"hmtx\", \"maxp\", \"name\", \"OS/2\", \"post\"}\n OPTIONAL_TABLES = {\n \"cvt \", \"fpgm\", \"loca\", \"prep\", \"VORG\", \"EBDT\", \"EBLC\", \"EBSC\", \"BASE\",\n \"GPOS\", \"GSUB\", \"JSTF\", \"DSIG\", \"gasp\", \"hdmx\", \"LTSH\", \"PCLT\", \"VDMX\",\n \"vhea\", \"vmtx\", \"kern\"\n }\n # See https://github.com/googlefonts/fontbakery/issues/617\n #\n # We should collect the rationale behind the need for each of the\n # required tables above. Perhaps split it into individual checks\n # with the correspondent rationales for each subset of required tables.\n #\n # check/066 (kern table) is a good example of a separate check for\n # a specific table providing a detailed description of the rationale\n # behind it.\n\n optional_tables = [opt for opt in OPTIONAL_TABLES if opt in ttFont.keys()]\n if optional_tables:\n yield INFO, (\"This font contains the following\"\n \" optional tables [{}]\").format(\", \".join(optional_tables))\n\n if is_variable_font(ttFont):\n # According to https://github.com/googlefonts/fontbakery/issues/1671\n # STAT table is required on WebKit on MacOS 10.12 for variable fonts.\n REQUIRED_TABLES.add(\"STAT\")\n\n missing_tables = [req for req in REQUIRED_TABLES if req not in ttFont.keys()]\n if \"glyf\" not in ttFont.keys() and \"CFF \" not in ttFont.keys():\n missing_tables.append(\"CFF ' or 'glyf\")\n\n if missing_tables:\n yield FAIL, (\"This font is missing the following required tables:\"\n \" ['{}']\").format(\"', '\".join(missing_tables))\n else:\n yield PASS, \"Font contains all required tables.\""
] |
[
"0.66999376",
"0.6561339",
"0.6518208",
"0.6518208",
"0.6447985",
"0.64417946",
"0.64289904",
"0.6428117",
"0.64090306",
"0.64015275",
"0.63722837",
"0.63663197",
"0.63632476",
"0.63315845",
"0.6312507",
"0.6307867",
"0.6289573",
"0.6289573",
"0.6258024",
"0.62548333",
"0.62223166",
"0.62223166",
"0.62204033",
"0.609482",
"0.6045782",
"0.60389644",
"0.6021847",
"0.60152745",
"0.59859467",
"0.5980078"
] |
0.7043042
|
0
|
Instantiate the class with a database instance and an SMTP instance responsible for sending confirmation emails. Also set the available timeslots that people can book.
|
def __init__(self, database_manager=DataBaseManager(), emailer=EmailSender()):
self.database_manager = database_manager
self.emailer = emailer
# Set available timeslots
self.initial_time_slots = ['09:00:00',
'10:00:00',
'11:00:00',
'12:00:00',
'13:00:00',
'14:00:00',
'15:00:00',
'16:00:00',
'17:00:00']
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, user, password, _recipients, templatedir='templates'):\n\n self.user = user\n self.password = password\n self.recipient = _recipients if type (_recipients) is list else [_recipients]\n self.server = 'smtp.gmail.com'\n self.port = 587\n\n if os.path.isdir(templatedir):\n self.templatedir = templatedir\n else:\n self.templatedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), templatedir)\n\n self.env = Environment(loader=FileSystemLoader(self.templatedir))",
"def __init__(self, dbconfig, startdate, enddate):\n\n try:\n # TAO Postgres database configuration from dbconfig dictionary\n postgresdb = dbconfig['tao-postgres']\n\n # TAO MySQL database configuration from dbconfig dictionary\n mysqldb = dbconfig['tao-mysql']\n\n # connect to Postgres database\n self.pgconn = psycopg2.connect(**postgresdb)\n self.pgcursor = self.pgconn.cursor()\n\n # connect to MySQL database\n self.mysqlcon = mysql.connector.connect(**mysqldb)\n self.mysqlcursor = self.mysqlcon.cursor()\n print('connected to DB')\n\n # TAO admin users, to be discarded from Stats\n self.adminusers = dbconfig['tao-admins']\n self.startdate = startdate\n self.enddate = enddate\n\n print(startdate)\n print(enddate)\n\n print('Connected')\n\n\n except psycopg2.DatabaseError as err:\n print(err)\n raise(err)",
"def __init__(self):\r\n self.window = 'dag_emailWindow'\r\n self.title = 'dagRenderMail'\r\n self.size= (195, 290);\r\n \r\n #Sets some defaults\r\n self.subject='Render Complete on '+str(dag_compName());\r\n self.login= '[email protected]'\r\n self.password='Password'\r\n self.to='[email protected]'\r\n self.time='10'\r\n self.smtp='smtp.gmail.com:587'\r\n self.render = ''\r\n \r\n #Default message body\r\n self.body='Your render on '+str(dag_compName())+' is now complete.' + \"this message is automatically generated by dagMail. \\n dagmail script by Dhruv Govil www.dgovil.com \\n\\n\\n\"\r\n \r\n \r\n #default name for settings file. Can be anything. \r\n self.config='dagmail.settings'\r\n \r\n #Default MEL scripts. Don't change.\r\n self.preScr = 'python \"import dagMail\";python \"dagMail.dagMail.preScript()\"'\r\n self.postScr = 'python \"import dagMail\";python \"dagMail.dagMail.postScript()\"'",
"def __init__(self, user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = None):\n # Start Time, keep track of the elapsed time\n self.originalTime = time.time()\n # User that uses the monitoring app, must exist !\n self.user = user\n\n # Queue to transmit all data\n self.queueTwoMin = queueTwoMin\n self.queueTenMin = queueTenMin\n self.queueHour = queueHour\n self.queueAlerts = queueAlerts\n\n # Queue for termination\n self.queueTermination = queueTermination\n\n # Alert Storage, to check whether raised alert are to be sent\n self.alertsDic = {}\n if testDic:\n self.alertsDic = testDic\n self.mailer = mailSender.MailSender(mailSender.mailrecipient)\n\n # Start monitoring\n self.monitorAll()",
"def __init__(self, host, user, password, port=25):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n\n self.smtp = smtplib.SMTP()",
"def create_email_job(app, db):\n from app.models import Lembrete\n lock = threading.Lock()\n\n def send_email():\n with lock:\n sp = datetime.now(tz=sao_paulo_tz)\n agora = datetime(\n year=sp.year,\n month=sp.month,\n day=sp.day,\n hour=sp.hour,\n minute=sp.minute\n )\n lembretes = Lembrete.query.filter(\n Lembrete.data_notificacao <= agora\n ).all()\n print('Enviando emails')\n if lembretes:\n for lembrete in lembretes:\n texto = lembrete.texto\n nome = ''\n veiculo = ''\n telefone = ''\n celular = ''\n tel_comercial = ''\n e_mail = ''\n if lembrete.cliente is not None:\n nome = lembrete.cliente.nome\n telefone = lembrete.cliente.telefone\n celular = lembrete.cliente.celular\n tel_comercial = lembrete.cliente.telefone_comercial\n e_mail = lembrete.cliente.email\n if lembrete.cliente is not None:\n veiculo = lembrete.veiculo.descricao()\n\n mensagem = \"\"\"\n Nome: {0}\n Telefone: {1}\n Celular: {2}\n Telefone Comercial: {3}\n E-mail: {4}\n Veículo: {5}\n Lembrete: {6}\n \"\"\".format(\n nome,\n telefone,\n celular,\n tel_comercial,\n e_mail,\n veiculo,\n texto\n )\n email = MIMEText(mensagem)\n\n me = app.config['EMAIL_ME']\n you = app.config['EMAIL_YOU']\n password = app.config['EMAIL_ME_PASSWORD']\n smtp = app.config['EMAIL_SMTP']\n smtp_port = app.config['EMAIL_SMTP_PORT']\n\n email['Subject'] = 'Lembrete: {0}|{1}'.format(\n nome, veiculo\n )\n email['From'] = me\n email['To'] = you\n\n s = smtplib.SMTP(smtp, smtp_port)\n s.ehlo()\n s.starttls()\n s.login(me, password)\n s.sendmail(me, [you], email.as_string())\n s.quit()\n # excluindo o lembrete\n db.session.delete(lembrete)\n db.session.commit()\n return send_email",
"def __init__(self):\n gr.sync_block.__init__(\n self,\n name='Send Timed Msgs', # will show up in GRC\n in_sig=None,\n out_sig=None,\n )\n self.message_port_register_out(pmt.intern(\"cron_ft8\"))\n self.message_port_register_out(pmt.intern(\"cron_ft4\"))",
"def __init__(self, player_email, rest_interval=3600):\n self.player_email = player_email\n self.rest_interval = rest_interval",
"def __init__(self, auth=False):\n self.smtp = smtplib.SMTP(host=EMAIL_HOST, port=EMIAL_HOST_PORT)\n self.smtp.ehlo()\n if auth:\n self.smtp.login(EMAIL, EMAIL_PASSWORD)",
"def __init__(self, smtp_server = 'localhost', mail_from = None, mail_to = None):\n self.smtp_server = smtp_server\n self.mail_from = mail_from\n self.mail_to = mail_to\n\n # Dictionary to track sonde IDs\n self.sondes = {}\n\n # Input Queue.\n self.input_queue = Queue()\n\n # Start queue processing thread.\n self.input_processing_running = True\n self.input_thread = Thread(target = self.process_queue)\n self.input_thread.start()\n\n self.log_info(\"Started E-Mail Notifier Thread\")",
"def __init__(self, smtp_server, smtp_user, smtp_password,\n smtp_port=25, is_with_tls=False):\n self.smtp_server = smtp_server\n self.smtp_port = smtp_port\n self.smtp_user = smtp_user\n self.smtp_password = smtp_password\n self.is_with_tls = is_with_tls",
"def __init__(self):\n self.meeting_DAO = MeetingDAO()\n self.meeting_person_DAO = MeetingPersonDAO()\n self.person_DAO = PersonDAO()\n self.date_format = \"%d-%m-%Y %H:%M\"",
"def __init__(self, config):\n # This generates the From address by stripping the part until the first\n # period from the mail server address and won't work always.\n self.fromaddr = config[\"mail\"][\"user\"] + \"@\" + \\\n config[\"mail\"][\"mailserver\"].partition(\".\")[2]\n\n # starts a client session with the SMTP server\n self.s = smtplib.SMTP(config[\"mail\"][\"mailserver\"])\n context = ssl.create_default_context()\n self.s.starttls(context=context)\n self.s.login(config[\"mail\"][\"user\"], config[\"mail\"][\"passphrase\"])",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._recipient_email_inst = None",
"def __init__(self, sender_id, receivers_id, period, deadline=0, size=1000, starting_time=0, end_to_end=None):\n self.__set_sender_id(sender_id)\n self.__set_receivers_id(receivers_id)\n self.__set_period(period)\n self.__set_deadline(deadline)\n self.__set_size(size)\n self.__set_starting_time(starting_time)\n self.__set_end_to_end_delay(end_to_end)",
"def __init__(self, url_to_check, course_name, to_address, from_address, delay=1800):\n\n # Course to check\n self.url = url_to_check\n self.course = course_name\n\n # Address to notify\n self.to_address = to_address\n\n # Address to notify from\n self.from_address = from_address\n self.password = keyring.get_password(\"system\", from_address)\n\n # Seconds to delay until the next check\n self.delay = delay\n self.s = sched.scheduler(time.time, time.sleep)",
"def __init__(self, email):\n self.EmailAddress = email\n self.Lectures = [False, False, False, False]\n self.Sessions = [\"\", \"\", \"\", \"\"]",
"def __init__(self):\n self.usablerooms = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26,\n 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42]\n\n buzztimes = [110, 110.5, 111, 111.5, 113, 113.5, 114, 114.5,\n 211, 211.5, 212, 212.5, 214, 214.5, 215, 215.5, ]\n self.buzzerschedule = list(map(lambda time: interval([time, time + .5]), buzztimes))\n\n # fill exam schedule\n examtimes = [110, 111, 114, 115, 118, 119, 120, 209, 211, 213, 214, 215]\n self.examschedule = list(map(lambda time: interval([time, time + 1]), examtimes))\n self.militaryschedule = list(map(lambda time: interval([time, time + 1]), [112, 117, 217]))\n self.geographyschedule = list(map(lambda time: interval([time, time + 1]), [112, 117, 217]))\n self.csaexamschedule = list(map(lambda time: interval([time, time + 1]), [116, 210]))\n\n # fill side schedule\n self.citizenschedule = list(map(lambda time: interval([time, time + .5]), [115, 209]))\n self.sandeschedule = list(map(lambda time: interval([time, time + .5]), [115.5, 209.5]))\n self.anniversaryschedule = list(map(lambda time: interval([time, time + .5]), [213, 213.5]))\n\n # fill bowl schedule\n self.bowlschedule = list(map(lambda time: interval([time, time + 3]), [118, 218]))\n\n # fill fqn schedule\n self.fqnschedule = [interval([118, 118 + 2])]\n\n \"\"\" Initialize rooms. \"\"\"\n # start with buzzer rooms\n self.buzzerrooms = []\n for i, item in enumerate(self.buzzerschedule):\n roundrooms = list(map(lambda j: BuzzerRoom(self.buzzerschedule, i, j), ROOM_RANGE))\n self.buzzerrooms.append(roundrooms)\n\n # anniversary rooms\n self.anniversaryrooms = []\n for i, item in enumerate(self.anniversaryschedule):\n roundrooms = list(map(lambda j: SideEventRoom(\"anniversary\", self.anniversaryschedule, i, j), ROOM_RANGE))\n self.anniversaryrooms.append(roundrooms)\n\n # sports and enterinament rooms\n self.sanderooms = []\n for i, item in enumerate(self.sandeschedule):\n roundrooms = list(map(lambda j: SideEventRoom(\"sande\", self.sandeschedule, i, j), ROOM_RANGE))\n self.sanderooms.append(roundrooms)\n\n # citizenship bee rooms\n self.citizenrooms = []\n for i, item in enumerate(self.citizenschedule):\n roundrooms = list(map(lambda j: SideEventRoom(\"citizen\", self.citizenschedule, i, j), ROOM_RANGE))\n self.citizenrooms.append(roundrooms)\n\n # regular exam rooms\n k = xrange(len(self.examschedule))\n self.examrooms = list(map(lambda j: ExamRoom(\"exam\", self.examschedule, j), k))\n\n # military exam rooms\n k = xrange(len(self.militaryschedule))\n self.militaryrooms = list(map(lambda j: ExamRoom(\"military\", self.militaryschedule, j), k))\n\n # geography subject exam rooms\n k = xrange(len(self.geographyschedule))\n self.geographyrooms = list(map(lambda j: ExamRoom(\"geography\", self.geographyschedule, j), k))\n\n # csa exam rooms\n self.csarooms = []\n for i in xrange(len(self.csaexamschedule)):\n cit = ExamRoom(\"cit\", self.csaexamschedule, i)\n sport = ExamRoom(\"sports\", self.csaexamschedule, i)\n self.csarooms.append((cit, sport))",
"def __init__(self, email, pwd, smtp_server='imap.gmail.com', smtp_port=993):\n self._email = email\n self._pwd = pwd\n\n self._mailconn = imaplib.IMAP4_SSL(smtp_server, smtp_port)\n self._mailconn.login(self._email, self._pwd)\n self._mailconn.select(readonly=False)\n self._client = boto3.client('dynamodb')",
"def __init__(self):\n abstracttask.Task.__init__(self)\n self._ticket_id = ''# this is an implementation detail of jutda task tracker\n self.timespent = datetime.timedelta(0) # not editable permenently, but saves data from hours\n self.starttime = datetime.datetime.now() # ticket creation time in this implementation \n self.isappointment = False # always false for these\n self.followups = [] # not likely to be used, since other implementation doesn't have it.\n self._orig = None\n self.submitter_email = None",
"def __init__(self, email, password):\n self.email = email\n self.password = password\n self.plan = None\n self.customer_token = None\n self.subscription_token = None\n self.susbcription_end = None\n\n if email and password:\n self._set_user_info()",
"def __init__(self, **kwargs):\n self.subscriberid = None # kwargs.get('subscriberid', str(uuid.uuid4()))\n self.email = kwargs['email']\n self.first_name = kwargs.get('first_name', \"Feedback\")\n self.last_name = kwargs.get('last_name', \"Test\")\n self.company = kwargs.get('company', \"SmartBrief\")\n self.title = kwargs.get('title', \"Selenium Tester\")\n self.city = kwargs.get('city', \"Washington\")\n self.state = kwargs.get('state', \"DC\")\n self.country = kwargs.get('country', \"United States\")\n self.zipcode = kwargs.get('zipcode', \"20004\")\n self.mail_format_id = 1\n self.marketing_message = 'true'\n # self.position_level = kwargs.get('positionLevel')\n # etc",
"def __init__(self):\n\n self.chat_id = None\n self.text = None\n self.first_name = None\n self.last_name = None\n self.accounts = {}\n self.venues = ['Room 1', 'Room 2', 'Room 3', 'Room 4']\n self.book_date = None\n self.book_time = None",
"def __init__(self):\n self.__ALLOWED_EXTENSIONS__ = {\"txt\", \"doc\", \"docx\", \"xls\", \"xlsx\", \"pdf\", \"png\", \"jpg\", \"jpeg\", \"gif\", \"zip\"}\n self.__APP_PATH__ = path.dirname(path.realpath(__file__))\n self.__APP_DIR__ = self.__APP_PATH__.split(\"/\")[-1]\n self.__SPECIAL_FILES__ = [\"request.saved\", \"request.submitted\", \"request.processed\", \"request.returned\", \"request.voided\", \"submission.json\"]\n self.__TEST_EMAILS__ = [[\"Damian Jimenez\", \"[email protected]\"]]\n self.__PROD_EMAILS__ = [[\"CSE Webmaster\", \"[email protected]\"], [\"Chengkai Li\", \"[email protected]\"]]\n self.mailer = Mailer()",
"def __init__(self, sender, user):\r\n self.user = user\r\n self.sender = Email(sender)\r\n self.recipient = Email(self.user.getEmail())\r\n self.sg = sendgrid.SendGridAPIClient(apikey = \"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\")",
"def __init__(__self__, *,\n can_defer: bool,\n can_reschedule: bool,\n schedule_deadline_time: str,\n start_time: str):\n pulumi.set(__self__, \"can_defer\", can_defer)\n pulumi.set(__self__, \"can_reschedule\", can_reschedule)\n pulumi.set(__self__, \"schedule_deadline_time\", schedule_deadline_time)\n pulumi.set(__self__, \"start_time\", start_time)",
"def __init__(self, mailhost, username, password, fromaddr, toaddrs, subject):\r\n logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject)\r\n self.username = username\r\n self.password = password",
"def __init__(self,\n fromAddr,\n fromName = None,\n smtpHost = None,\n smtpPort = None,\n smtpTimeout = None,\n adminAddrs = None,\n mock = False,\n sendMails = True,\n actionLogger = None):\n if fromName is None: fromName = self.DEFAULT_FROM_NAME\n if smtpHost is None: smtpHost = self.DEFAULT_SMTP_HOST\n if smtpPort is None : smtpPort = self.DEFAULT_SMTP_PORT\n if smtpTimeout is None : smtpTimeout = self.DEFAULT_SMTP_TIMEOUT\n if adminAddrs is None : adminAddrs = self.DEFAULT_ADMIN_ADDRS\n\n self.__fromAddrs = fromAddr\n self.__fromName = fromName\n self.__smtpHost = smtpHost\n self.__smtpPort = smtpPort\n self.__adminAddrs = adminAddrs\n self.__mock = mock\n self.__sendMails = sendMails\n self.__actionLogger = actionLogger\n if self.__actionLogger is None:\n self.__actionLogger = logging.getLogger()\n self.__smtpTimeout = smtpTimeout",
"def __init__(self):\n try:\n self._chat_db = sqlite3.connect(CHAT_DB_PATH)\n except OperationalError:\n print(\"Cannot access chat database.\\nGo to Settings->Security and Privacy->Privacy->Full Disk Access.\\n\"\n \"Give access to the application you are running and restart the program.\")\n sys.exit(1)\n\n self._contacts = Contacts(self._chat_db).get_contacts_df()\n\n try:\n self._message_db = sqlite3.connect(WEEKLY_MESSAGES_DB_PATH)\n except OperationalError:\n print(\"Could not connect to the database server.\")\n sys.exit(1)",
"def __init__(self, initial_date=None, until_date=None):\n engine_string = (\n 'host=localhost dbname={0} user={1} password={2}').format(\n DATABASE_NAME,\n os.environ.get('REAL_ESTATE_DATABASE_USERNAME'),\n os.environ.get('REAL_ESTATE_DATABASE_PASSWORD'))\n self.conn = psycopg2.connect(engine_string)\n self.cursor = self.conn.cursor()\n\n self.initial_date = initial_date\n self.until_date = until_date\n\n log.debug('self.initial_date: {}'.format(self.initial_date))\n log.debug('self.until_date: {}'.format(self.until_date))"
] |
[
"0.593538",
"0.57756203",
"0.5749966",
"0.5713733",
"0.57021195",
"0.5693684",
"0.56859493",
"0.56799823",
"0.566929",
"0.56081414",
"0.55958164",
"0.5573325",
"0.55702955",
"0.55215466",
"0.54456973",
"0.5395605",
"0.5392591",
"0.53879774",
"0.5381332",
"0.5368475",
"0.5364165",
"0.535877",
"0.53489155",
"0.53405786",
"0.53319174",
"0.5315882",
"0.53076804",
"0.5296182",
"0.528084",
"0.52723163"
] |
0.7780047
|
0
|
Get the date seven days after the current day.
|
def next_seven_day(self):
today = datetime.date.today()
week_next = today + datetime.timedelta(days=7)
return week_next.strftime('%Y-%m-%d')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)",
"def get_prev_weekday(x: Optional[Date] = None) -> Date:\n ## Get the day:\n x = x or get_today()\n\n ## Define the offset:\n offset = max(1, (x.weekday() + 6) % 7 - 3)\n\n ## Compute the day and return:\n return x - TimeDelta(days=offset)",
"def get_week_end(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof + TimeDelta(days=6 - (asof.isoweekday() - 1) % 7)",
"def get_next_weekend():\n d = datetime.date.today()\n # day 5 for saturday\n t = datetime.timedelta((7 + 5 - d.weekday()) % 7)\n return (d + t).strftime('%d-%m-%Y')",
"def GetFridayOfLastFullWeek(self):\n today = dt.date.today()\n dow = today.weekday()\n if (dow >= 4):\n return today + dt.timedelta(days=-(7 + dow - 4))\n else:\n return today + dt.timedelta(days=-(7 + (7 + 4 - dow)))",
"def dateByDelta(daysInTheFuture=1):\n\ttmDT = datetime.today() + timedelta(days=daysInTheFuture)\n\tnewDate = datetime(tmDT.year,tmDT.month,tmDT.day).date()\n\treturn newDate",
"def get_due_date(self) -> datetime.datetime:\r\n if self.borrow_date:\r\n return self.borrow_date + datetime.timedelta(days=4 * 7)\r\n else:\r\n return None",
"def previous_weekday(date, weekday):\n delta = date.weekday() - weekday\n if delta < 0:\n delta += 7\n return date + timedelta(days=-int(delta))",
"def get_today() -> datetime.date:\n return datetime.date.today()",
"def test_date_accept_last_week(self):\n spi_search = \"find date last week\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()\\\n +dateutil.relativedelta.relativedelta(days=-(7+(datetime.datetime.today().isoweekday()%7))), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)",
"def create_past_date(self, days):\n past_date = datetime.now() - timedelta(days=days)\n return past_date.isoformat()",
"def next_deadline():\n\n today = date.today()\n\n days_since_starting_sunday = (today - date(2016, 9, 4)).days\n\n if days_since_starting_sunday % 14 < 7:\n return next_sunday(next_sunday(today))\n else:\n return next_sunday(today)",
"def get_time_last_week():\n current_time = arrow.utcnow() # Get the current UTC Time\n return current_time.shift(weeks=-1) # Return the shifted time by -1 weeks",
"def get_date_in_two_weeks():\n today = datetime.datetime.today()\n date_in_two_weeks = today + datetime.timedelta(days=14)\n return date_in_two_weeks.date()",
"def work_days_since_now(days, obj=None):\n if not obj:\n obj = datetime.now()\n counter = days\n newdate = obj + timedelta(0) # Like .copy()\n while counter >= 0:\n newdate = newdate + timedelta(1)\n if newdate.weekday() not in [6, 7]:\n counter -= 1\n return newdate",
"def get_weekday():\n result = datetime.today().weekday() + 1\n return result",
"def get_days_old(days):\n days = int(days)\n current_time = datetime.datetime.today()\n days_after = datetime.timedelta(days)\n new_date = current_time - days_after\n new_date = new_date.strftime(\"%d-%b-%Y\")\n return new_date",
"def largest_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Samoa observes UTC+14 in Summer\n return datetime.now(timezone(timedelta(hours=14))).strftime(\"%Y-%m-%d\")",
"def _today() -> datetime.date:\n return datetime.today().date()",
"def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]",
"def get_last_seven_days_label(self):\n return gettext_lazy('Last seven days')",
"def get_tomorrow(x: Optional[Date] = None) -> Date:\n return (x or get_today()) + TimeDelta(days=1)",
"def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)",
"def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)",
"def get_previous_week(self, startdate):\n # day 0 is Monday. Sunday is 6.\n dow_today = startdate.weekday()\n\n if dow_today == 6:\n days_ago_saturday = 1\n else:\n # To get last saturday, we need to go to day 0 (Monday), then two more days.\n days_ago_saturday = dow_today + 2\n\n # Make a timedelta object so we can do date arithmetic.\n delta_saturday = datetime.timedelta(days=days_ago_saturday)\n # saturday is now a date object representing last saturday\n saturday = startdate - delta_saturday\n # timedelta object representing '6 days'...\n delta_prevsunday = datetime.timedelta(days=6)\n # Making a date object. Subtract the 6 days from saturday to get \"the Sunday before that\".\n prev_sunday = saturday - delta_prevsunday\n\n last_week = (prev_sunday, saturday)\n return last_week",
"def get_next_day(self):\n pass",
"def get_7days_mau(self):\n fday = self.baseDay-timedelta(days=7)\n return self.mau(fday=fday, tday=self.baseDay)",
"def TODAY():\n return datetime.date.today()",
"def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon",
"def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")"
] |
[
"0.69482285",
"0.67034215",
"0.6692377",
"0.66036826",
"0.6488771",
"0.6337313",
"0.63113797",
"0.6233774",
"0.61356586",
"0.61312723",
"0.6130495",
"0.6126569",
"0.6113718",
"0.60930926",
"0.6027795",
"0.5993995",
"0.5991983",
"0.5979135",
"0.59749365",
"0.59516615",
"0.5949404",
"0.59491986",
"0.5932134",
"0.5929202",
"0.590317",
"0.58917624",
"0.5883559",
"0.5880776",
"0.5872259",
"0.5849249"
] |
0.7539515
|
0
|
Convert a date into weekday string form
|
def get_the_weekday(self,date):
date_convert = date.split('-')
week_days = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")
date_list = [int(i) for i in date_convert]
day = datetime.date(date_list[0], date_list[1], date_list[2])
# convert weekday into digit (eg Mon -> 0,)
num_day = day.weekday()
day_as_string = week_days[num_day]
return day_as_string
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def day_of_week(date: datetime) -> str:\n weekday = date.weekday()\n return calendar.day_name[weekday]",
"def date_to_day_of_week(date):\n return date.weekday()",
"def get_weekday_number(date):\n return date.strftime('%w')",
"def date_day_of_week(date):\n day_of_week = date.strftime('%A')\n return day_of_week",
"def format_weekday(time):\n return time.strftime(\"%A\").lower()",
"def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()",
"def weekday(day):\n return (day % 7) - 1",
"def weekdayname(self, date):\n weekday = weekdayname_msgid(date.dow())\n return translate(weekday, domain='plonelocales',\n context=self.request, default=weekday)",
"def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]",
"def day_of_week(self) -> str:\n return self.elements[4]",
"def date_with_day_of_week_appended(mydate): \n import datetime\n month, day, year = (int(x) for x in mydate.split('/')) \n shortened_year = abs(year) % 100 \n day_of_week = datetime.date(year, month, day).strftime(\"%A\")\n return \"%s/%s/%s %s\" % (month,day,shortened_year, day_of_week)",
"def day(dt: datetime.datetime) -> str:\n day: str = dt.strftime(\"%A\")\n return day",
"def get_day_string(self, date_obj):\n return date_obj.strftime('%A')[:3].upper()",
"def day_name(x):\r\n if x==0:\r\n return \"Sunday\"\r\n elif x==1:\r\n return \"Monday\"\r\n elif x==2:\r\n return \"Tuesday\"\r\n elif x==3:\r\n return \"Wednesday\"\r\n elif x==4:\r\n return \"Thursday\"\r\n elif x==5:\r\n return \"Friday\"\r\n elif x==6:\r\n return \"Saturday\"",
"def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")",
"def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")",
"def number_to_day(n):\n if n == 1:\n return \"Sunday\"\n elif n == 2:\n return \"Monday\"\n elif n == 3:\n return \"Tuesday\"\n elif n == 4:\n return \"Wednesday\"\n elif n == 5:\n return \"Thursday\"\n elif n == 6:\n return \"Friday\"\n elif n == 7:\n return \"Saturday\"",
"def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')",
"def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)",
"def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)",
"def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)",
"def date_form(day):\r\n new_day = \"\"\r\n if day == \"Monday\":\r\n new_day = \"Poniedziałek\"\r\n elif day == \"Tuesday\":\r\n new_day = \"Wtorek\"\r\n elif day == \"Wednesday\":\r\n new_day = \"Środa\"\r\n elif day == \"Thursday\":\r\n new_day = \"Czwartek\"\r\n elif day == \"Friday\":\r\n new_day = \"Piątek\"\r\n elif day == \"Saturday\":\r\n new_day = \"Sobota\"\r\n elif day == \"Sunday\":\r\n new_day = \"Niedziela\"\r\n return new_day",
"def format_dow(value):\n if value:\n return [\n 'Sunday',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n ][value]\n else:\n return 'N/A'",
"def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")",
"def day_of_the_week(arg):",
"def get_weekday(self, as_str=False):\n\n # First we get the first 8 bits stored in the weekday register\n # and translate it to an integer\n wd_8bits = self.__read_register(_REGISTER_WEEKDAY)\n\n # Then we extract the weekday and return it\n wd = wd_8bits & 0x07 # 0x07 = 0b00000111\n\n if as_str is True: # if we want the weekday's name\n wd = WEEKDAY_STR[wd]\n\n return wd",
"def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])",
"def formatWeekDay(self, day):\n return '<th class=\"day\">%s</th>' % day_abbr[day]",
"def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS",
"def WeekdayName(num, length=99):\n if num < 1 or num > NUM_WEEKDAYS:\n raise ValueError('Bad weekday number')\n return _WEEKDAY_NAMES[num][:length]"
] |
[
"0.78104866",
"0.76651293",
"0.7552025",
"0.7451308",
"0.7268718",
"0.71907043",
"0.7140627",
"0.7097642",
"0.7082069",
"0.70273197",
"0.7010225",
"0.69217354",
"0.6881678",
"0.6690514",
"0.6657678",
"0.6657678",
"0.66044676",
"0.6601838",
"0.6588213",
"0.6588213",
"0.6588213",
"0.658627",
"0.6577737",
"0.6577493",
"0.65458846",
"0.6500512",
"0.6479579",
"0.645044",
"0.6443347",
"0.64430577"
] |
0.79862875
|
0
|
Check whether a given date is a weekday; if not, tell the user they cannot book that day. Also check that the booking date falls within the allowed range of one week.
|
def check_weekday(self, date):
week_next = self.next_seven_day()
today = datetime.date.today().strftime('%Y-%m-%d')
if not date or date > week_next or date < today: # check the date is within one week
return False, "Sorry you can only booking consultation up to next one week. Your booking date must before {}".format(week_next)
try:
day_as_string = self.get_the_weekday(date)
if day_as_string == "Saturday" or day_as_string == "Sunday":
logger.info("Sorry, there is no consultation on weekends")
return False, "Sorry, there is no consultation on weekends"
else:
logger.info("It is on next {}".format(day_as_string))
return True, "Your booking has been made on {} {}".format(day_as_string, date)
except ValueError as e:
logger.error(str(e))
return False, "Please try again"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_weekday_of_date(self, date):\n return date.isoweekday() % 7",
"def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False",
"def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False",
"def test_check_args_weekend(self):\n test_date = dt.datetime(2021, 6, 20, 11, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests during weekdays only.\" in str(context.exception))",
"def check_day(self, day_of_week):\n\n day_of_week -= 1\n if (day_of_week == -1):\n self.day_of_week = 6\n else:\n self.day_of_week = day_of_week",
"def is_weekend(date):\n \n return date.weekday() == 5 or date.weekday() == 6",
"def business_day(self): \n\n if self.time_stamp.weekday() not in (5, 6) and not holiday(self.time_stamp):\n return True \n return False",
"def is_working_day_appointment(self):\n # function helps hide appointments on weekend\n return 0 <= self.date.weekday() <= 4",
"def is_weekday(dtObj):\n return dtObj.weekday() < 5",
"def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)",
"def is_dayofweek(day, today):\n if isinstance(today, datetime):\n int_day = int(day)\n if today.weekday() == int_day - 1:\n return True\n return False\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))",
"def get_day_of_week_from_user():\n while True:\n day = input('Select the month to explore. Enter from monday, tuesday, wednesday, thursday, friday, '\n 'saturday, sunday or all: ').lower()\n\n if day in VALID_DAYS:\n confirm = input(\"You have selected {}. Press 'y' to confirm: \".format(day.title()))\n\n if confirm == 'y':\n break\n else:\n print(\"Try again.\\n\")\n else:\n print(\"Invalid input: {}. Try again.\\n\".format(day))\n return day",
"def is_weekend() -> bool:\n return datetime.today().weekday() > 3",
"def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date",
"def week_init():\n week = input('Week to check: MM/DD/YYYY\\n')\n week = dtt.datetime.strptime(week,'%m/%d/%Y') #turns input to a datetime\n beforeday = input('Check days before date (Press enter to use today): MM/DD/YYYY\\n') or dtt.date.today()\n if (beforeday != dtt.date.today()):\n beforeday = dtt.datetime.strptime(beforeday,'%m/%d/%Y')\n return week, beforeday",
"def test_sunday(self):\n date = datetime.date(1980, 5, 4)\n self.assertEqual(date.isoweekday(), 7)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)",
"def filter_dates(date):\n if not (date.weekday() in [5, 6] or (date.hour > 22 and date.minute > 30)):\n return True\n else:\n return False",
"def test_no_weekend_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2014, 10, 14), value=0.035657),\n ]\n output = self.expander._daily_workday_indicator_expander(input_)\n no_weekend_dates = [record.date.weekday() < 5 for record in output]\n\n self.assertTrue(all(no_weekend_dates))",
"def start_day_of_weekend(self, start_day_of_weekend):\n allowed_values = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n if start_day_of_weekend.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for start_day_of_weekend -> \" + start_day_of_weekend)\n self._start_day_of_weekend = \"outdated_sdk_version\"\n else:\n self._start_day_of_weekend = start_day_of_weekend",
"def test_date_accept_this_week(self):\n spi_search = \"find date this week\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()\\\n +dateutil.relativedelta.relativedelta(days=-(datetime.datetime.today().isoweekday()%7)), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)",
"def isoweekday(self):\n return 0",
"def isoweekday(self):\n return 0",
"def is_weekday(day, halfDay):\n hours, days = halfDay.split('x')\n if day <= int(days)-1:\n return True\n else:\n return False",
"def test_tuesday(self):\n date = datetime.date(1982, 5, 4)\n self.assertEqual(date.isoweekday(), 2)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())",
"def isoweekday(self, *args, **kwargs): # real signature unknown\r\n pass",
"def day_of_the_week(arg):",
"def is_valid_day (val):\n if len(val) == 2 and count_digits(val) == 2:\n day = int(val)\n return day > 0 and day < 32\n return False",
"def make_tuesday(date):\n offset = (date.weekday() - 1) % 7\n tuesday = date - datetime.timedelta(days=offset)\n # Ensure that the database has this date\n with get_dbconn(\"postgis\") as conn:\n cursor = conn.cursor()\n cursor.execute(\"SELECT max(valid) from usdm\")\n maxdate = cursor.fetchone()[0]\n if maxdate is not None:\n tuesday = min([tuesday, maxdate])\n return tuesday"
] |
[
"0.7051321",
"0.6796055",
"0.67910826",
"0.6701195",
"0.65922606",
"0.6474264",
"0.6421613",
"0.6387774",
"0.63600135",
"0.6227869",
"0.61653614",
"0.6164587",
"0.61135364",
"0.60949105",
"0.60126436",
"0.5992501",
"0.5977553",
"0.59321845",
"0.5889374",
"0.58726126",
"0.58097064",
"0.5777408",
"0.5777408",
"0.5776437",
"0.5775914",
"0.57708865",
"0.57619214",
"0.5706275",
"0.5693726",
"0.5654776"
] |
0.7207394
|
0
|
Get the list of times that are already booked for a given date and course
|
def get_time_slots(self, cid, date):
query = "SELECT time from consultation where cid = %s and date = %s"
inputs = (cid, date)
array_book = self.database_manager.execute_query(query, inputs)
array_book = [e[0] for e in array_book]
booked = array_book if array_book else []
return booked
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_avail_time_slots(self, cid, date):\n booked = self.get_time_slots(cid, date)\n avail_time_slots = []\n for time in self.initial_time_slots:\n if time not in booked:\n avail_time_slots.append(time)\n return avail_time_slots",
"def consultation_booking_query(self, cid, sid, time, date):\n if not self.check_course_exist(cid):\n return ConsultationError.INVALID_COURSE.value\n is_weekday, feedback = self.check_weekday(date)\n time = self.round_time(time)\n if is_weekday:\n try:\n avail_list = self.get_avail_time_slots(cid.upper(), date) # return available time slot list\n logger.debug(avail_list)\n if time in avail_list:\n self.add_consultation(cid, sid, time, date) # add into database\n self.emailer.send_confirm_booking(cid=cid, time=time, date=date, receiver='[email protected]')\n return \"{}\".format(feedback)\n else:\n if not avail_list:\n return \"Sorry, there is no available time slot on date\"\n result = \"Sorry this time slot has been booked, \" \\\n \"please choose another one from following time slots on {}\".format(date)\n return '{}: {}'.format(result, ', '.join(avail_list))\n except ValueError:\n logger.error(\"Invalid Input\")\n return\n else:\n logger.debug(feedback)\n return feedback",
"def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)",
"def available_hours(\n self,\n requested_date: datetime,\n student: \"Student\" = None,\n duration: int = None,\n only_approved: bool = False,\n places: Tuple[Optional[str]] = (None, None),\n ) -> Iterable[Tuple[datetime, datetime]]:\n if not requested_date:\n return []\n\n todays_appointments = self.appointments.filter(\n func.extract(\"day\", Appointment.date) == requested_date.day\n ).filter(func.extract(\"month\", Appointment.date) == requested_date.month)\n work_hours = self.work_hours_for_date(requested_date, student=student)\n taken_appointments = self.taken_appointments_tuples(\n todays_appointments, only_approved\n )\n blacklist_hours = {\"start_hour\": set(), \"end_hour\": set()}\n if student and work_hours:\n approved_taken_appointments = self.taken_appointments_tuples(\n todays_appointments, only_approved=True\n )\n hours = LessonRule.init_hours(\n requested_date, student, work_hours, approved_taken_appointments\n )\n for rule_class in rules_registry:\n rule_instance: LessonRule = rule_class(\n requested_date, student, hours, places\n )\n blacklisted = rule_instance.blacklisted()\n for key in blacklist_hours.keys():\n blacklist_hours[key].update(blacklisted[key])\n\n work_hours.sort(key=lambda x: x.from_hour) # sort from early to late\n for slot in work_hours:\n hours = (\n requested_date.replace(hour=slot.from_hour, minute=slot.from_minutes),\n requested_date.replace(hour=slot.to_hour, minute=slot.to_minutes),\n )\n yield from get_slots(\n hours,\n taken_appointments,\n timedelta(minutes=duration or self.lesson_duration),\n force_future=True,\n blacklist=blacklist_hours,\n )",
"def _get_doctor_available_times(self, date, time_start, time_end, addresses):\n availability = []\n for adress in addresses:\n timesheet = self._compute_current_timesheet(\n date, time_start, time_end, adress)\n if not timesheet:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(time_start),\n \"end_time\": str(time_end)\n\n }\n )\n continue\n else:\n from_datetime = datetime.datetime.combine(date, time_start)\n to_datetime = datetime.datetime.combine(date, time_end)\n meetings = self._compute_concurrency(from_datetime.replace(\n hour=0, minute=0), to_datetime.replace(hour=23, minute=59), adress)\n start_time = datetime.time(\n hour=int(timesheet.hour_from), minute=int(modf(timesheet.hour_from)[0] * 60))\n end_time = datetime.time(\n hour=int(timesheet.hour_to), minute=int(modf(timesheet.hour_to)[0] * 60))\n current_time = start_time\n if not meetings:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {adress.name} from {start_time} till {end_time}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(time_start),\n \"end_time\": str(time_end)\n }\n )\n continue\n for index, meeting in enumerate(meetings):\n tz = timezone(self.env.user.tz)\n start_date_meeting = pytz.utc.localize(\n meeting.start_date).astimezone(tz)\n end_date_meeting = pytz.utc.localize(\n meeting.end_date).astimezone(tz)\n if start_date_meeting.time() > current_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {adress.name} from {current_time} till {start_date_meeting.time()}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(current_time),\n \"end_time\": str(start_date_meeting.time())\n }\n )\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name} from {current_time} till {end_date_meeting.time()}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(start_date_meeting.time()),\n \"end_time\": str(end_date_meeting.time())\n }\n )\n current_time = end_date_meeting.time()\n\n if start_date_meeting.time() == current_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name} from {current_time} till {end_date_meeting.time()}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(start_date_meeting.time()),\n \"end_time\": str(end_date_meeting.time())\n }\n )\n current_time = end_date_meeting.time()\n\n if current_time < end_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {current_time} from {end_time} till {end_date_meeting.time()}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(current_time),\n \"end_time\": str(end_time)\n }\n )\n return availability",
"def is_available_at(self, datetime):\n for booking in self.booking_set.all():\n if booking.schedule_start <= datetime < booking.schedule_end and not booking.is_cancelled():\n return False\n return True",
"def findAvailableTimes(self, nowDay, nowHour, nowMinute, workStart, workEnd, events, timeEst):\n global format\n format = Format()\n global timeSlot\n timeSlot = TimeSlot(timeEst)\n global availableTimes\n availableTimes = []\n print(self.current)\n try:\n if len(events) > 1:\n for i in range(len(events) - 1):\n\n event1 = events[i]\n event2 = events[i + 1]\n e1, e2 = format.formatEvent(event1, event2)\n self.compareEvents(e1, e2, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n lastEvent = events[len(events) - 1]\n secondToLast = events[len(events) - 2]\n self.compareLastEvent(lastEvent, secondToLast, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n elif len(events) == 1:\n lastEvent = events[0]\n nowTime = [self.current[:11] + str(int(self.current[11:13]) - 1) + self.current[13:], self.current]\n nowTime = format.eventFormatDictionary(nowTime, 'now')\n\n self.compareLastEvent(lastEvent, nowTime, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n self.addEmptyDays(events, workStart, workEnd, timeEst)\n availableTimes.sort()\n return availableTimes\n except:\n global msg\n msg = \"There isn't enough time. Try again\"\n return redirect('/error')",
"def compute_schedules(courses=None, excluded_times=(), free_sections_only=True, problem=None, generator=False, start=0):\n s = Scheduler(free_sections_only, problem)\n s.exclude_times(*tuple(excluded_times))\n return s.find_schedules(courses, generator, start)",
"def get_available_slots(iso_datetime):\n all_slots = AppointmentService.get_all_slots(iso_datetime)\n made_appointments = AppointmentService.get_made_appointments(iso_datetime)\n available_slots = []\n\n for slot in all_slots:\n if slot not in made_appointments:\n available_slots.append(slot)\n\n return available_slots",
"def getAvailableTimeslots(self, allTimeslots) -> [Timeslot]:\r\n # List with all Timeslots any of the Teachers is not available at.\r\n notAvailableTimeslotsTeachers = flatMap(lambda t: t.not_available_timeslots, self.teachers)\r\n # notAvailableTimeslotsTeachers = [item for sublist in map(lambda t: t.not_available_timeslots, self.teachers) for item in sublist]\r\n # If Lesson can only take place on forenoon, create list with all afternoon timeslots.\r\n if self.course.only_forenoon:\r\n notAvailableTimeslotsForenoon = list(filter(lambda t: t.number not in Timeslot.getForenoonTimeslotNumbers(), allTimeslots))\r\n else:\r\n notAvailableTimeslotsForenoon = []\r\n\r\n timeslots = [x for x in allTimeslots if x not in (notAvailableTimeslotsTeachers + notAvailableTimeslotsForenoon)]\r\n if self.available_timeslots: # If list is not empty. Else no restrictions.\r\n timeslots = [x for x in timeslots if x in self.available_timeslots]\r\n\r\n return timeslots",
"def check_room(rooms):\n free_rooms = []\n booked_rooms = [] \n for element in rooms:\n if element['booked'] == False:\n free_rooms.append(element)\n else:\n booked_rooms.append(element)\n return free_rooms, booked_rooms",
"def get_all_slots(iso_datetime):\n d_time = datetime.fromisoformat(iso_datetime)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n slots = []\n\n if schedule:\n begin_time = datetime.combine(d_date, schedule['begin'])\n end_time = datetime.combine(d_date, schedule['end'])\n\n while begin_time < end_time:\n slots.append(begin_time)\n begin_time += AppointmentService.APPOINTMENT_DURATION\n\n return slots",
"def avail(self, time, resource_group):\n a = set()\n for r in self.resource_group.resources:\n pass",
"def available_hours_by_day(self, day, condition):\r\n if condition == \"close\":\r\n pass\r\n all_hours = [i for i in range(28)]\r\n if not self.availabilities:\r\n # Need to return this first or it will crash when it cant iterate through an empty list\r\n return [(i, str(i) + \":00\") for i in range(23)]\r\n busy_hours = [i[0] for i in self.working_hours_by_day(day)]\r\n available_hours = [i for i in all_hours if i not in busy_hours]\r\n options = []\r\n for i in available_hours:\r\n if condition == \"open\":\r\n hour, minute = self.verify_time_value(i, 0)\r\n else:\r\n hour, minute = self.verify_time_value(i + 1, 0)\r\n hour = time(hour, minute).hour\r\n options.append((hour, str(hour) + \":00\"))\r\n return options",
"def get_booking_at(self, datetime):\n for booking in self.booking_set.all():\n if booking.schedule_start <= datetime < booking.schedule_end and not booking.is_cancelled():\n return booking\n return None",
"def _slots_available(self, slots, first_day, last_day, employee=None):\n\n def is_work_available(start_dt, end_dt, intervals):\n \"\"\" check if the slot is contained in the employee's work hours (defined by intervals)\n \"\"\"\n def find_start_index():\n \"\"\" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt\n \"\"\"\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)\n\n if not intervals:\n return False\n\n tolerance = timedelta(minutes=1)\n start_index = find_start_index()\n if start_index != -1:\n for index in range(start_index, len(intervals)):\n if intervals[index][1] >= end_dt - tolerance:\n return True\n if len(intervals) == index + 1 or intervals[index + 1][0] - intervals[index][1] > tolerance:\n return False\n return False\n\n def is_calendar_available(slot, events, employee):\n \"\"\" Returns True if the given slot doesn't collide with given events for the employee\n \"\"\"\n start_dt = slot['UTC'][0]\n end_dt = slot['UTC'][1]\n\n event_in_scope = lambda ev: (\n fields.Date.to_date(ev.start) <= fields.Date.to_date(end_dt)\n and fields.Date.to_date(ev.stop) >= fields.Date.to_date(start_dt)\n )\n\n for ev in events.filtered(event_in_scope):\n if ev.allday:\n # allday events are considered to take the whole day in the related employee's timezone\n event_tz = pytz.timezone(ev.event_tz or employee.user_id.tz or self.env.user.tz or slot['slot'].appointment_type_id.appointment_tz or 'UTC')\n ev_start_dt = datetime.combine(fields.Date.from_string(ev.start_date), time.min)\n ev_stop_dt = datetime.combine(fields.Date.from_string(ev.stop_date), time.max)\n ev_start_dt = event_tz.localize(ev_start_dt).astimezone(pytz.UTC).replace(tzinfo=None)\n ev_stop_dt = event_tz.localize(ev_stop_dt).astimezone(pytz.UTC).replace(tzinfo=None)\n if ev_start_dt < end_dt and ev_stop_dt > start_dt:\n return False\n elif fields.Datetime.to_datetime(ev.start) < end_dt and fields.Datetime.to_datetime(ev.stop) > start_dt:\n return False\n return True\n\n workhours = {}\n meetings = {}\n\n # With context will be used in resource.calendar to force the referential user\n # for work interval computing to the *user linked to the employee*\n available_employees = [emp.with_context(tz=emp.user_id.tz) for emp in (employee or self.employee_ids)]\n random.shuffle(available_employees)\n for slot in slots:\n for emp_pos, emp in enumerate(available_employees):\n if emp_pos not in workhours:\n workhours[emp_pos] = [\n (interval[0].astimezone(pytz.UTC).replace(tzinfo=None),\n interval[1].astimezone(pytz.UTC).replace(tzinfo=None))\n for interval in emp.resource_calendar_id._work_intervals_batch(\n first_day, last_day, resources=emp.resource_id,\n )[emp.resource_id.id]\n ]\n\n if is_work_available(slot['UTC'][0], slot['UTC'][1], workhours[emp_pos]):\n if emp_pos not in meetings:\n # note: no check is made on the attendee's status (accepted/declined/...)\n meetings[emp_pos] = self.env['calendar.event'].search([\n ('partner_ids.user_ids', '=', emp.user_id.id),\n ('start', '<', 
fields.Datetime.to_string(last_day.replace(hour=23, minute=59, second=59))),\n ('stop', '>', fields.Datetime.to_string(first_day.replace(hour=0, minute=0, second=0)))\n ])\n\n if is_calendar_available(slot, meetings[emp_pos], emp):\n slot['employee_id'] = emp\n break",
"def get_bookings(soup: bs4.BeautifulSoup) -> Iterable[Tuple[str, datetime.date, int]]:\n for term in soup.find_all(\"Terms\"):\n try:\n if not term.TerBeginn.string and not term.TerBeginDat.string:\n continue\n start_time = parse_lsf_time(term.TerBeginn.string)\n end_time = parse_lsf_time(term.TerEnde.string)\n start_date = parse_lsf_date(term.TerBeginDat.string)\n end_date = parse_lsf_date(term.TerEndeDat.string)\n frequency = term.TerRhyth.string\n weekday = parse_lsf_weekday(term.find(\"WoTag\").string)\n room_names = [room.RaumBez.string for room in term.find_all(\"Rooms\", recursive=False)]\n step = 1 # step size for date selection\n if frequency == \"Einzel\":\n assert start_date == end_date\n assert weekday is None or start_date.weekday() == weekday\n elif frequency.endswith(\"chentl\"): # wöchentl\n assert weekday is not None\n elif frequency.startswith(\"14t\"): # 14tägl\n assert weekday is not None\n step = 2\n else:\n assert frequency == \"Block\"\n assert weekday is None\n dates = list(filter_date_range(start_date, end_date, weekday))[::step]\n times = list(filter_time_range(start_time.hour, end_time.hour, time_slots))\n for room_name in room_names:\n for date in dates:\n for time in times:\n yield room_name, date, time\n except:\n print(\"WARNING:\\n--- Parsing error in ---\")\n print(term)\n print(\"---\")\n print(traceback.format_exc(), end=\"\")\n print(\"--- END OF WARNING ---\")",
"def available_timing(filename,day,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #finding occupied hours\n brlist = []\n #for all lines in file\n for k in range(len(incsv)):\n #if venue in line matches desired venue and day in line matches desired day\n if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:\n #add time range of line into brlist\n brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])\n #pruning\n #tlist stands for timelist. stores remaining hours for synthesis\n tlist = []\n #list of hours\n tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]\n #for line in brlist\n for l in range(len(brlist)):\n #for the range of hours of the line\n for m in range(int((brlist[l][1]-brlist[l][0])/100)):\n #if hours in range still in tlist\n if (brlist[l][0] + m*100) in tlist:\n #remove from tlist\n tlist.remove(brlist[l][0] + m*100)\n #plist for partition list. range of available timings appended here\n plist = []\n #check is for the start time of each available time ranges\n check = 0\n #formation of time ranges\n #for hours in tlist\n for n in range(len(tlist)):\n #if code is checking element 2. Could have used range(1,len(tlist)) but nevermind\n if n >= 1:\n #if 2 adjacent hours are not consecutive\n if tlist[n] != (tlist[n-1]+100):\n #add time range to plist\n plist.append((tlist[check],tlist[n-1]+100))\n #set check to next minimum available start time\n check = n\n #adding range with last hour\n #if last hour in tlist is 2400 and precedent hour in tlist is 2300\n if tlist[n] == 2400 and tlist[n-1] == 2300:\n #add time range\n plist.append((tlist[check],2400))\n return plist",
"def get_slots_for_date(url: str, session: requests.Session) -> List[Dict]:\n response = session.get(\n url,\n headers={\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Adrum\": \"isAjax:true\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n\n slots = list(\n filter(lambda item: item[\"status\"] != \"UnAvailable\", response.json()[\"slots\"])\n )\n\n return slots",
"def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])",
"def get_talks_by_room_and_time(self, room):\r\n current_date = QDate.currentDate().toString(1) # yyyy-mm-dd\r\n current_time = QTime.currentTime().toString() # hh:mm:ss\r\n return QtSql.QSqlQuery('''SELECT * FROM presentations\r\n WHERE Room='{}' AND Date='{}'\r\n AND StartTime >= '{}' ORDER BY StartTime ASC'''.format(room, current_date, current_time))",
"def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]",
"def test_course_available_runs():\n user = UserFactory.create()\n course = CourseFactory.create()\n runs = CourseRunFactory.create_batch(2, course=course, live=True)\n runs.sort(key=lambda run: run.start_date)\n CourseRunEnrollmentFactory.create(run=runs[0], user=user)\n assert course.available_runs(user) == [runs[1]]\n assert course.available_runs(UserFactory.create()) == runs",
"def list(self, option: str = \"\", short=False, **kwargs):\n courses = self.get_sorted_courses()\n\n if option == \"plain\":\n if short:\n for course in sorted(courses, key=lambda x: x.name + x.type):\n print(f\"{course.name} ({course.type})\")\n else:\n for course in sorted(courses, key=lambda x: x.abbreviation + x.type):\n print(f\"{course.abbreviation}-{course.type[0]}\")\n quit()\n\n current_day = datetime.today()\n current_weekday = current_day.weekday()\n\n # split to scheduled and non-scheduled\n unscheduled = [c for c in courses if c.time is None]\n courses = [c for c in courses if c not in unscheduled]\n\n table = []\n option = option.lower()\n\n for i, course in enumerate(courses):\n # lambda functions to test for various options\n # a is current weekday and b is the course's weekday\n options = {\n \"\": lambda _, __: True, # all of them\n \"t\": lambda a, b: a == b, # today\n \"tm\": lambda a, b: (a + 1) % 7 == b, # tomorrow\n \"mo\": lambda a, b: b == 0,\n \"tu\": lambda a, b: b == 1,\n \"we\": lambda a, b: b == 2,\n \"th\": lambda a, b: b == 3,\n \"fr\": lambda a, b: b == 4,\n \"sa\": lambda a, b: b == 5,\n \"su\": lambda a, b: b == 6,\n }\n\n if option not in options:\n exit_with_error(\"Invalid course-listing option!\")\n\n if options[option](current_weekday, course.weekday()):\n # include the name of the day before first day's course\n if courses[i - 1].time.day != courses[i].time.day:\n weekday = course.time.day.capitalize()\n\n # calculate the next occurrence\n date = (\n current_day\n + timedelta(days=(course.weekday() - current_weekday) % 7)\n ).strftime(\"%-d. %-m.\")\n\n table.append([f\"{weekday if not short else weekday[:3]} / {date}\"])\n\n # for possibly surrounding the name with chars if it's ongoing\n name_surround_char = \"•\" if course.is_ongoing() else \"\"\n\n row = [\n f\"{name_surround_char}{course.name if not short else course.abbreviation}{name_surround_char}\",\n f\"{minutes_to_HHMM(course.time.start)} -\"\n f\" {minutes_to_HHMM(course.time.end)}\"\n + (\n \"\"\n if course.time.weeks is None\n else (\n f\" ({course.time.weeks if not short else course.time.weeks[0]})\"\n )\n ),\n \"-\" if course.classroom is None else course.classroom.number,\n ]\n\n # color the course name the appropriate color, depending on its type\n row[0] = Ansi.color(row[0], course_types[course.type].color)\n\n # append useful information\n table.append(row)\n\n # list unscheduled courses only when no options are specified\n if option == \"\" and len(unscheduled) != 0:\n table.append([\"Unscheduled\"])\n for course in unscheduled:\n table.append(\n [\n course.name if not short else course.abbreviation,\n course.type[0],\n \"-\",\n \"-\",\n ]\n )\n\n if len(table) == 0:\n exit_with_error(\"No courses matching the criteria found!\")\n\n print_table(table)",
"def get_made_appointments(iso_datetime):\n appointments = []\n request_d_time = datetime.fromisoformat(iso_datetime)\n request_date = datetime(request_d_time.year,\n request_d_time.month,\n request_d_time.day)\n try:\n query = db.session.query(Appointment).filter(\n Appointment.d_time >= request_date\n ).all()\n appointments = list(map(lambda appointment: appointment.d_time, query))\n except Exception as e:\n app.logger.error(str(e))\n raise\n else:\n return appointments",
"def see_courses(self, username: str, token: str, spots_available: bool = False) -> List[Dict[str, object]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='student'):\n raise RuntimeError(\"User not verified!\")\n\n # Query database for all courses\n cursor = self._db_connection.cursor()\n cursor.execute(\n '''\n SELECT \n course_id,\n course_abbreviation,\n course_name,\n instructor_id, \n time,\n seats \n FROM \n courses\n ;\n ''')\n db_results = cursor.fetchall()\n\n # If no courses are available\n if db_results is None:\n return []\n\n # Build information dicts for every course this user is enrolled in\n courses = []\n for result in db_results:\n # Get the instructor's username (we don't want to be giving UIDs)\n instructor_name = self.get_username(result[3])\n\n # Get the number of students enrolled in this course\n cursor.execute('''SELECT COUNT(*) FROM enrollment_records WHERE course_id = ?;''', (result[0],))\n students_enrolled = cursor.fetchone()[0]\n if students_enrolled is None:\n students_enrolled = 0\n\n # Don't add if the course is full (BUT ONLY if specified)\n if spots_available and students_enrolled >= result[5]:\n continue\n\n # Build a course dict from the data\n courses.append({\n \"course_abbreviation\": result[1],\n \"course_name\": result[2],\n \"instructor\": instructor_name,\n \"time\": result[4],\n \"students_enrolled\": students_enrolled,\n \"capacity\": result[5],\n })\n\n return courses",
"def list_course(request, template=\"core/list_course.html\"):\n response = {\n 'morning': Course.objects.at_morning(),\n 'afternoon': Course.objects.at_afternoon(),\n }\n return direct_to_template(request, template, response)",
"def test_all_available(self):\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(context[\"username\"], self.user)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n self.assertEqual(context[\"display_first_week\"], True)\n\n self.assertEqual(bookings[0].calendar_week,\n self.current_week.calendar_week)\n self.assertEqual(bookings[1].calendar_week,\n self.current_week.calendar_week + 1)\n self.assertEqual(bookings[2].calendar_week,\n self.current_week.calendar_week + 2)\n self.assertEqual(bookings[3].calendar_week,\n self.current_week.calendar_week + 3)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n self.assertEqual(type(block), BlockAvailable)",
"def mainf(): \n \n \n fname = \"C:\\\\Users\\\\pfduc\\\\Documents\\\\room-booking\\\\Output_by_mcgill_system.csv\"\n \n start_data = False\n \n output_data = []\n \n with open(fname, 'r') as csvfile:\n \n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n \n for row in spamreader:\n \n if \"For Week\" in row[0]:\n weekdate_start = row[0].replace(\"For Week\",'').strip()\n \n weekdate_start = weekdate_start.split(' to ')[0]\n \n weekdate_start = timezone.datetime.strptime(weekdate_start, '%d-%b-%Y')\n \n #parse only the meaningful data (see at the end of the loop)\n if start_data:\n\n #information about the days of the week the time information\n #will refer to\n weekdays = row[3].strip().split(' ')\n \n #hours it starts to be free and hours it stops\n time_start, time_stop = row[4].strip().split(' - ')\n \n #will contain which time slots aren't available so we can\n #hardbook them\n timeslots = []\n \n #loop over the weekdays\n for weekday in WEEKDAYS_CODE:\n \n if weekday in weekdays:\n #the room is available on that day, so we keep track of the\n #time at which it isn't in order to hardbook it\n \n #get the date of that day from the one of the beginning of \n #the week\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #before the period the room is available we\n #need to recreate a hard booking\n hb_stop = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_start),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the min allowed hour\n if hb_stop.hour > HOUR_MIN:\n \n ts = TimeSlot(\"%s from %02d:00 to %s\"%(\n hb_stop.strftime(\"%Y-%m-%d\"),\n HOUR_MIN,\n hb_stop.strftime(\"%H:%M\")),\n datestr = True)\n \n timeslots.append(ts)\n\n \n #after the period where the room is available we\n #need to recreate a hard booking\n hb_restart = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_stop),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the max allowed hour\n if hb_restart.hour < HOUR_MAX:\n \n ts = TimeSlot(\"%s to %02d:00\"%(\n hb_restart.strftime(\"%Y-%m-%d from %H:%M\"),\n HOUR_MAX),\n datestr = True)\n \n timeslots.append(ts)\n else:\n #the room isn't available so we'll hardbook on whole day\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #create a timeslot for the whole day\n ts = TimeSlot(cur_weekdate,\n duration = HOUR_MAX - HOUR_MIN)\n \n timeslots.append(ts)\n\n #the information needed to do the hard booking :\n #room name and timeslots\n booking = {\n \"room\" : \"%s %s\"%(row[1], row[2]),\n \"timeslots\" : timeslots \n }\n \n output_data.append(booking)\n \n #from this row the data starts to be interesting to parse\n if \"RDEF CODE\" in row[0]:\n \n start_data = True\n\n return output_data",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"search_type\", \"time\"]"
] |
[
"0.73901004",
"0.6602472",
"0.6398043",
"0.63878804",
"0.6345009",
"0.6186943",
"0.61377203",
"0.605541",
"0.5960356",
"0.5856391",
"0.57217366",
"0.56533283",
"0.5651054",
"0.5642533",
"0.56393796",
"0.5614545",
"0.5546214",
"0.5517886",
"0.5516936",
"0.5475233",
"0.54484606",
"0.54038876",
"0.53934777",
"0.5385623",
"0.5380204",
"0.53795844",
"0.5372052",
"0.5368683",
"0.53367126",
"0.5310471"
] |
0.7257012
|
1
|
Given a course id and a date, get the list of time slots that have not yet been booked on that date for that course
|
def get_avail_time_slots(self, cid, date):
booked = self.get_time_slots(cid, date)
avail_time_slots = []
for time in self.initial_time_slots:
if time not in booked:
avail_time_slots.append(time)
return avail_time_slots
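
For orientation, a self-contained sketch of the pattern above follows; the hourly slot grid, the course id, and the stubbed booked-slot lookup are hypothetical stand-ins for initial_time_slots and get_time_slots, not the original class's data.

from datetime import date

# Assumed hourly grid standing in for self.initial_time_slots.
INITIAL_TIME_SLOTS = [f"{h:02d}:00" for h in range(9, 17)]

def get_booked_slots(cid: str, day: date) -> list[str]:
    # Stub standing in for the database-backed get_time_slots(cid, date).
    return ["10:00", "13:00"]

def get_avail_time_slots(cid: str, day: date) -> list[str]:
    # Keep every slot on the grid that has not already been booked.
    booked = set(get_booked_slots(cid, day))
    return [slot for slot in INITIAL_TIME_SLOTS if slot not in booked]

print(get_avail_time_slots("COMP9900", date(2024, 5, 20)))
# -> ['09:00', '11:00', '12:00', '14:00', '15:00', '16:00']

The set conversion is a small deviation from the original list membership test; it keeps the per-slot lookup O(1) without changing the result.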
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_time_slots(self, cid, date):\n query = \"SELECT time from consultation where cid = %s and date = %s\"\n inputs = (cid, date)\n array_book = self.database_manager.execute_query(query, inputs)\n array_book = [e[0] for e in array_book]\n booked = array_book if array_book else []\n return booked",
"def getNoDaters():\n idHHs = mdb.getHH_with_no_date()\n for idHH in idHHs:\n HH = idHH['idHousehold']\n sendEmail(\"{}\".format(HH))\n print \"sent date offer to HH {}\".format(HH)",
"def get_excluded_dates(self):\n raise NotImplementedError",
"def get_excluded_dates(self):\n raise NotImplementedError",
"def getAvailableTimeslots(self, allTimeslots) -> [Timeslot]:\r\n # List with all Timeslots any of the Teachers is not available at.\r\n notAvailableTimeslotsTeachers = flatMap(lambda t: t.not_available_timeslots, self.teachers)\r\n # notAvailableTimeslotsTeachers = [item for sublist in map(lambda t: t.not_available_timeslots, self.teachers) for item in sublist]\r\n # If Lesson can only take place on forenoon, create list with all afternoon timeslots.\r\n if self.course.only_forenoon:\r\n notAvailableTimeslotsForenoon = list(filter(lambda t: t.number not in Timeslot.getForenoonTimeslotNumbers(), allTimeslots))\r\n else:\r\n notAvailableTimeslotsForenoon = []\r\n\r\n timeslots = [x for x in allTimeslots if x not in (notAvailableTimeslotsTeachers + notAvailableTimeslotsForenoon)]\r\n if self.available_timeslots: # If list is not empty. Else no restrictions.\r\n timeslots = [x for x in timeslots if x in self.available_timeslots]\r\n\r\n return timeslots",
"def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])",
"def available_hours(\n self,\n requested_date: datetime,\n student: \"Student\" = None,\n duration: int = None,\n only_approved: bool = False,\n places: Tuple[Optional[str]] = (None, None),\n ) -> Iterable[Tuple[datetime, datetime]]:\n if not requested_date:\n return []\n\n todays_appointments = self.appointments.filter(\n func.extract(\"day\", Appointment.date) == requested_date.day\n ).filter(func.extract(\"month\", Appointment.date) == requested_date.month)\n work_hours = self.work_hours_for_date(requested_date, student=student)\n taken_appointments = self.taken_appointments_tuples(\n todays_appointments, only_approved\n )\n blacklist_hours = {\"start_hour\": set(), \"end_hour\": set()}\n if student and work_hours:\n approved_taken_appointments = self.taken_appointments_tuples(\n todays_appointments, only_approved=True\n )\n hours = LessonRule.init_hours(\n requested_date, student, work_hours, approved_taken_appointments\n )\n for rule_class in rules_registry:\n rule_instance: LessonRule = rule_class(\n requested_date, student, hours, places\n )\n blacklisted = rule_instance.blacklisted()\n for key in blacklist_hours.keys():\n blacklist_hours[key].update(blacklisted[key])\n\n work_hours.sort(key=lambda x: x.from_hour) # sort from early to late\n for slot in work_hours:\n hours = (\n requested_date.replace(hour=slot.from_hour, minute=slot.from_minutes),\n requested_date.replace(hour=slot.to_hour, minute=slot.to_minutes),\n )\n yield from get_slots(\n hours,\n taken_appointments,\n timedelta(minutes=duration or self.lesson_duration),\n force_future=True,\n blacklist=blacklist_hours,\n )",
"def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)",
"def keep_daytimes(df, hours=range(0,24)):\n within_hours = lambda x: x.hour in hours\n\n df2 = df[df.pick_date.map(within_hours)]\n df2 = df2[df2.drop_date.map(within_hours)]\n\n return df2",
"def list_course(request, template=\"core/list_course.html\"):\n response = {\n 'morning': Course.objects.at_morning(),\n 'afternoon': Course.objects.at_afternoon(),\n }\n return direct_to_template(request, template, response)",
"def dates_empty_folders(img_dir, crid=None):\n\n missing = []\n for dir, subdirs, files in os.walk(img_dir):\n if len(subdirs) != 0:\n continue\n if crid:\n cont = [str(crid) in afile for afile in files]\n if not any(cont):\n missing.append(dir)\n else:\n cont = True if len(files) > 0 else False\n if not cont:\n missing.append(dir)\n\n miss_dates = [\n datetime.strptime(\n os.path.basename(os.path.normpath(miss_path)), '%Y.%m.%d')\n for miss_path in missing\n ]\n\n return sorted(miss_dates)",
"def get_sorted_courses(self, include_unscheduled=False) -> List[Course]:\n return sorted(\n filter(\n lambda c: c.time is not None or include_unscheduled, self.get_courses()\n ),\n key=lambda c: (0, 0) if not c.time else (c.weekday(), c.time.start),\n )",
"def consultation_booking_query(self, cid, sid, time, date):\n if not self.check_course_exist(cid):\n return ConsultationError.INVALID_COURSE.value\n is_weekday, feedback = self.check_weekday(date)\n time = self.round_time(time)\n if is_weekday:\n try:\n avail_list = self.get_avail_time_slots(cid.upper(), date) # return available time slot list\n logger.debug(avail_list)\n if time in avail_list:\n self.add_consultation(cid, sid, time, date) # add into database\n self.emailer.send_confirm_booking(cid=cid, time=time, date=date, receiver='[email protected]')\n return \"{}\".format(feedback)\n else:\n if not avail_list:\n return \"Sorry, there is no available time slot on date\"\n result = \"Sorry this time slot has been booked, \" \\\n \"please choose another one from following time slots on {}\".format(date)\n return '{}: {}'.format(result, ', '.join(avail_list))\n except ValueError:\n logger.error(\"Invalid Input\")\n return\n else:\n logger.debug(feedback)\n return feedback",
"def get_slots_for_date(url: str, session: requests.Session) -> List[Dict]:\n response = session.get(\n url,\n headers={\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Adrum\": \"isAjax:true\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n\n slots = list(\n filter(lambda item: item[\"status\"] != \"UnAvailable\", response.json()[\"slots\"])\n )\n\n return slots",
"def get_deadlines(date):\n with open('deadlines.csv') as deadline_file:\n deadline_dict = csv.DictReader(deadline_file)\n first_week = {}\n for row in deadline_dict:\n section_start = datetime.datetime.strptime(row[\"start\"],\n \"%Y-%m-%d-%H\")\n first_week[row['section']] = section_start\n\n date_dif = date_offset(first_week, date)\n\n deadlines = {}\n for sec, time in first_week.items():\n deadlines[sec] = first_week[sec] + date_dif\n\n return deadlines",
"def get_units_with_due_date(course):\r\n units = []\r\n\r\n def visit(node):\r\n \"\"\"\r\n Visit a node. Checks to see if node has a due date and appends to\r\n `units` if it does. Otherwise recurses into children to search for\r\n nodes with due dates.\r\n \"\"\"\r\n if getattr(node, 'due', None):\r\n units.append(node)\r\n else:\r\n for child in node.get_children():\r\n visit(child)\r\n visit(course)\r\n #units.sort(key=_title_or_url)\r\n return units",
"def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items",
"def test_list_grading_periods_courses(self):\r\n course_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_courses(course_id)",
"def deconstruct_datetime(self, date: datetime) -> List[int]:\n year, month, day, hour, _, _, _, _, _ = date.timetuple()\n return [year, month, day, hour]",
"def upcoming_courses(aud):\n \n courses = [c for c in aud.all_courses() if c.grade == u\"*\"]\n return [c.number.replace(\"-\", \"\") for c in courses]",
"def extract_dates(data):\n dates = []\n \n for line in data.splitlines():\n if line[6:8] == \"20\":\n dates.append(datetime.strptime(line[6:16], '%Y-%m-%d').date())\n \n return list(set(dates))\n pass",
"def find_outdated_game_dates(self):\n state = 'preview.gameData.status.detailedState'\n old = self._db.Games.find({state : {'$nin' : ['Final']}})\n return set([x['date'] for x in old])",
"def cronWaitingList(self, date):\n match = {\"task_type\": \"crontab\", \"task_day\": date, \"status\": \"waiting\"}\n l = []\n for doc in self.search(match):\n l.append(doc)\n return l",
"def get_interested_dates(self, ssn):\n cur = self.conn.cursor(pymysql.cursors.DictCursor)\n sql = \"select * from dates where (c1_ssn = %s or c2_ssn = %s) and see_again = 'yes'\"\n dates = cur.execute(sql, (ssn, ssn))\n return CursorIterator(cur)",
"def get_talks_gt_one_hour(videos):\n return [v for v in videos if get_hours(v) >= 1]",
"def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]",
"def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]",
"def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r",
"def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]",
"def test_can_not_cancel_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)"
] |
[
"0.6475281",
"0.5428682",
"0.5371842",
"0.5371842",
"0.51741886",
"0.51081544",
"0.5009312",
"0.49967223",
"0.49914086",
"0.49795753",
"0.4913998",
"0.49124762",
"0.48989335",
"0.48970482",
"0.48623538",
"0.4860842",
"0.48081848",
"0.47892904",
"0.47883248",
"0.47678515",
"0.4753564",
"0.47535312",
"0.47449136",
"0.47421753",
"0.47405002",
"0.4724239",
"0.4724239",
"0.47208366",
"0.47172695",
"0.4711767"
] |
0.6290072
|
1
|
Main function for handling a consultation booking query. It uses the other helper functions in this class to perform checks: first verify that the requested date and time are valid, then check whether that time slot is free for booking. If the booking succeeds, send a confirmation email to the user
|
def consultation_booking_query(self, cid, sid, time, date):
if not self.check_course_exist(cid):
return ConsultationError.INVALID_COURSE.value
is_weekday, feedback = self.check_weekday(date)
time = self.round_time(time)
if is_weekday:
try:
avail_list = self.get_avail_time_slots(cid.upper(), date) # return available time slot list
logger.debug(avail_list)
if time in avail_list:
self.add_consultation(cid, sid, time, date) # add into database
self.emailer.send_confirm_booking(cid=cid, time=time, date=date, receiver='[email protected]')
return "{}".format(feedback)
else:
if not avail_list:
return "Sorry, there is no available time slot on date"
result = "Sorry this time slot has been booked, " \
"please choose another one from following time slots on {}".format(date)
return '{}: {}'.format(result, ', '.join(avail_list))
except ValueError:
logger.error("Invalid Input")
return
else:
logger.debug(feedback)
return feedback
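
The flow above leans on two helpers that are not shown here, check_weekday and round_time; the sketch below is one plausible reading of them, assuming a YYYY-MM-DD date string and an HH:MM time string, rather than the original implementation.

from datetime import datetime

def check_weekday(date_str: str) -> tuple[bool, str]:
    # Returns (is_weekday, feedback), matching how the caller unpacks it.
    day = datetime.strptime(date_str, "%Y-%m-%d")
    if day.weekday() < 5:  # Monday..Friday
        return True, "Booking confirmed for {}".format(date_str)
    return False, "Consultations can only be booked on weekdays"

def round_time(time_str: str) -> str:
    # Snap an HH:MM time down to the start of its hour, e.g. "14:20" -> "14:00".
    hour = datetime.strptime(time_str, "%H:%M").hour
    return "{:02d}:00".format(hour)

Note that strptime raises ValueError on malformed input, which lines up with the except ValueError branch in the booking function above.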
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_make_a_booking(self):\n date = datetime(2030, 3, 1, 11)\n\n response = self.client.post(reverse('bookings', kwargs={'facility': 'g'}), {\n 'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA - 1)\n self.assertEqual(type(context[\"info\"]), BookingSuccessfulAlert)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == date:\n self.assertEqual(type(block), BlockReserved)\n else:\n self.assertEqual(type(block), BlockAvailable)",
"def tr_check_availability(agent_directory, agent_full_name, slot_range):\r\n tr_create_booking_register(agent_directory, agent_full_name) # CHANGE THIS WHEN POSSIBLE. IT IS ERRASING ALL BOOKINGS. NOW THE SYSTEM IS NOT CONSTRAINT IN TR RESOURCES.\r\n tr_booking_df = pd.read_csv(f'{agent_directory}''/'f'{agent_full_name}_booking.csv', header=0, delimiter=\",\", engine='python')\r\n tr_booking_df['booking_type'] = tr_booking_df['booking_type'].fillna(\"\")\r\n # Creates 2 lists: booked_slots_list & free_slots_list and checks availability.\r\n free_slots_list = []\r\n booked_slots_list = []\r\n prebooked_slots_list = []\r\n for x in slot_range:\r\n if tr_booking_df.loc[x - 1, 'booking_type'] == \"pre-book\":\r\n prebooked_slots_list.append(x)\r\n elif tr_booking_df.loc[x - 1, 'booking_type'] == \"booked\":\r\n booked_slots_list.append(x)\r\n else:\r\n free_slots_list.append(x)\r\n # Checks availability\r\n if len(booked_slots_list) >= 1:\r\n tr_msg_ca_body = \"negative\"\r\n else:\r\n tr_msg_ca_body = \"positive\"\r\n return tr_msg_ca_body",
"def test_one_reserveation_and_one_booked(self):\n own_booking = create_test_booking(self.user, self.first_day, 11)\n other_booking = create_test_booking(self.someone, self.first_day, 12)\n\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA - 1)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == own_booking.date:\n self.assertEqual(type(block), BlockReserved)\n elif block.date == other_booking.date:\n self.assertEqual(type(block), BlockBooked)\n else:\n self.assertEqual(type(block), BlockAvailable)",
"def confirm_car_reservation():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n today = datetime.date.today()\n if request.method == 'POST':\n car_id = request.form['hidden-car-id']\n car = get_car_identified_by_id(car_id)\n date_from = request.form['hidden-date-from']\n date_to = request.form['hidden-date-to']\n if not are_dates_valid(date_from, date_to):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\",\n user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", today=today)\n if is_car_available_in_the_selected_period(date_from, date_to, car_id):\n if check_authentication(session_id, user_id):\n if has_user_age_requirement(user_id, car_id):\n reservation_id = save_car_reservation(car_id, user_id, date_from, date_to)\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id,\n reservation_id=reservation_id, car=car, date_from=date_from, date_to=date_to,\n total_price=calc_total_price(car.price, date_from, date_to),\n reservation_just_completed=True)\n else:\n error_msg = \"The reservation has failed because you are not at least \" + str(car.min_age) +\\\n \" years old!\"\n return render_template('car_details.html', user=user_id, session_id=session_id,\n error=error_msg, car=car, today=today)\n else:\n return render_template('car_details.html', car=car,\n error=\"You need to be authenticated in order to complete this action!\", today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=False, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def clean(self):\n\n if self.dateEnd <= self.dateStart:\n raise ValidationError(\"Start date must be before end date!\")\n \n # If there are bookings in the database with:\n # - same room__id\n # - different booking__id\n # - first date at or after dateStart (but before dateEnd)\n # - last date at or before dateEnd (but after dateStart)\n # Raise ValidationError\n if Booking.objects.filter(room__id=self.room_id).exclude(pk=self.pk).filter(\n Q(dateEnd__gt=self.dateStart, dateStart__lt=self.dateEnd)\n ).exists():\n raise ValidationError(\"Overlapping dates, room has been booked.\")",
"def BookThisMotorhome(request, pk):\n if not request.user.is_authenticated:\n messages.add_message(\n request, messages.WARNING, 'Please login or register to create your booking.')\n return redirect(reverse('motorhomes'))\n user = request.user\n template = 'bookings/book_this_motorhome.html'\n motorhome = get_object_or_404(Motorhome, pk=pk)\n form = BookThisMotorhomeForm()\n context = {\n 'motorhome': motorhome,\n 'form': form,\n }\n if request.method == 'POST':\n request.session['user.pk'] = user.pk\n # get the dates from the form\n booked_from = request.POST.get('start_date', False)\n booked_until = request.POST.get('end_date', False)\n # using dateutils to parse the date passed from the page to django accepted format\n booked_until_parsed = dateutil.parser.parse(booked_until)\n booked_from_parsed = dateutil.parser.parse(booked_from)\n # should the from date larger then the until or the same, return to this page with warning\n if (booked_until_parsed - booked_from_parsed).days < 1:\n messages.add_message(\n request, messages.WARNING, 'Please check your dates, something wrong')\n return render(request, template, context)\n\n td = booked_until_parsed-booked_from_parsed\n # get days to count the total\n days = td.days\n total = td.days*motorhome.daily_rental_fee\n try:\n # create booking with the given details, others set to default\n booking = Booking(\n booked_by=user,\n booked_vehicle=motorhome,\n booked_from=booked_from,\n booked_until=booked_until,\n )\n booking.save()\n # add booking information to session\n # so it can be accessed later on\n request.session['motorhome.pk'] = pk\n\n request.session['days'] = days\n request.session['total'] = total\n request.session['booked_from'] = booked_from\n request.session['booked_until'] = booked_until\n request.session['booking_id'] = booking.booking_id\n # uopdate userprofile instance with the last booking ref\n UserProfile.objects.filter(pk=user.id).update(\n last_booking_ref=booking.booking_id)\n\n # If user has billingaddress then go to the checkout view,\n # no billing address saved, redirect to the checkout checkout view to add it before go to payment\n billingaddress = BillingAddress.objects.filter(user=request.user)\n if billingaddress:\n return redirect(reverse('checkout'))\n messages.add_message(request, messages.SUCCESS,\n \"Your Booking has been created, let's go to checkout\")\n else:\n return redirect(reverse('checkout_address'))\n messages.add_message(request, messages.SUCCESS,\n \"Your Booking has been created, let's add a billingadress\")\n except:\n messages.add_message(request, messages.ERROR,\n 'Sorry, We were unable to create your booking, please try again or contact us')\n return render(request, template, context)\n\n return render(request, template, context)",
"def add_booking():\n try:\n \n carid = request.form[\"carid\"]\n userid = request.form[\"userid\"]\n fromdate = request.form[\"fromdate\"].strip()\n todate = request.form[\"todate\"].strip()\n\n print(fromdate, \"|\", todate)\n\n car = Car.query.get(carid)\n car.isavailable = False\n\n user = User.query.get(userid)\n user_email = user.email\n\n fromdate_obj = datetime.datetime.strptime(fromdate, '%Y-%m-%d')\n todate_obj = datetime.datetime.strptime(todate, '%Y-%m-%d')\n \n summary = \"Car Booking. Car id: \" + carid\n\n cal = CalendarUtil()\n resp = cal.addToCalendar(user_email, fromdate_obj, todate_obj, summary)\n cal_event_id = resp['id']\n booking = Booking(carid=carid, userid=userid, fromdate=fromdate, todate=todate, caleventid= cal_event_id, isactive=True)\n\n test = db.session.add(booking)\n db.session.commit()\n return bookingSchema.jsonify(booking)\n except Exception as ex:\n print(\"Failed to add event to calender. Exception: \", str(ex))\n return jsonify(None)",
"def createdevent(service, myemail,my_date):\n page_token = None\n now = datetime.datetime.now().isoformat() + 'Z'\n \n while True:\n\n events = service.events().list(calendarId='primary', timeMin=now,\n pageToken=page_token).execute()\n\n for event in events['items']:\n try: \n start = event['start'].get('dateTime') \n start = str(start).split('T') \n date = start[0]\n time = start[1].split('+')\n time = time[0]\n time = dt.strptime(time, '%H:%M:%S')\n end_t = time + timedelta(minutes=30) \n start_c = time + timedelta(minutes=-30) \n time, end_t,start_c = str(time), str(end_t), str(start_c)\n time, end_t = time.split(\" \"), end_t.split(\" \") \n start_c = start_c.split(\" \")\n time, end_t,start_c = time[1], end_t[1], start_c[1]\n\n admin = event['attendees'][0]['email']\n summary = event['summary']\n\n dat=date.split('-')\n Tim=end_t.split(':')\n tim=datetime.datetime(int(dat[0]),int(dat[1]),\\\n int(dat[2]),int(Tim[0]),int(Tim[1]))\n Sta=start_c.split(':')\n Sta=datetime.datetime(int(dat[0]),int(dat[1]),\\\n int(dat[2]),int(Sta[0]),int(Sta[1]))\n\n \n if myemail == admin:\n if (my_date>Sta and my_date<tim): \n print(\"Failed to Create a Slot because:\")\n print(f\" - You will be busy with {summary}\")\n \n return True \n\n if len(event['attendees']) == 2:\n admin = event['attendees'][0][\"email\"]\n patient_email = event['attendees'][1][\"email\"]\n if myemail == patient_email: \n if (my_date>Sta and my_date<tim): \n print(\"Failed to Create a slot because:\")\n print(f\" - You will be busy with {admin}\\\n on {summary}\")\n \n return True\n \n except KeyError:\n\n break\n\n page_token = events.get('nextPageToken')\n if not page_token:\n\n break\n \n return False",
"def freebusy_check(service, date, time, user_name):\n event = {\n \"timeMin\": (make_datetime_from_string(f'{date}T{time}:00+0200')).isoformat(),\n \"timeMax\": (make_datetime_from_string(f'{date}T{time}:00+0200')+datetime.timedelta(minutes = 90)).isoformat(),\n \"timeZone\": 'Africa/Johannesburg',\n \"items\": [\n {\n \"id\": user_name + '@student.wethinkcode.co.za'\n },\n {\n 'id': '[email protected]'\n }\n ]\n }\n\n eventsResult = service.freebusy().query(body=event).execute()\n return eventsResult",
"def create_new_availability():\n if request.method == 'POST':\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n start_time = request.form['start_time']\n end_time = request.form['end_time']\n\n response_add_availability = requests.post(server_url + 'doctor/add_availability', json={\n 'doctor_email': doctor_email,\n 'date': date,\n 'start_time': start_time,\n 'end_time': end_time\n })\n response_add_availability = response_add_availability.json()\n\n if response_add_availability.get('Status') == \"ALREADY_AVAILABILITY_SET\":\n return render_template('doctors/availability_failed.html')\n else:\n referer = request.referrer\n return render_template('doctors/availability_success.html', referer=referer)\n else:\n return render_template('doctors/dashboard.html')",
"def test_happy_booking(self):\n\n now = datetime.datetime.now()\n\n for club in self.clubs:\n for competition in self.competitions:\n rv = self.app.get(f\"/book/{competition['name']}/{club['name']}\")\n\n print(rv.data, rv.status_code, \"\\n\")\n\n if server.formatDate(competition[\"date\"]) <= now:\n continue\n\n assert rv.status_code in [200]\n assert (\n str.encode(f\"Places available: {competition['numberOfPlaces']}\")\n in rv.data\n )",
"def notify(self):\n Reservation = self.db_con.table_data['reservations']\n Restaurant = self.db_con.table_data['restaurants']\n data = self.db_con.session.query(Reservation, Restaurant).\\\n filter(Reservation.restaurant_id == Restaurant._id).\\\n filter(Reservation.date == datetime.date.today())\n for row in data:\n self.send_email(row.email, f'Your reservation at {row.name}',\n f'This is a reminder of your for '\n f'location {row.address}, {row.time},'\n f'a table for {row.guests}')",
"def test_booking_in_mybookings(client):\n response = client.get('/mybookings')\n\n assert str.encode(PICKUP_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert str.encode(RETURN_DATE.strftime(\n DEFAULT_DATETIME_FORMAT)) in response.data\n assert b'1' in response.data\n assert b'Confirmed' in response.data",
"def check_car_availability():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n today = datetime.date.today()\n if request.method == 'POST':\n car_id = request.form['car-id']\n car = get_car_identified_by_id(car_id)\n date_from = request.form['date-from']\n date_to = request.form['date-to']\n if not are_dates_valid(date_from, date_to):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, error=\"Please insert a valid date interval!\", today=today)\n if is_car_available_in_the_selected_period(date_from, date_to, car_id):\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=True,\n total_price=calc_total_price(car.price, date_from, date_to), show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id,\n today=today)\n else:\n return render_template('car_details.html', car=car, is_available=True,\n total_price=calc_total_price(car.price, date_from, date_to),\n show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, user=user_id, session_id=session_id, today=today)\n else:\n return render_template('car_details.html', car=car, is_available=False, show_confirm_div=True,\n date_from=date_from, date_to=date_to, today=today)\n else:\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=False, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def book(request):\n if request.method == \"POST\": # getting all fields\n first_name = request.POST.get(\"first_name\") \n last_name = request.POST.get(\"last_name\")\n email_address = request.POST.get(\"email_address\")\n phone_code = request.POST.get(\"phone_code\")\n phone_number = request.POST.get(\"phone_number\")\n countries = request.POST.getlist(\"countries\")\n company = request.POST.get(\"company\")\n objective = request.POST.get(\"objective\")\n details = request.POST.get(\"details\")\n print(first_name,last_name,email_address,phone_code,phone_number,countries,company,objective,details)\n # if all fields not None and have value\n if first_name and last_name and email_address and phone_code and phone_number and countries and company and objective and details:\n try: # to check that phone number is not text, try to convert it to integar\n phone_number = int(phone_number)\n except: # if failed to be converted to integar\n messages.info(request,\"Phone number field must be filled with numbers only.\") # display this message for user\n return redirect(\"book\") # reload the page\n mobile_number = phone_code + str(phone_number) # getting complete mobile number as string\n selected_countries = \", \".join(countries) # converting countries list to be saved as string\n print(selected_countries)\n if not AppointmentRequests.objects.filter(phone_number=mobile_number): # if a user tries to request an appointment with new info of mobile number and email address (not already exist in database)\n if not AppointmentRequests.objects.filter(email_address=email_address):\n\n AppointmentRequests.objects.create(first_name=first_name,last_name=last_name,email_address=email_address,phone_number=mobile_number,\n countries=selected_countries,company= company,objective=objective, details=details) # create an appointment\n\n\n # send email to user\n send_mail( \n subject=f\"Service Provider Appointment\",\n message=f\"\"\"\n Dear {first_name} {last_name},\n [+] Your Info provided:\n 1- First name: {first_name}.\n 2- Last name: {last_name}.\n 3- Email address: {email_address}.\n 4- Phone number: {mobile_number}.\n 5- Countries: {selected_countries}.\n 6- Company: {company}.\n 7- Objective: {objective}.\n 8- Details:\n {details}\n \\n\n We will communicate with you as soon as possible.\n \"\"\",\n recipient_list=[email_address,],from_email=\"[email protected]\",fail_silently=False,\n )\n # send email to service provider agent\n send_mail(\n subject=f\"A new requested Appointment by {first_name} {last_name}\",\n message=f\"\"\"\n [+] Info provided:\n 1- First name: {first_name}.\n 2- Last name: {last_name}.\n 3- Email address: {email_address}.\n 4- Phone number: {mobile_number}.\n 5- Countries: {selected_countries}.\n 6- Company: {company}.\n 7- Objective: {objective}.\n 8- Details:\n {details}\n \"\"\",\n recipient_list=[\"[email protected]\",],from_email=\"[email protected]\",fail_silently=False,\n )\n return redirect(\"confirm\")\n\n else:\n messages.info(request,\"You have already sent a request, we will communicate you as soon as possible, we will handle any changes you want (if exist) when contact.\")\n return redirect(\"book\") # reload the page\n\n else: # if user tries to request a new appointment using same mobile number\n messages.info(request,\"You have already sent a request, we will communicate you as soon as possible, we will handle any changes you want (if exist) when contact.\")\n return redirect(\"book\") # reload the page\n \n\n\n else: # if any field is empty or None\n 
messages.info(request,\"Please, fill empty fields\")\n return redirect(\"book\") # reload the page\n \n return render(request,\"book_appointment.html\")",
"def mainf(): \n \n \n fname = \"C:\\\\Users\\\\pfduc\\\\Documents\\\\room-booking\\\\Output_by_mcgill_system.csv\"\n \n start_data = False\n \n output_data = []\n \n with open(fname, 'r') as csvfile:\n \n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n \n for row in spamreader:\n \n if \"For Week\" in row[0]:\n weekdate_start = row[0].replace(\"For Week\",'').strip()\n \n weekdate_start = weekdate_start.split(' to ')[0]\n \n weekdate_start = timezone.datetime.strptime(weekdate_start, '%d-%b-%Y')\n \n #parse only the meaningful data (see at the end of the loop)\n if start_data:\n\n #information about the days of the week the time information\n #will refer to\n weekdays = row[3].strip().split(' ')\n \n #hours it starts to be free and hours it stops\n time_start, time_stop = row[4].strip().split(' - ')\n \n #will contain which time slots aren't available so we can\n #hardbook them\n timeslots = []\n \n #loop over the weekdays\n for weekday in WEEKDAYS_CODE:\n \n if weekday in weekdays:\n #the room is available on that day, so we keep track of the\n #time at which it isn't in order to hardbook it\n \n #get the date of that day from the one of the beginning of \n #the week\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #before the period the room is available we\n #need to recreate a hard booking\n hb_stop = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_start),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the min allowed hour\n if hb_stop.hour > HOUR_MIN:\n \n ts = TimeSlot(\"%s from %02d:00 to %s\"%(\n hb_stop.strftime(\"%Y-%m-%d\"),\n HOUR_MIN,\n hb_stop.strftime(\"%H:%M\")),\n datestr = True)\n \n timeslots.append(ts)\n\n \n #after the period where the room is available we\n #need to recreate a hard booking\n hb_restart = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_stop),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the max allowed hour\n if hb_restart.hour < HOUR_MAX:\n \n ts = TimeSlot(\"%s to %02d:00\"%(\n hb_restart.strftime(\"%Y-%m-%d from %H:%M\"),\n HOUR_MAX),\n datestr = True)\n \n timeslots.append(ts)\n else:\n #the room isn't available so we'll hardbook on whole day\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #create a timeslot for the whole day\n ts = TimeSlot(cur_weekdate,\n duration = HOUR_MAX - HOUR_MIN)\n \n timeslots.append(ts)\n\n #the information needed to do the hard booking :\n #room name and timeslots\n booking = {\n \"room\" : \"%s %s\"%(row[1], row[2]),\n \"timeslots\" : timeslots \n }\n \n output_data.append(booking)\n \n #from this row the data starts to be interesting to parse\n if \"RDEF CODE\" in row[0]:\n \n start_data = True\n\n return output_data",
"def _get_doctor_available_times(self, date, time_start, time_end, addresses):\n availability = []\n for adress in addresses:\n timesheet = self._compute_current_timesheet(\n date, time_start, time_end, adress)\n if not timesheet:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(time_start),\n \"end_time\": str(time_end)\n\n }\n )\n continue\n else:\n from_datetime = datetime.datetime.combine(date, time_start)\n to_datetime = datetime.datetime.combine(date, time_end)\n meetings = self._compute_concurrency(from_datetime.replace(\n hour=0, minute=0), to_datetime.replace(hour=23, minute=59), adress)\n start_time = datetime.time(\n hour=int(timesheet.hour_from), minute=int(modf(timesheet.hour_from)[0] * 60))\n end_time = datetime.time(\n hour=int(timesheet.hour_to), minute=int(modf(timesheet.hour_to)[0] * 60))\n current_time = start_time\n if not meetings:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {adress.name} from {start_time} till {end_time}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(time_start),\n \"end_time\": str(time_end)\n }\n )\n continue\n for index, meeting in enumerate(meetings):\n tz = timezone(self.env.user.tz)\n start_date_meeting = pytz.utc.localize(\n meeting.start_date).astimezone(tz)\n end_date_meeting = pytz.utc.localize(\n meeting.end_date).astimezone(tz)\n if start_date_meeting.time() > current_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {adress.name} from {current_time} till {start_date_meeting.time()}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(current_time),\n \"end_time\": str(start_date_meeting.time())\n }\n )\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name} from {current_time} till {end_date_meeting.time()}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(start_date_meeting.time()),\n \"end_time\": str(end_date_meeting.time())\n }\n )\n current_time = end_date_meeting.time()\n\n if start_date_meeting.time() == current_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is not available in {adress.name} from {current_time} till {end_date_meeting.time()}\", \"state\": \"not_available\"},\n \"date\": date,\n \"start_time\": str(start_date_meeting.time()),\n \"end_time\": str(end_date_meeting.time())\n }\n )\n current_time = end_date_meeting.time()\n\n if current_time < end_time:\n availability.append(\n {\n \"address\": adress.name,\n \"availibility\": {\"name\": f\"The doctor {self.name} is available in {current_time} from {end_time} till {end_date_meeting.time()}\", \"state\": \"available\"},\n \"date\": date,\n \"start_time\": str(current_time),\n \"end_time\": str(end_time)\n }\n )\n return availability",
"def create(service, user, email):\n day,year,month,hour,minutes = 0,0,0,0,0\n\n while day < 1 or day > 31 or month < 1 or month > 12 or year < 1:\n dateinput = input(\"Enter date (day/month/year): \") .strip()\n if \"/\" in dateinput:\n date = dateinput.split('/') \n if len(date) != 3 or not date[0].isdigit()\\\n or not date[1].isdigit() or not date[2].isdigit():\n print(\"date should be in this format day/month/year\")\n\n continue\n \n else:\n print(\"date should be in this format day/month/year\")\n\n continue\n day = int(date[0])\n month = int(date[1])\n year = int(date[2])\n if day < 1 or day > 31:\n print(\"date is invalid.\")\n if month < 1 or month > 12:\n print(\"month is invalid.\")\n if year < 1:\n print(\"year is invalid.\")\n\n \"\"\"\n Check the date if is passed and ask\n \"\"\"\n my_date = datetime.datetime(year, month, day,23,30)\n \n if my_date < datetime.datetime.today():\n message2 = \"event cannot be created , day has passed.\"\n print(\"{} {}\".format(user, message2))\n \n print(f'Bye {user}')\n\n return message2\n \n\n while hour<7 or hour>17 or minutes<0 or minutes>59\\\n or (hour==17 and min>30):\n timeinput = input(\"Enter time (HH:MM): \").strip()\n if \":\" in timeinput:\n time = timeinput.split(\":\")\n if len(time) != 2 or not time[0].isdigit()\\\n or not time[1].isdigit():\n print(\"time should be in this format HH:MM\")\n\n continue\n else:\n print(\"time should be in this format HH:MM\")\n\n continue\n hour = int(time[0])\n minutes = int(time[1]) \n\n if hour < 7 or hour > 17:\n print(\"Hour should be between 7 and 17\")\n if minutes < 0 or minutes > 59:\n print(\"minutes should be between 0 and 59\")\n if hour == 17 and minutes > 30:\n print(\"minutes should be between 00-30 since we close at 18:00\") \n\n hour2 = hour \n minutes2 = minutes+30\n\n if minutes >= 30:\n minutes2 = 0\n hour2 += 1\n add = minutes - 30\n minutes2 += add \n\n my_date = datetime.datetime(year, month, day, hour, minutes)\n\n \n if my_date < datetime.datetime.now():\n message2 = \"event cannot be created , time has passed.\"\n print(\"{} {}\".format(user,message2))\n \n print(f'Bye {user}')\n\n return message2\n \n else:\n startday = str(year)+\"-\"+str(month)+\"-\"+str(day)\n starttime = str(hour)+\":\"+str(minutes)\n endtime = str(hour2)+\":\"+str(minutes2)\n\n \"\"\"\n Checking if you have created Event Before\n - 30 minutes before start time\n - During Available Event\n - Before the End time\n \"\"\"\n \n if createdevent(service,email,my_date):\n message = \"You will be busy during that time\"\n \n return message\n\n \n \"\"\"\n Creating the event\n \"\"\"\n\n summary,description = \"\",\"\"\n while summary ==\"\":\n summary = input(\"Name of your topic: \").strip()\n \n while description == \"\":\n description = input(\"Describe your topic: \").strip() \n\n confirm = \"\"\n while confirm.lower() != 'y' or confirm.lower() != 'n':\n confirm = input(\"Confirm event?(y/n): \").strip()\n if confirm.lower() == 'y' or confirm.lower() == 'n':\n\n break\n \n if confirm.lower() == 'y':\n event=do_create(service,summary,description,startday,starttime,\\\n endtime,user,email) \n message = \"Event created successfully\"\n print('{}\\n - Calender Link: {}'.format(message,\\\n event.get('htmlLink'))) \n \n else:\n message = \"Event not created\"\n print(message)\n\n return message",
"def test_appointment_date(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, self.unrelated_patient)\n self.stopRouter()",
"def check() -> bool:\n today = datetime.now(timezone(timedelta(hours=2)))\n second_period = today + timedelta(days=7)\n third_period = second_period + timedelta(days=7)\n periods = list(\n map(\n lambda item: item.strftime(\"%Y-%m-%d\"), [today, second_period, third_period]\n )\n )\n\n periods_urls = map(\n lambda item: f\"https://ezakupy.tesco.pl/groceries/pl-PL/slots/delivery/{item}?slotGroup=2\",\n periods,\n )\n url_login = \"https://ezakupy.tesco.pl/groceries/pl-PL/login\"\n\n session = requests.Session()\n\n response_login_form = session.get(url_login)\n soup = BeautifulSoup(response_login_form.content, features=\"html.parser\")\n csrf_token = soup.find(attrs={\"name\": \"_csrf\"}).attrs[\"value\"]\n\n session.post(\n url_login,\n data={\n \"onSuccessUrl\": \"\",\n \"email\": os.environ.get(\"TESCO_EMAIL\", \"\"),\n \"password\": os.environ.get(\"TESCO_PASSWORD\", \"\"),\n \"_csrf\": csrf_token,\n },\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n )\n\n period_results = map(lambda url: get_slots_for_date(url, session), periods_urls)\n list_of_slots = []\n\n for period_result in period_results:\n for slot in period_result:\n list_of_slots.append(slot)\n\n if len(list_of_slots) > 0:\n send_email(\n email_address(), \"Free slot available\", f\"Free slots {len(list_of_slots)}\"\n )\n print(\"Free slot available. \", len(list_of_slots))\n return True\n else:\n print(\"No available slots\")\n return False",
"def save_appointment_details(request, calendar_id):\n def schedule_mail(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_mail, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n def schedule_sms(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_sms, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n start_time = request.GET['start_time'][:19]\n end_time = request.GET['end_time'][:19]\n \n start_time = datetime.strptime(start_time, \"%Y-%m-%dT%H:%M:%S\")\n end_time=datetime.strptime(end_time, \"%Y-%m-%dT%H:%M:%S\")\n \n calendar_obj = Calendar.objects.get(pk=calendar_id)\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n\n # create a form instance and populate it with data from the request:\n form = AppointmentForm(request.POST)\n\n # check whether it's valid and save it\n if form.is_valid():\n # Save appointment details\n \n mobilephone = form.data['mobilephone']\n email = form.data['email']\n first_name = form.data['first_name']\n last_name = form.data['last_name']\n notes = form.data['notes']\n\n appointment = Appointment(start_time=start_time, end_time=end_time, first_name=first_name, \n last_name=last_name, email=email, mobilephone=mobilephone, notes=notes)\n \n appointment.calendar = calendar_obj\n appointment.save()\n\n try:\n send_appointment_mail(appointment) # send appointment details email\n except Exception as exp:\n print(exp)\n \n try:\n send_appointment_sms(appointment) # send appointment details sms\n except Exception as exp:\n print(exp)\n \n # Calculate reminder schedule dates\n reminder1 = start_time - timedelta(hours=2)\n reminder2 = start_time - timedelta(hours=24)\n reminder3 = start_time - timedelta(days=7)\n\n # Schedule mails\n schedule_mail(reminder1, appointment)\n schedule_mail(reminder2, appointment)\n schedule_mail(reminder3, appointment)\n \n # Schedule sms\n schedule_sms(reminder1, appointment)\n schedule_sms(reminder2, appointment)\n schedule_sms(reminder3, appointment)\n \n return redirect(reverse('appointment:complete_appointment', args=[calendar_id]))\n \n # if a GET (or any other method) we'll create a blank form\n else:\n form = AppointmentForm()\n return render(request, 'appointment_form.html', {'form': form, 'start_time': start_time, 'end_time': end_time,\n 'office_location': calendar_obj.office_location})",
"def book_a_table(rest_id, time, persons, firstName, phone, date='', lastName='', email='', wishes=''):\n url = BOOKING_URL.format(rest_id=rest_id, date=date, time=time,\n persons=persons, firstName=firstName,\n lastName=lastName, email=email, phone=phone,\n wishes=wishes)\n\n try:\n response = requests.get(url)\n data = response.text\n if response.status_code == 200:\n if 'success' in data:\n error_text = re.search(r'\"id\":\"(\\d+)\",\"', data)\n if error_text.groups():\n print 'Success: {}'.format(error_text.group(1))\n return False, error_text.group(1)\n elif 'error' in data:\n error_text = re.search(r'\"message\":\"(.*)\",\"', data)\n if error_text.groups():\n print 'Error: {}'.format(error_text.group(1))\n return False, error_text.group(1)\n\n except HTTPError as err:\n app.logger.error('HttpError while booking: {}'.format(err))",
"def validate_bookings(bookingid, username, car_id):\n\n # get booking object for bookingid\n booking = Booking.query.get(bookingid)\n print(\"Booking:::\")\n print(booking)\n\n user = booking.customer\n print(\"User:::\")\n print(user)\n \n print(\"Params:::\"+bookingid+\"--\"+username+\" --\"+car_id+\"--=>\"+str(booking.carid==car_id))\n\n isValidBooking = False\n if booking and user.email==username and booking.carid==int(car_id):\n isValidBooking = True\n\n print(str(isValidBooking)+\"----------------------------------------\")\n return jsonify({\"isValidBooking\": isValidBooking})",
"def _validate_meeting(self, datetime_start, datetime_end, adress, meeting):\n concurrent_meetings = self._compute_concurrency(\n datetime_start, datetime_end, adress)\n if concurrent_meetings:\n availibilities = self._get_doctor_available_times(datetime_start.date(\n ), datetime.time(hour=0, minute=0), datetime.time(hour=23, minute=59), [adress])\n raise InvalidMeeting(doctor=self, type=\"another_meeting\",\n meeting=concurrent_meetings[0], valid_times=availibilities)\n else:\n return {'success': True}",
"def test_appointment_date(self):\n # Default for email\n appt_date = datetime.date.today() + datetime.timedelta(days=7) \n self.create_confirmed_notification(self.test_patient, appt_date)\n self.create_unconfirmed_notification(self.other_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, self.unrelated_patient)",
"def handle_book_slot(time=None, name='default'):\n # Make request here\n print('in book slot')\n if not time:\n return question('You didn\\'t specify the time. Try again.')\n else:\n slot_date = session.attributes.get('date', None)\n params = {\n 'starttime': time,\n 'bookedbyuser': name,\n 'date': slot_date\n }\n print(params)\n session.attributes['stage'] = 'book_slot'\n session.attributes['slot_params'] = params\n return question('You want to book at ' + time + ' Is that correct?')",
"def clerk_create_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response_clerk_create_appointment = requests.post(server_url + 'medical_clerk/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n response_clerk_create_appointment = response_clerk_create_appointment.json()\n\n if response_clerk_create_appointment.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('clerks/clerk_appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('clerks/home.html')",
"def test_email_warnings_not_sent_within_2_hrs_of_booking(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, 0, tzinfo=dt_timezone.utc\n )\n\n # payment_due_date 2015/2/11 23:59 (within 24hrs - warnings sent)\n ticketed_event = baker.make_recipe(\n 'booking.ticketed_event_max10',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n ticket_cost=10,\n payment_due_date=datetime(2015, 2, 11, tzinfo=dt_timezone.utc),\n )\n booking1 = baker.make(\n TicketBooking, ticketed_event=ticketed_event, paid=False,\n date_booked=datetime(2015, 2, 1, 0, 0, tzinfo=dt_timezone.utc),\n )\n booking2 = baker.make(\n TicketBooking, ticketed_event=ticketed_event, paid=False,\n date_booked=datetime(2015, 2, 11, 8, 5, tzinfo=dt_timezone.utc),\n )\n _add_user_email_addresses(TicketBooking)\n for ticket_booking in TicketBooking.objects.all():\n baker.make(Ticket, ticket_booking=ticket_booking)\n\n management.call_command('email_ticket_booking_warnings')\n self.assertEqual(len(mail.outbox), 1)\n booking1.refresh_from_db()\n booking2.refresh_from_db()\n self.assertTrue(booking1.warning_sent)\n self.assertFalse(booking2.warning_sent)",
"def check_for_alerts(self, cr, uid, context=None):\n\n dept_obj = self.pool.get('hr.department')\n detail_obj = self.pool.get('hr.schedule.detail')\n attendance_obj = self.pool.get('hr.attendance')\n rule_obj = self.pool.get('hr.schedule.alert.rule')\n\n # TODO - Someone who cares about DST should fix ths\n #\n data = self.pool.get('res.users').read(\n cr, uid, uid, ['tz'], context=context)\n dtToday = datetime.strptime(\n datetime.now().strftime('%Y-%m-%d') + ' 00:00:00',\n '%Y-%m-%d %H:%M:%S')\n lcldtToday = timezone(data['tz'] and data['tz'] or 'UTC').localize(\n dtToday, is_dst=False)\n utcdtToday = lcldtToday.astimezone(utc)\n utcdtYesterday = utcdtToday + relativedelta(days=-1)\n strToday = utcdtToday.strftime('%Y-%m-%d %H:%M:%S')\n strYesterday = utcdtYesterday.strftime('%Y-%m-%d %H:%M:%S')\n\n dept_ids = dept_obj.search(cr, uid, [], context=context)\n for dept in dept_obj.browse(cr, uid, dept_ids, context=context):\n for employee in dept.member_ids:\n\n # Get schedule and attendance records for the employee for the\n # day\n #\n sched_detail_ids = detail_obj.search(\n cr, uid, [\n ('schedule_id.employee_id', '=', employee.id),\n '&',\n ('date_start', '>=', strYesterday),\n ('date_start', '<', strToday),\n ],\n order='date_start',\n context=context\n )\n attendance_ids = attendance_obj.search(\n cr, uid, [\n ('employee_id', '=', employee.id),\n '&',\n ('name', '>=', strYesterday),\n ('name', '<', strToday),\n ],\n order='name',\n context=context\n )\n\n # Run the schedule and attendance records against each active\n # rule, and create alerts for each result returned.\n #\n rule_ids = rule_obj.search(\n cr, uid, [('active', '=', True)], context=context)\n for rule in rule_obj.browse(\n cr, uid, rule_ids, context=context):\n res = rule_obj.check_rule(\n cr, uid, rule, detail_obj.browse(\n cr, uid, sched_detail_ids, context=context),\n attendance_obj.browse(\n cr, uid, attendance_ids, context=context),\n context=context\n )\n\n for strdt, attendance_id in res['punches']:\n # skip if it has already been triggered\n ids = self.search(\n cr, uid, [\n ('punch_id', '=', attendance_id),\n ('rule_id', '=', rule.id),\n ('name', '=', strdt),\n ], context=context)\n if len(ids) > 0:\n continue\n\n self.create(\n cr, uid, {\n 'name': strdt,\n 'rule_id': rule.id,\n 'punch_id': attendance_id,\n }, context=context\n )\n\n for strdt, detail_id in res['schedule_details']:\n # skip if it has already been triggered\n ids = self.search(\n cr, uid, [\n ('sched_detail_id', '=', detail_id),\n ('rule_id', '=', rule.id),\n ('name', '=', strdt),\n ], context=context)\n if len(ids) > 0:\n continue\n\n self.create(\n cr, uid, {\n 'name': strdt,\n 'rule_id': rule.id,\n 'sched_detail_id': detail_id,\n }, context=context\n )",
"def test_one_reserveation(self):\n test_booking = create_test_booking(self.user, self.first_day, 11)\n\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n bookings = response.context[\"bookings\"]\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n if block.date == test_booking.date:\n self.assertEqual(type(block), BlockReserved)\n else:\n self.assertEqual(type(block), BlockAvailable)"
] |
[
"0.6285904",
"0.6248594",
"0.60556376",
"0.59616846",
"0.58689266",
"0.5847724",
"0.5803472",
"0.5801589",
"0.5801299",
"0.5799272",
"0.57964927",
"0.57900834",
"0.57629204",
"0.5760087",
"0.5751662",
"0.57443297",
"0.57376814",
"0.5724082",
"0.570967",
"0.56869584",
"0.56727326",
"0.5669036",
"0.56648725",
"0.56616354",
"0.5650675",
"0.5600007",
"0.5596598",
"0.55874664",
"0.55818605",
"0.557898"
] |
0.76142645
|
0
|
Time rounding function to convert a time to the nearest hour
|
def round_time(self, time):
    # Round an "HH:MM:SS" string to the nearest hour: 30 minutes or more rounds up, otherwise down.
    # Note: inputs between 23:30 and 23:59 yield '24:00:00'; the caller must handle the day rollover.
    hour, mins, _ = time.split(":")
    return '{:02d}:00:00'.format(int(hour) + 1) if int(mins) >= 30 else '{:02d}:00:00'.format(int(hour))
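A minimal usage sketch of the rounding rule above, assuming the method sits on an otherwise stateless helper class (the TimeHelper name is only illustrative):

class TimeHelper:
    def round_time(self, time):
        hour, mins, _ = time.split(":")
        return '{:02d}:00:00'.format(int(hour) + 1) if int(mins) >= 30 else '{:02d}:00:00'.format(int(hour))

helper = TimeHelper()
print(helper.round_time("09:29:59"))  # '09:00:00' -- under 30 minutes rounds down
print(helper.round_time("09:30:00"))  # '10:00:00' -- 30 minutes or more rounds up
print(helper.round_time("23:45:00"))  # '24:00:00' -- caller must handle the day rollover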
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calculate_hours(time):\n return int(time / 3600)",
"def minute_and_hour_to_time(minute, hour):\n return hour * 60 + minute",
"def round_time(time):\n t_min = time.minute % 5\n t_sec = time.second\n t_mic = time.microsecond\n time = time - timedelta(minutes=t_min, seconds=t_sec, microseconds=t_mic)\n return time",
"def round_hours(dt, resolutionInHours):\n from datetime import datetime, timedelta\n # First zero out minutes, seconds and micros\n dtTrunc = dt.replace(minute=0,second=0, microsecond=0)\n # Figure out how many minutes we are past the last interval\n excessHours = (dtTrunc.hour) % resolutionInHours\n # Subtract off the excess minutes to get the last interval\n return dtTrunc + timedelta(hours=-excessHours)",
"def _round_time(dt=None, round_to=60):\n if hasattr(dt, 'tzinfo'):\n dt.replace(tzinfo=None)\n diff = dt - dt.replace(hour=0, minute=0, second=0, microsecond=0)\n seconds = diff.seconds\n rounding = (seconds + round_to / 2) // round_to * round_to\n return dt + datetime.timedelta(0, rounding-seconds, -dt.microsecond)",
"def _hour_to_time(num: int):\n return datetime.datetime.now().replace(hour=num).strftime(\"%-I %p\")",
"def from_min_to_day(time):\n return str(round(int(time) / (60 * 8), 1))",
"def round_time(dt=None, round_to=60):\n if dt == None : dt = datetime.now()\n seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n rounding = (seconds+round_to/2) // round_to * round_to\n return dt + timedelta(0,rounding-seconds,-dt.microsecond)",
"def time_to_hour_and_minute(time):\n return [time // 60, time % 60]",
"def seconds2hours(time_in_seconds):\n seconds_since_midnight = np.mod(time_in_seconds, SECONDS_PER_DAY)\n fraction_hour = seconds_since_midnight/SECONDS_PER_HOUR\n if fraction_hour[-1] == 0:\n fraction_hour[-1] = 24\n return fraction_hour",
"def round_time(dt, roundTo=60):\n seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n rounding = (seconds + roundTo/2) // roundTo * roundTo\n return dt + timedelta(0, rounding - seconds, -dt.microsecond)",
"def roundTime(dt=None, roundTo=60):\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)",
"def roundTime(dt=None, roundTo=60):\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)\n dt.replace(second=0, microsecond=0)\n return dt",
"def roundTime(dt=None, roundTo=60):\n\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n return dt + timedelta(0,rounding-seconds,-dt.microsecond)",
"def from_minutes_and_halves(s):\n s = s.strip()\n half = s.endswith(\"H\")\n s = s.strip(\"H \")\n \n return (int(s) * 60 if s else 0) + (30 if half else 0)",
"def sec_to_hm(t):\n t = int(t)\n s = t % 60\n t //= 60\n m = t % 60\n t //= 60\n return t, m, s",
"def round_up_to_quarter_hour(self, dt: datetime) -> str:\n delta = timedelta(minutes=15)\n # Round time backwards to the hour\n rounded_hour = dt.replace(minute=0, second=0, microsecond=0)\n rounded_qtr_hour = rounded_hour + ceil((dt - rounded_hour) / delta) * delta\n return self.date_to_intermediate_time_str(rounded_qtr_hour)",
"def recover_time(adjusted_time):\n time_in_s = adjusted_time + glob.base_time\n return time_in_s",
"def convert_to_24_hours(time, ap):\r\n if ap.lower() == 'p' and time <= 12:\r\n time += 12\r\n\r\n return time",
"def round_utc_hour_up(dateString):\n date_object = datetime.strptime(dateString, \"%Y-%m-%d %H:%M:%S\")\n newHour = (date_object.hour + 1) % 24\n date_object = date_object.replace(hour=newHour)\n return date_object.strftime(\"%Y-%m-%d %H:00:00\")",
"def closest_half(iso_datetime):\n d_time = datetime.fromisoformat(iso_datetime)\n approx = round(d_time.minute / 30.0) * 30\n d_time = d_time.replace(minute=0)\n d_time += timedelta(seconds=approx * 60)\n d_time = d_time.replace(second=0)\n return d_time.isoformat()",
"def round_minute(time, round_to):\n rounded = time + datetime.timedelta(minutes=round_to/2.)\n rounded -= datetime.timedelta(minutes=rounded.minute % round_to, \n seconds=rounded.second, \n microseconds=rounded.microsecond)\n return rounded",
"def get_hour(hour):\n if int(hour) == 0:\n return 12\n elif int(hour) > 12:\n return int(hour) - 12\n else:\n return hour",
"def hours_in(sec):\r\n return int(sec//3600)",
"def convert_time(time):\n\n s = time.split()[0]\n s_h = int(s.split(':')[0])\n\n am_pm = s.split(':')[1][-2:]\n if s_h == 12:\n s_h = s_h - 12\n if am_pm == 'PM':\n s_h = s_h + 12\n s_h = s_h + 1\n\n e = time.split()[2]\n e_h = int(e.split(':')[0])\n\n am_pm = e.split(':')[1][-2:]\n if e_h == 12:\n e_h = e_h - 12\n if am_pm == 'PM':\n e_h = e_h + 12\n e_h = e_h + 1\n\n hour_list = range(s_h, e_h + 1)\n return hour_list",
"def scaledTime():\n #return (time.gmtime().tm_wday, time.gmtime().tm_hour)\n epoch = time.strptime(\"2013-02-21 11:30:00\", \"%Y-%m-%d %H:%M:%S\")\n timeInSec = time.mktime(time.gmtime()) - time.mktime(epoch)\n hourSince = timeInSec / Predictor.hourScale\n day = int(hourSince / 24 % 7)\n hour = int(hourSince % 24)\n return (day, hour)",
"def roundTime(dt=None, roundTo=1):\n if dt == None : dt = datetime.now()\n seconds = total_seconds(dt - dt.min)\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n return dt + timedelta(0,rounding-seconds,-dt.microsecond)",
"def hourOfDayAlign(ts, hour):\n\tday = int(ts / secInDay)\n\treturn (24 * day + hour) * secInHour",
"def normalise_time(time_str):\n\n hour = time_str.split(\":\")[0]\n if int(hour) >= 24:\n normalised_hour = int(hour) % 24\n return time_str.replace(hour, f\"{normalised_hour:02}\")\n\n return time_str",
"def hours_to_24h_time(hours: float) -> str:\n return (\n dt.datetime(2001, 1, 2) + dt.timedelta(hours=hours)\n ).time().isoformat()"
] |
[
"0.6894247",
"0.6868417",
"0.6807988",
"0.67942107",
"0.6661185",
"0.6559892",
"0.65049607",
"0.6504825",
"0.64477766",
"0.63714486",
"0.6369177",
"0.63213694",
"0.63099164",
"0.6292212",
"0.6286086",
"0.6229638",
"0.6191705",
"0.61642456",
"0.6159741",
"0.61480266",
"0.6130199",
"0.6129501",
"0.61257744",
"0.61248124",
"0.6081958",
"0.60344696",
"0.60219985",
"0.60197985",
"0.59602785",
"0.59339386"
] |
0.75865996
|
0
|
Prepares the HttpResponse that will be used to contain the CSV data
|
def initialize_response(self, filename):
key = 'Content-Disposition'
self.response = HttpResponse(content_type='text/csv')
self.response[key] = f'attachment; filename="{filename}"'
self.writer = UnicodeCsvWriter(self.response)
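For context, a minimal sketch of how a response-initializing helper like this is typically consumed in a Django view. UnicodeCsvWriter is project-specific, so the standard library csv.writer stands in for it here, and the view and field names are illustrative only:

import csv
from django.http import HttpResponse

def export_report(request):
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="report.csv"'
    writer = csv.writer(response)      # rows are written straight into the response body
    writer.writerow(['id', 'name'])    # header row
    writer.writerow([1, 'example'])    # data rows would normally come from a queryset
    return response                    # the browser receives report.csv as a download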
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def csv_response(filename, header, rows):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)\r\n writer = csv.writer(response, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n # In practice, there should not be non-ascii data in this query,\r\n # but trying to do the right thing anyway.\r\n encoded = [unicode(s).encode('utf-8') for s in header]\r\n writer.writerow(encoded)\r\n for row in rows:\r\n encoded = [unicode(s).encode('utf-8') for s in row]\r\n writer.writerow(encoded)\r\n return response",
"def csv_download_view(request):\n logging.info(\" CSV file download is working\")\n now = datetime.now()\n timestamp = now.strftime(\"%Y_%m_%d\")\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"results_' + \\\n GLOBAL_VARIABLE.get_host_name()+'_'+timestamp+'.csv\"'\n\n writer = csv.writer(response)\n list_of_cd = list(GLOBAL_VARIABLE.get_current_data())\n\n for i in range(10):\n rows = [sub_list[i] for sub_list in list_of_cd]\n writer.writerow(rows)\n\n return response",
"def create_csv_response(filename, header, datarows):\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'\\\r\n .format(filename)\r\n csvwriter = csv.writer(\r\n response,\r\n dialect='excel',\r\n quotechar='\"',\r\n quoting=csv.QUOTE_ALL)\r\n\r\n csvwriter.writerow(header)\r\n for datarow in datarows:\r\n encoded_row = [unicode(s).encode('utf-8') for s in datarow]\r\n csvwriter.writerow(encoded_row)\r\n return response",
"def return_csv(self, filename, header, data):\r\n\r\n csv_file = StringIO.StringIO()\r\n writer = csv.writer(csv_file, dialect='excel', quotechar='\"',\r\n quoting=csv.QUOTE_ALL)\r\n\r\n writer.writerow(header)\r\n\r\n # Setup streaming of the data\r\n def read_and_flush():\r\n \"\"\"Read and clear buffer for optimization\"\"\"\r\n csv_file.seek(0)\r\n csv_data = csv_file.read()\r\n csv_file.seek(0)\r\n csv_file.truncate()\r\n return csv_data\r\n\r\n def csv_data():\r\n \"\"\"Generator for handling potentially large CSVs\"\"\"\r\n for row in data:\r\n writer.writerow(row)\r\n csv_data = read_and_flush()\r\n yield csv_data\r\n response = HttpResponse(csv_data(), mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\r\n filename)\r\n return response",
"def create_csv(request):\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output = io.StringIO()\n\n headers = []\n if income_history:\n for i in income_history[0]:\n if i != 'income_history_id':\n headers.append(i)\n\n writer = csv.DictWriter(output, dialect='excel', quoting=csv.QUOTE_ALL, fieldnames=headers)\n writer.writeheader()\n\n if income_history:\n for entry in income_history:\n del entry['income_history_id']\n writer.writerow(entry)\n\n response = file_streaming_response('text/csv', 'income_history.csv', output)\n return response",
"def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response",
"def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')",
"def download_bank_details(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"bank_details.csv\"'\n\n writer = csv.writer(response)\n \n writer.writerow([\n 's/n',\n 'account_number',\n 'account_name',\n 'recipient_code',\n 'bank_name',\n 'student_name',\n 'date_added'\n ])\n \n count = 0\n for bank in StudentBankDetail.objects.filter(month=batch_date):\n count +=1\n writer.writerow([\n count,\n str(bank.account_number),\n str(bank.account_name),\n str(bank.recipient_code),\n str(bank.bank.bank_name),\n str(bank.student.name),\n datetime.strftime(bank.date_added, '%d-%m-%Y')\n ])\n \n\n\n return response",
"def post(self, request, *args, **kwargs):\n create_media_folder_if_not_exists()\n delete_csv_before_request()\n try:\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n file = serializer.save()\n df = pd.read_csv(file.csv.path)\n df = get_dummies(df)\n df.to_csv(os.path.join(MEDIA_ROOT, 'modified.csv'), index=False)\n modified = Csv.objects.create(csv='modified.csv')\n\n # response = HttpResponse(modified.csv, content_type='application/csv')\n # response['Content-Disposition'] = 'inline; filename=' + os.path.basename(str(modified.csv))\n return FileResponse(modified.csv) # response\n\n # return Response({\"file\": b\"\".join(modified.csv).decode(\"utf-8\")}, status=status.HTTP_200_OK)\n # return Response({'result': 'ok' }, status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'result': 'ERROR ' + str(e)}, status=status.HTTP_400_BAD_REQUEST)",
"def render_to_response(self, context, **response_kwargs):\n if not self.request.user.is_authenticated:\n # Do not allow to get a good response\n return nlogin(self.request)\n elif 'Csv' in self.request.GET.get('submit_type', ''):\n \"\"\" Provide CSV response\"\"\"\n return export_csv(self.get_qs(), 'begrippen')\n else:\n return super(PartListView, self).render_to_response(context, **response_kwargs)",
"def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data",
"def initialize_response(self, filename):\n self.writer = UnicodeCsvWriter(self.csv_buffer)\n self.filename = filename\n self.archive = ZipFile(self.zip_buffer, 'w', compression=ZIP_DEFLATED)",
"def send_data_reply(self, (request, result)):\n if not 'format' in request.args or 'json' in request.args['format']:\n return self.send_reply((request, result))\n elif 'format' in request.args and 'csv' in request.args['format']:\n if len(result) > 1:\n request.setResponseCode(400)\n request.write(\"CSV only supported for one data stream\")\n request.finish()\n return\n # return cvs\n request.setHeader('Content-type', 'text/csv')\n if ('tags' in request.args or\n ('timefmt' in request.args and request.args['timefmt'][0] in ['excel', 'iso8060'])):\n dl = []\n for str in result:\n dl.append(build_tag_query(self.db, request, [('uuid', str['uuid'])]))\n d = defer.DeferredList(dl)\n d.addCallback(lambda x: self.send_csv_reply(request, result, x))\n return d\n else:\n return self.send_csv_reply(request, result, [(False, [])] * len(result))\n else:\n request.setResponseCode(400)\n request.finish()",
"def send_csv_reply(self, request, result, tags):\n request.setHeader('Content-disposition', 'attachment; filename=%s.csv' % \n result[0]['uuid'])\n if tags[0][0]:\n tags = tags[0][1][0][0]\n else:\n tags = None\n self.write_one_stream(request, \n result[0], \n tags)\n \n request.finish()",
"def __init__(self, content_type=\"text/csv\"):\n super(CSVSerializer, self).__init__(content_type=content_type)",
"def csv(request):\n if request.method == 'POST':\n form = CSVUploadForm(request.POST, request.FILES)\n if form.is_valid():\n fund_bot = FundBot(csv_file=request.FILES['csv_file'])\n filename = '%s-banner-iii.csv' % datetime.datetime.today().strftime('%Y-%m-%d')\n response = HttpResponse(mimetype=\"text/csv\")\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n \n csv_response = fund_bot.process(response)\n new_log = FundProcessLog(substitutions=int(fund_bot.substitutions))\n new_log.save()\n return csv_response\n else:\n return direct_to_template(request,\n 'vendors/iii/csv.html',\n {'form':CSVUploadForm()})",
"def csvdata():\n return render_template(\"data.html\")",
"def dataset_constructor_csv_file_upload(request):\n if request.method == \"POST\":\n relation_support_dataset = request.FILES['csv_file']\n handle_uploaded_file(relation_support_dataset, 'temp/cntr_csv_file.csv')\n df = pd.read_csv('temp/cntr_csv_file.csv')\n ind = {}\n data = []\n for i, row in df.iterrows():\n if row['reldescription'] not in ind:\n data.append({'name':row['reldescription'], 'examples':[]})\n ind[row['reldescription']] = len(data) - 1\n data[ind[row['reldescription']]]['examples'].append({'head':row['head'], 'tail':row['tail'], 'sentence':row['sentence']})\n return HttpResponse(\n json.dumps({'num_rels':len(data), 'num_exs':len(data[0]['examples']), 'data':data}),\n content_type=\"application/json\"\n )",
"def render_to_response(self, context, **response_kwargs):\n if not self.request.user.is_authenticated:\n # Do not allow to get a good response\n return nlogin(self.request)\n elif 'Csv' in self.request.GET.get('submit_type', ''):\n \"\"\" Provide CSV response\"\"\"\n return export_csv(self.get_qs(), 'begrippen')\n else:\n return super(TextListView, self).render_to_response(context, **response_kwargs)",
"def make_response(header, data, format, name, encoding=None):\n if format == 'csv':\n formatter = CSVformatter(encoding)\n mimetype = 'application/csv'\n elif format == 'xls':\n formatter = CSVformatter(encoding)\n mimetype = 'application/xls'\n else:\n raise Exception(\"Unknown format: %s\" % (format,))\n\n resp = HttpResponse(generator(header, data, formatter), mimetype=mimetype)\n resp['Content-Disposition'] = 'attachment; filename=%s.%s' % (name, format)\n return resp",
"def csv_response(rows, filename=\"export.csv\"):\n\t# Unfortunately Flask doesn't let you output response as an IO Stream, so you have\n\t# buffer the entire response to a string first.\n\tsi = StringIO.StringIO()\n\tcw = csv.writer(si)\n\tcw.writerow(header)\n\tfor row in rows:\n\t\tcw.writerow()\n\toutput = make_response(si.getvalue())\n\toutput.headers[\"Content-Disposition\"] = \"attachment; filename=%s\" % filename\n\toutput.headers[\"Content-type\"] = \"text/csv\"\n\treturn output",
"def _prepare_response(self, response):\n\n if not isinstance(response, Response):\n return Response(0, response)\n return response",
"def prepare(self, request):\n pass",
"def csv_report(request):\r\n if not _can_download_report(request.user):\r\n return HttpResponseForbidden(_('You do not have permission to view this page.'))\r\n\r\n if request.method == 'POST':\r\n start_date = request.POST.get('start_date', '')\r\n end_date = request.POST.get('end_date', '')\r\n start_letter = request.POST.get('start_letter', '')\r\n end_letter = request.POST.get('end_letter', '')\r\n report_type = request.POST.get('requested_report', '')\r\n try:\r\n start_date = _get_date_from_str(start_date) + datetime.timedelta(days=0)\r\n end_date = _get_date_from_str(end_date) + datetime.timedelta(days=1)\r\n except ValueError:\r\n # Error case: there was a badly formatted user-input date string\r\n return _render_report_form(start_date, end_date, start_letter, end_letter, report_type, date_fmt_error=True)\r\n\r\n report = initialize_report(report_type, start_date, end_date, start_letter, end_letter)\r\n items = report.rows()\r\n\r\n response = HttpResponse(mimetype='text/csv')\r\n filename = \"purchases_report_{}.csv\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d-%H-%M-%S\"))\r\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(filename)\r\n report.write_csv(response)\r\n return response\r\n\r\n elif request.method == 'GET':\r\n end_date = datetime.datetime.now(pytz.UTC)\r\n start_date = end_date - datetime.timedelta(days=30)\r\n start_letter = \"\"\r\n end_letter = \"\"\r\n return _render_report_form(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\"), start_letter, end_letter, report_type=\"\")\r\n\r\n else:\r\n return HttpResponseBadRequest(\"HTTP Method Not Supported\")",
"def set_pipelined_response(self,view_name,request,responsedata):\n pass",
"def dwn_rel_sup_csv(request):\n i = int(request.GET.get('i'))\n \n return FileResponse(open('temp/relation_support_datasets/relation_support_dataset_{}_{}.csv'.format(i, request.user.username),'rb'))",
"def upload_request_entities_csv(request):\n entities_csv_file = request.FILES['entities_csv_file']\n handle_uploaded_file(entities_csv_file, 'temp/entities_csv_file.csv')\n \n return HttpResponse(\n json.dumps({\"status\": \"success\"}),\n content_type=\"application/json\"\n )",
"def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))",
"def csv_content(self):\n if not hasattr(self, 'CSV_COLUMNS'):\n raise NotImplementedError('Child classes of CsvForm must implement the CSV_COLUMNS constant')\n\n # Get column fields and headers\n csv_columns = [i['column'] for i in self.CSV_COLUMNS]\n csv_headers = [i['title'].encode('utf-8') for i in self.CSV_COLUMNS]\n\n # Build data for csv writer\n csv_data = []\n for obj in self.get_queryset():\n csv_data.append([unicode(csv_getvalue(obj, column)).encode('utf-8') for column in csv_columns])\n\n # Create buffer with csv content\n content = StringIO()\n writer = csv.writer(content)\n writer.writerow(csv_headers)\n writer.writerows(csv_data)\n content.seek(0)\n\n return content",
"def prepare_data(self):"
] |
[
"0.63558805",
"0.6211693",
"0.61712754",
"0.61518145",
"0.6062035",
"0.6041702",
"0.6011072",
"0.5961391",
"0.5917385",
"0.59064513",
"0.5903959",
"0.588618",
"0.58850384",
"0.57541096",
"0.575156",
"0.5745402",
"0.5737436",
"0.5736874",
"0.57214737",
"0.5717868",
"0.57091135",
"0.5670132",
"0.5652784",
"0.56444585",
"0.56080395",
"0.55921465",
"0.55510175",
"0.55457205",
"0.55364704",
"0.5512096"
] |
0.6857063
|
0
|
Convolve two N-dimensional arrays using FFT. See convolve.
|
import numpy as np
from numpy import array
from numpy.fft import fftn, ifftn


def _centered(arr, newshape):
    # Crop the central `newshape` portion of `arr` (mirrors scipy.signal's private helper).
    newshape = np.asarray(newshape)
    currshape = np.array(arr.shape)
    startind = (currshape - newshape) // 2
    endind = startind + newshape
    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
    return arr[tuple(myslice)]


def fftconvolve(in1, in2, mode='same'):
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1
    # Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype('int')
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)
    raise ValueError("mode must be 'full', 'same' or 'valid'")
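A quick sanity check for the FFT-based convolution above, assuming the fftconvolve and _centered definitions from the previous block are in scope: a linear convolution computed through the FFT should match NumPy's direct np.convolve up to floating-point error.

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([0.5, 0.5])

full = fftconvolve(a, b, mode='full')
print(np.allclose(full, np.convolve(a, b, mode='full')))  # True, up to round-off

same = fftconvolve(a, b, mode='same')
print(same.shape)  # (4,) -- cropped to the shape of the larger input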
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fftconvolve(array, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpynp.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x * y))))",
"def weightedfftconvolve(in1, in2, mode=\"full\", weighting=\"none\", displayplots=False):\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if np.isscalar(in1) and np.isscalar(in2): # scalar inputs\n return in1 * in2\n elif not in1.ndim == in2.ndim:\n raise ValueError(\"in1 and in2 should have the same rank\")\n elif in1.size == 0 or in2.size == 0: # empty arrays\n return np.array([])\n\n s1 = np.array(in1.shape)\n s2 = np.array(in2.shape)\n complex_result = np.issubdtype(in1.dtype, np.complex) or np.issubdtype(\n in2.dtype, np.complex\n )\n size = s1 + s2 - 1\n\n if mode == \"valid\":\n _check_valid_mode_shapes(s1, s2)\n\n # Always use 2**n-sized FFT\n fsize = 2 ** np.ceil(np.log2(size)).astype(int)\n fslice = tuple([slice(0, int(sz)) for sz in size])\n if not complex_result:\n fft1 = rfftn(in1, fsize)\n fft2 = rfftn(in2, fsize)\n theorigmax = np.max(\n np.absolute(irfftn(gccproduct(fft1, fft2, \"none\"), fsize)[fslice])\n )\n ret = irfftn(\n gccproduct(fft1, fft2, weighting, displayplots=displayplots), fsize\n )[fslice].copy()\n ret = irfftn(\n gccproduct(fft1, fft2, weighting, displayplots=displayplots), fsize\n )[fslice].copy()\n ret = ret.real\n ret *= theorigmax / np.max(np.absolute(ret))\n else:\n fft1 = fftpack.fftn(in1, fsize)\n fft2 = fftpack.fftn(in2, fsize)\n theorigmax = np.max(\n np.absolute(fftpack.ifftn(gccproduct(fft1, fft2, \"none\"))[fslice])\n )\n ret = fftpack.ifftn(\n gccproduct(fft1, fft2, weighting, displayplots=displayplots)\n )[fslice].copy()\n ret *= theorigmax / np.max(np.absolute(ret))\n\n # scale to preserve the maximum\n\n if mode == \"full\":\n return ret\n elif mode == \"same\":\n return _centered(ret, s1)\n elif mode == \"valid\":\n return _centered(ret, s1 - s2 + 1)",
"def fftconvolve(in1, in2, mode=\"full\", axis=None):\n\ts1 = np.array(in1.shape)\n\ts2 = np.array(in2.shape)\n\tcomplex_result = (np.issubdtype(in1.dtype, np.complex) or\n\t\t\t\t\t np.issubdtype(in2.dtype, np.complex))\n\n\tif axis is None:\n\t\tsize = s1 + s2 - 1\n\t\tfslice = tuple([ slice(0, int(sz)) for sz in size ])\n\telse:\n\t\tequal_shapes = s1 == s2\n\t\t# allow equal_shapes[axis] to be False\n\t\tequal_shapes[ axis ] = True\n\t\tassert equal_shapes.all(), 'Shape mismatch on non-convolving axes'\n\t\tsize = s1[ axis ] + s2[ axis ] - 1\n\t\tfslice = [ slice(l) for l in s1 ]\n\t\tfslice[ axis ] = slice(0, int(size))\n\t\tfslice = tuple(fslice)\n\n\t# Always use 2**n-sized FFT\n\tfsize = 2**int(np.ceil(np.log2(size)))\n\tif axis is None:\n\t\tIN1 = fftpack.fftn(in1, fsize)\n\t\tIN1 *= fftpack.fftn(in2, fsize)\n\t\tret = fftpack.ifftn(IN1)[ fslice ].copy()\n\telse:\n\t\tIN1 = fftpack.fft(in1, fsize, axis=axis)\n\t\tIN1 *= fftpack.fft(in2, fsize, axis=axis)\n\t\tret = fftpack.ifft(IN1, axis=axis)[ fslice ].copy()\n\tdel IN1\n\tif not complex_result:\n\t\tret = ret.real\n\tif mode == \"full\":\n\t\treturn ret\n\telif mode == \"same\":\n\t\tif np.product(s1, axis=0) > np.product(s2, axis=0):\n\t\t\tosize = s1\n\t\telse:\n\t\t\tosize = s2\n\t\treturn signaltools._centered(ret, osize)\n\telif mode == \"valid\":\n\t\treturn signaltools._centered(ret, abs(s2 - s1) + 1)",
"def fftconvolve(in1, in2, mode=\"full\", axis=None):\r\n s1 = np.array(in1.shape)\r\n s2 = np.array(in2.shape)\r\n complex_result = (np.issubdtype(in1.dtype, np.complex) or\r\n np.issubdtype(in2.dtype, np.complex))\r\n\r\n if axis is None:\r\n size = s1 + s2 - 1\r\n fslice = tuple([slice(0, int(sz)) for sz in size])\r\n else:\r\n equal_shapes = s1 == s2\r\n # allow equal_shapes[axis] to be False\r\n equal_shapes[axis] = True\r\n assert equal_shapes.all(), 'Shape mismatch on non-convolving axes'\r\n size = s1[axis] + s2[axis] - 1\r\n fslice = [slice(l) for l in s1]\r\n fslice[axis] = slice(0, int(size))\r\n fslice = tuple(fslice)\r\n\r\n # Always use 2**n-sized FFT\r\n fsize = 2 ** int(np.ceil(np.log2(size)))\r\n if axis is None:\r\n IN1 = fftpack.fftn(in1, fsize)\r\n IN1 *= fftpack.fftn(in2, fsize)\r\n ret = fftpack.ifftn(IN1)[fslice].copy()\r\n else:\r\n IN1 = fftpack.fft(in1, fsize, axis=axis)\r\n IN1 *= fftpack.fft(in2, fsize, axis=axis)\r\n ret = fftpack.ifft(IN1, axis=axis)[fslice].copy()\r\n del IN1\r\n if not complex_result:\r\n ret = ret.real\r\n if mode == \"full\":\r\n return ret\r\n elif mode == \"same\":\r\n if np.product(s1, axis=0) > np.product(s2, axis=0):\r\n osize = s1\r\n else:\r\n osize = s2\r\n return signaltools._centered(ret, osize)\r\n elif mode == \"valid\":\r\n return signaltools._centered(ret, abs(s2 - s1) + 1)",
"def fft_convolve2d(x,y):\n fr = fft.fft2(x)\n fr2 = fft.fft2(np.flipud(np.fliplr(y)))\n m,n = fr.shape\n cc = np.real(fft.ifft2(fr*fr2));\n cc = np.roll(cc, -int(m/2)+1,axis=0)\n cc = np.roll(cc, -int(n/2)+1,axis=1)\n return cc",
"def convolve(signal, filter):\r\n\r\n # Make the signal and filter the correct size\r\n padded_signal, padded_filter = preprocess(signal, filter) # Constant time\r\n fft_signal = fft(padded_signal) # Log(n) complexity\r\n fft_filter = fft(padded_filter) # Log(n) complexity\r\n filtered_signal = np.multiply(fft_signal, fft_filter) # Element wise multiply (p multiplies)\r\n time_signal = inverse_fft(filtered_signal) # O(N^2)\r\n # Remove excess zeros\r\n time_signal = postprocess(time_signal, signal.size, filter.size) # O(N)\r\n print(\"Done Filtering\")\r\n # return np.convolve(filter, signal) # Replace with your fft implementation\r\n return time_signal",
"def _cconvolve(x, H, nfft, wlen, axis):\n \n # pad with wlen-1 zeros for overlap & FFT\n x = pad_along_axis(x, [0, wlen - 1], axis=axis)\n xf = np.fft.rfft(x, nfft, axis=axis)\n \n # take product with window in freq. domain\n product = multiply_along_axis(xf, H, axis=axis)\n\n # back transform to sample domain and return\n return np.fft.irfft(product, axis=axis).real",
"def convolve_fft(array, kernel):\n\n array = np.asarray(array, dtype=np.cfloat)\n kernel = np.asarray(kernel, dtype=np.cfloat)\n\n if array.ndim != kernel.ndim:\n raise ValueError(\"Image and kernel must have same number of \"\n \"dimensions\")\n\n array_shape = array.shape\n kernel_shape = kernel.shape\n new_shape = np.array(array_shape) + np.array(kernel_shape)\n\n array_slices = []\n kernel_slices = []\n for (new_dimsize, array_dimsize, kernel_dimsize) in zip(\n new_shape, array_shape, kernel_shape):\n center = new_dimsize - (new_dimsize + 1) // 2\n array_slices += [slice(center - array_dimsize // 2,\n center + (array_dimsize + 1) // 2)]\n kernel_slices += [slice(center - kernel_dimsize // 2,\n center + (kernel_dimsize + 1) // 2)]\n\n array_slices = tuple(array_slices)\n kernel_slices = tuple(kernel_slices)\n\n if not np.all(new_shape == array_shape):\n big_array = np.zeros(new_shape, dtype=np.cfloat)\n big_array[array_slices] = array\n else:\n big_array = array\n\n if not np.all(new_shape == kernel_shape):\n big_kernel = np.zeros(new_shape, dtype=np.cfloat)\n big_kernel[kernel_slices] = kernel\n else:\n big_kernel = kernel\n\n array_fft = np.fft.fftn(big_array)\n kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))\n\n rifft = np.fft.ifftn(array_fft * kernel_fft)\n\n return rifft[array_slices].real",
"def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))",
"def fwd_conv2d_fft(input_mat: np.ndarray,\n filter_mat: np.ndarray) -> np.ndarray:\n flipped_mat = filter_mat[::-1, ::-1]\n return fftconvolve(input_mat, flipped_mat, mode='full')",
"def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')",
"def fftdeconvolve(image, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpy.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x / y))))",
"def test_fftconvolve_numerics(self, leading_dims, lengths, mode):\n L_x, L_y = lengths\n\n x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)\n y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)\n\n actual = F.fftconvolve(x, y, mode=mode)\n\n expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1, mode=mode)\n expected = torch.tensor(expected)\n\n self.assertEqual(expected, actual)",
"def convolve(self, X):\n if X.shape[1:-1] < self.basis._axes_shape:\n X = self._zero_pad(X, self.basis._axes_shape)\n FX = self.basis._fftn(X)\n Fy = self._sum(FX * self._Fkernel)\n correlation = self.basis._ifftn(Fy)\n return np.fft.fftshift(correlation, axes=self.basis._axes)",
"def fft2(a, shift_in=False, shift_out=False):\n s = a.shape\n if len(s) != 2:\n raise GalSimValueError(\"Input array must be 2D.\",s)\n M, N = s\n Mo2 = M // 2\n No2 = N // 2\n\n if M != Mo2*2 or N != No2*2:\n raise GalSimValueError(\"Input array must have even sizes.\",s)\n\n if a.dtype.kind == 'c':\n a = a.astype(np.complex128, copy=False)\n xim = ImageCD(a, xmin = -No2, ymin = -Mo2)\n kim = ImageCD(BoundsI(-No2,No2-1,-Mo2,Mo2-1))\n with convert_cpp_errors():\n _galsim.cfft(xim._image, kim._image, False, shift_in, shift_out)\n kar = kim.array\n else:\n a = a.astype(np.float64, copy=False)\n xim = ImageD(a, xmin = -No2, ymin = -Mo2)\n\n # This works, but it's a bit slower.\n #kim = ImageCD(BoundsI(-No2,No2-1,-Mo2,Mo2-1))\n #_galsim.cfft(xim._image, kim._image, False, shift_in, shift_out)\n #kar = kim.array\n\n # Faster to start with rfft2 version\n rkim = ImageCD(BoundsI(0,No2,-Mo2,Mo2-1))\n with convert_cpp_errors():\n _galsim.rfft(xim._image, rkim._image, shift_in, shift_out)\n # This only returns kx >= 0. Fill out the full image.\n kar = np.empty( (M,N), dtype=np.complex128)\n rkar = rkim.array\n if shift_out:\n kar[:,No2:N] = rkar[:,0:No2]\n kar[0,0:No2] = rkar[0,No2:0:-1].conjugate()\n kar[1:Mo2,0:No2] = rkar[M-1:Mo2:-1,No2:0:-1].conjugate()\n kar[Mo2:M,0:No2] = rkar[Mo2:0:-1,No2:0:-1].conjugate()\n else:\n kar[:,0:No2] = rkar[:,0:No2]\n kar[0,No2:N] = rkar[0,No2:0:-1].conjugate()\n kar[1:M,No2:N] = rkar[M-1:0:-1,No2:0:-1].conjugate()\n return kar",
"def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result",
"def apply_filter(data, filter_bank, sfreq): \n if data.ndim == 1:\n filtered = np.zeros((1, filter_bank.shape[0], sfreq))\n for filt in range(filter_bank.shape[0]):\n filtered[0, filt, :] = np.convolve(filter_bank[filt,:], data)[int(sfreq-sfreq/2):int(sfreq+sfreq/2)]\n elif data.ndim == 2:\n filtered = np.zeros((data.shape[0], filter_bank.shape[0], sfreq))\n for chan in range(data.shape[0]):\n for filt in range(filter_bank.shape[0]):\n filtered[chan, filt, :] = np.convolve(filter_bank[filt, :], \\\n data[chan,:])[int(sfreq-sfreq/2):int(sfreq+sfreq/2)] # mode=\"full\"\n return filtered",
"def test_fft_complex_2d():\n\ta, b, c = np.meshgrid([0, 1, 0, 0], [0, 1j, 1j], [0, 1, 1, 1])\n\tdummy_array = xr.DataArray(a * b * c, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'y': 6, 'z': 8}, dim=['y', 'z'],\n\t\t dx={'y': 0.01, 'z': 0.02})\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.fftn(a * b * c, s=(8, 6), axes=(2, 1)))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.fftfreq(6, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(8, d=0.02))\n\tassert ('x', 'f_y', 'f_z') == spectrum_dims",
"def fft1d_c2c(x):\n return torch.fft(x, signal_ndim=1)",
"def moffat_convolution_fft(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. *math.sqrt(2.**(1./beta)-1.))\n\n im_kernel_array = moffat_kernel(n_fwhm,beta,r_s)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)",
"def correlate(array1,array2):\r\n arrayout = np.conj(fft2(array1)) * fft2(array2)\r\n return ifft2(arrayout)",
"def correlate(fft1,fft2, maxlag,dt, Nfft, method=\"cross-correlation\"):\n\n if fft1.ndim == 1:\n nwin=1\n elif fft1.ndim == 2:\n nwin= int(fft1.shape[0])\n\n t0=time.time()\n corr=np.zeros(shape=(nwin,Nfft),dtype=np.complex64)\n fft1_globe = cuda.to_device(fft1[:,:Nfft//2].reshape(fft1.size,))\n fft2_globe = cuda.to_device(fft2[:,:Nfft//2].reshape(fft2.size,))\n corr_globe = cuda.device_array(shape=(nwin*(Nfft//2),),dtype=np.complex64)\n \n threadsperblock = 2000\n blockspergrid = math.ceil(fft1_globe.size/threadsperblock)\n \n if method == 'deconv':\n decon_gpu[threadsperblock,blockspergrid](fft1_globe,fft2_globe,corr_globe)\n elif method =='coherence':\n coherence_gpu[threadsperblock,blockspergrid](fft1_globe,fft2_globe,corr_globe)\n\n tcorr = corr_globe.copy_to_host()\n corr = tcorr.reshape(nwin,Nfft//2)\n\n ncorr = np.zeros(shape=Nfft,dtype=np.complex64)\n ncorr[:Nfft//2] = np.mean(corr,axis=0)\n ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)\n ncorr[0]=complex(0,0)\n ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))\n\n t1=time.time()\n print('it takes '+str(t1-t0)+' s')\n\n tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt\n ind = np.where(np.abs(tcorr) <= maxlag)[0]\n ncorr = ncorr[ind]\n tcorr = tcorr[ind]\n\n return ncorr,tcorr",
"def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result",
"def conv1d(data_arr, kernel_arr, tarr_len, discrete_kernel_shape, mode='valid'):\n\n assert(data_arr.ndim == 2)\n output_shape = discrete_kernel_shape[1:]\n if (kernel_arr.ndim == 2):\n # Algorithm assumes a \"to\" axis on the kernel. Add it.\n kernel_arr = add_axes(kernel_arr, 1, 'before last')\n discrete_kernel_shape = discrete_kernel_shape[0:1] + (1,) + discrete_kernel_shape[1:2]\n else:\n check(kernel_arr.ndim == 3)\n\n # Convolutions leave the time component on the inside, but we want it on the outside\n # So we do the iterations in reverse order, and flip the result with transpose()\n # The result is indexed as [tidx][to idx][from idx]\n if cf.use_theano:\n # We use slices from_idx:from_idx+1 because conv2d expects 2D objects\n # We then index [:,0] to remove the spurious dimension\n result = T.stack(\n [ T.stack(\n [ T.signal.conv.conv2d(data_arr[:, from_idx:from_idx+1 ],\n kernel_arr[:, to_idx, from_idx:from_idx+1 ],\n image_shape = (tarr_len, 1),\n filter_shape = (discrete_kernel_shape[0], 1),\n border_mode = mode)[:,0]\n for to_idx in np.arange(discrete_kernel_shape[1]) ] )\n for from_idx in np.arange(discrete_kernel_shape[2]) ] ).T\n else:\n assert(discrete_kernel_shape == kernel_arr.shape)\n assert(tarr_len == data_arr.shape[0])\n result = np.stack(\n [ np.stack(\n [ scipy.signal.convolve(data_arr[:, from_idx ],\n kernel_arr[:, to_idx, from_idx ],\n mode=mode)\n for to_idx in np.arange(kernel_arr.shape[1]) ] )\n for from_idx in np.arange(kernel_arr.shape[2]) ] ).T\n\n return result.reshape((tarr_len - discrete_kernel_shape[0] + 1,) + output_shape)",
"def convolve_power(power_spectrum, window_power, axis=-1):\n \n data_corr = fft.ifft(power_spectrum, axis=axis)\n window_corr = fft.ifft(window_power, axis=axis)\n true_corr = data_corr * window_corr\n true_power = fft.fft(true_corr, axis=axis, overwrite_x=True)\n return true_power",
"def fft2(X):\r\n # return scipy.fftpack.fft2(X)\r\n return np.fft.fft2(X)",
"def gauss_convolution_fft(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)",
"def fdsb(self, signal: np.ndarray, **kwargs) -> np.ndarray:\n\t\tdelays = kwargs.get(\"delays\", self.freq_delays)\n\t\tbatch_size = kwargs.get(\"batch_size\", 1)\n\n\t\tSignal = np.fft.fft(signal, axis=0)[:self.num_samples // 2,:,None,None]\n\n\t\tconv_signal = np.empty((self.num_samples // 2, delays.shape[1], \n\t\t\t\t\t\t\tdelays.shape[2] , delays.shape[3]))\n\n\t\t# For signals with many samples, a bigger batch size\n\t\t# can speed up the algorithm and prevent memory errors\n\t\tps = self.phi.shape[1]\n\t\tlength = int(np.ceil(ps/batch_size))\n\t\t\n\t\tfor b in range(batch_size):\n\t\t\tconv_signal[...,b*length:b*length+length] = (np.fft.ifft(\n\t\t\t\tSignal * delays[...,b*length:b*length+length], \n\t\t\t\taxis=0)).real\n\n\t\tsquared_conv = ((conv_signal.sum(1)) ** 2).sum(0)\n\n\t\treturn squared_conv",
"def deconvolve_power(power_spectrum, window_power, axis=-1):\n \n data_corr = fft.ifft(power_spectrum, axis=axis)\n window_corr = fft.ifft(window_power, axis=axis)\n true_corr = data_corr / window_corr\n true_power = fft.fft(true_corr, axis=axis, overwrite_x=True)\n return true_power",
"def oaconvolve(pro, window, axis, mode, nfft_factor=32):\n\n # compute the near optimal nfft number and FFT of window\n nfft = optimal_nffts(window) * nfft_factor\n\n # fallback to optimal nffts if nfft_factor makes nfft > data size.\n if nfft - len(window) + 1 > pro.shape[axis]:\n nfft = optimal_nffts(window)\n\n wlen = len(window)\n H = np.fft.rfft(window, nfft)\n\n # set the step size based on optimal nfft and wlen\n step = nfft - wlen + 1\n\n nsegments = int(np.ceil(pro.shape[axis] / step))\n\n # create the wlen-1 samples overlap\n overlap_shape = list(pro.shape)\n overlap_shape[axis] = wlen - 1\n overlap = np.zeros(overlap_shape)\n\n # FIFOArray holding chunksize arrays but yielding step size arrays\n fifo = FIFOArray(step, axis)\n\n # Helper funcs\n def _cconvolve(x, H, nfft, wlen, axis):\n \"\"\"Circularly convolves a data segment, x, with H, the \n fft of the window of len wlen along axis.\"\"\"\n \n # pad with wlen-1 zeros for overlap & FFT\n x = pad_along_axis(x, [0, wlen - 1], axis=axis)\n xf = np.fft.rfft(x, nfft, axis=axis)\n \n # take product with window in freq. domain\n product = multiply_along_axis(xf, H, axis=axis)\n\n # back transform to sample domain and return\n return np.fft.irfft(product, axis=axis).real\n\n def _add_overlap(y, overlap, wlen, axis):\n \"\"\"Adds overlap to first wlen-1 samples of yth segment \n along axis.\"\"\"\n\n slicer = [slice(None)] * y.ndim\n slicer[axis] = slice(0, wlen-1)\n y[tuple(slicer)] += overlap\n\n return y\n\n segment = 0\n for arr in pro:\n\n fifo.put(arr)\n\n while fifo.qsize() > step:\n \n # get data segment & cicularly convolve\n arr = fifo.get()\n z = _cconvolve(arr, H, nfft, wlen, axis)\n \n #split segment & next overlap\n y, new_overlap = split_along_axis(z, step, axis=axis)\n\n # add previous overlap & update overlap\n y = _add_overlap(y, overlap, wlen, axis)\n overlap = new_overlap\n\n #apply the boundary mode to first and last segments\n if segment == 0:\n y = _oa_boundary(y, window, 'left', axis, mode)\n \n #update segment\n segment += 1\n \n yield y\n \n else:\n # put new data into fifo\n continue\n else:\n \n if not fifo.empty():\n \n # get all remaining in queue & circularly convolve\n arr = fifo.queue\n z = _cconvolve(arr, H, nfft, wlen, axis)\n\n # last segment has wlen - 1 overhang\n last = arr.shape[axis] + wlen - 1\n y = slice_along_axis(z, 0, last, axis=axis)\n\n #add previous overlap\n y = _add_overlap(y, overlap, wlen, axis)\n\n yield _oa_boundary(y, window, 'right', axis, mode)"
] |
[
"0.7239143",
"0.71845514",
"0.7157312",
"0.70784557",
"0.7055453",
"0.6915631",
"0.6884482",
"0.67564625",
"0.66061014",
"0.66050863",
"0.6434134",
"0.63963294",
"0.6372463",
"0.63168097",
"0.62808156",
"0.61976945",
"0.61933094",
"0.61032593",
"0.60985255",
"0.6070625",
"0.6066416",
"0.6053868",
"0.60143924",
"0.6005553",
"0.6002966",
"0.596105",
"0.5957",
"0.59185183",
"0.59086543",
"0.5892254"
] |
0.7394281
|
0
|
Sends each order transaction to the database
|
def send_to_db(ck_transactions):
    db = DDDB()
    db.add_orders(ck_transactions)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def send_order(self, p_order, p_in_out, count):\n pass",
"def add_orders_on(user, order_date, items):\n for item in items:\n order = BreadOrder(\n user=user,\n date=order_date,\n type=item\n )\n db.session.add(order)\n db.session.commit()",
"def post(cls):\n data = request.get_json() # token + list of item ids [1, 2, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n # Iterate over items and retrieve them from the database\n for _id, _count in item_id_quantities.most_common():\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n \n items.append(ItemInOrder(item_id=_id, quantity=_count))\n \n order = OrderModel(items = items, status=\"pending\")\n order.save_to_db()\n\n order.set_status(\"failed\") # assume the order would fail until it's completed\n #order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n\n return order_schema.dump(order), 200",
"def post(cls):\n data = request.get_json() # token ,item_ids [1, 3, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n for _id, count in item_id_quantities.most_common(): # [(5,3),(3,2),(1,1)]\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n\n \"\"\"ItemsInOrder get item_id and quantity, however\n order_id will be set later on,\n when items is passed into OrderModel, because back_populates=\"order\"\n it goes over to order column of ItemsInOrder table,\n and set order_id for each of those item in OrderModel\n to be the order to which you have added those items\"\"\"\n items.append(ItemsInOrder(item_id=_id, quantity=count))\n\n # items is a list of ItemsInOrder obj\n order = OrderModel(items=items, status=\"pending\") # pending until send to Stripe\n order.save_to_db() # this does not submit to Stripe\n\n try:\n order.set_status(\"failed\") # assume the order would fail until it's completed\n order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n return order_schema.dump(order), 200\n # the following error handling is advised by Stripe, although the handling implementations are identical,\n # we choose to specify them separately just to give the students a better idea what we can expect\n except error.CardError as e:\n # Since it's a decline, stripe.error.CardError will be caught\n return e.json_body, e.http_status\n except error.RateLimitError as e:\n # Too many requests made to the API too quickly\n return e.json_body, e.http_status\n except error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n return e.json_body, e.http_status\n except error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n return e.json_body, e.http_status\n except error.APIConnectionError as e:\n # Network communication with Stripe failed\n return e.json_body, e.http_status\n except error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n return e.json_body, e.http_status\n except Exception as e:\n # Something else happened, completely unrelated to Stripe\n print(e)\n return {\"message\": gettext(\"order_error\")}, 500",
"def run(self):\r\n db_conn = order_db_connector()\r\n db_conn.process_order(self.id)\r\n schedule.every(self.order_max_lifetime).seconds.do(self.order_finish).tag(self.id)\r\n #schedule.every(5).seconds.do(self.trading_main).tag(f'{self.id}_main',self.id)\r\n self.trading_main()\r\n logger.info(\"ENDED trading main\")\r\n \"\"\"\r\n Add order status \r\n \"\"\"\r\n\r\n #Clear scheduled task to avoid task stacking in scheduler\r",
"def manage_orders(self):\r\n for coin, pair_info in self.usdt_pairs.items():\r\n orders = self.kc.get_orders(pair_info[\"symbol\"], status=\"active\")\r\n self.log(coin, orders[\"totalNum\"])\r\n if orders[\"totalNum\"]:\r\n self.log(len(orders[\"items\"]))\r\n for order in orders[\"items\"]:\r\n self.log(order)\r\n\r\n self.log(mp.mpf())\r\n\r\n # ticker = current price action, bid/ask, etc\r\n ticker = self.kc.get_ticker(pair_info[\"symbol\"])\r\n self.log(ticker)\r\n return",
"def orders(self, orders):\n\n self._orders = orders",
"def orders(self, orders):\n\n self._orders = orders",
"def transaction_run():\n print('working...')\n # Get all transaction\n transactions = executor.submit(Transaction.query.filter_by(done=False).all)\n print(transactions.result())\n # Check if thier a transactions\n if transactions.result():\n # Go through each transaction\n for tran in transactions.result():\n print(\"Looping...\")\n # print(trans)\n # Get the currency account for the source user\n currency = executor.submit(Currency.query.filter_by(user_id=tran.user_id).first).result()\n print(currency)\n # target_user = executor.submit(User.query.filter_by(id=tran.target_user).first).result()\n # print(target_user)\n # Get the currency account for the target user\n target = executor.submit(Currency.query.filter_by(user_id=tran.target_user).first).result()\n # Get the transaction account for the target user\n trans_target = executor.submit(Transaction.query.filter_by(user_id=tran.target_user).first).result()\n ### # TODO:\n trans_source = executor.submit(Transaction.query.filter_by(user_id=tran.user_id).first).result()\n # update replace all tran with trans_source\n\n print(tran)\n # print(target_user)\n print(target)\n print(trans_target)\n # Check if the target user has account\n if target:\n # If the user send to himself fail the transaction\n if tran.user_id == tran.target_user:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n else:\n # If the currency type is bitcoin\n # Check if the user has a bitcoin ID\n if tran.currency_Type.lower() == \"bitcoin\":\n if not currency.bitcoin_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have a bitcoin account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a bitcoin ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.bitcoin_balance:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target user\n else:\n balance = currency.bitcoin_balance - tran.currency_amount\n # updated_balance = str(balance)\n currency.bitcoin_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.bitcoin_balance + tran.currency_amount\n target.bitcoin_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n # If the currency type is ethereum\n # Check if the user has a ethereum ID\n elif tran.currency_Type.lower() == \"ethereum\":\n if not currency.ethereum_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You don't have a ethereum account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a ethereum ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.ethereum_balance:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have enough money!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You exceed the max amount!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target\n else:\n balance = currency.ethereum_balance - tran.currency_amount\n currency.ethereum_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.ethereum_balance + tran.currency_amount\n target.ethereum_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # if the currency type not bitcoin or ethereum\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If the user has no currency account\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n\n # Finish the transaction request\n print(tran)\n tran.done = True\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n print('Done!!!!')",
"def do_orders(self,args):\n try:\n orders = bitstamp.open_orders()\n orders = sorted(orders, key=lambda x: float(x['price']))\n buytotal,selltotal = 0,0\n numbuys,numsells = 0,0\n amtbuys,amtsells = 0,0\n buyavg,sellavg = 0,0\n numorder = 0 \n for order in orders:\n ordertype=\"Sell\" if order['type'] == 1 else \"Buy\"\n numorder += 1\n print '%s = %s | $%s @ %s BTC %s' % (numorder,ordertype,order['price'],order['amount'],order['id']) \n if order['type'] == 0:\n buytotal += D(order['price'])*D(order['amount'])\n numbuys += D('1')\n amtbuys += D(order['amount'])\n elif order['type'] == 1:\n selltotal += D(order['price'])*D(order['amount'])\n numsells += D('1')\n amtsells += D(order['amount'])\n if amtbuys:\n buyavg = D(buytotal/amtbuys).quantize(cPrec)\n if amtsells:\n sellavg = D(selltotal/amtsells).quantize(cPrec)\n print \"There are %s Buys. There are %s Sells\" % (numbuys,numsells)\n print \"Avg Buy Price: $%s. Avg Sell Price: $%s\" % (buyavg,sellavg)\n except Exception as e:\n print e",
"def send_commit(): \r\n \r\n commit_id = request.args[0]\r\n r_commit = db.logs_commit[commit_id]\r\n \r\n # Create a new send record\r\n send_id = db.logs_send.insert( datetime = request.utcnow,\r\n inventory_store_id = r_commit.inventory_store_id,\r\n to_inventory_store_id = r_commit.for_inventory_store_id\r\n )\r\n \r\n #Only select items which are in the warehouse\r\n commit_items = db( (db.logs_commit_item.logs_commit_id == commit_id) & \\\r\n (db.logs_commit_item.logs_req_item_id == db.logs_req_item.id) & \\\r\n (db.logs_req_item.item_id == db.inventory_store_item.item_id) & \\\r\n (db.logs_commit_item.deleted == False) & \\\r\n (db.logs_req_item.deleted == False) & \\\r\n (db.inventory_store_item.deleted == False)\r\n ).select( db.inventory_store_item.id,\r\n db.logs_commit_item.quantity,\r\n db.logs_commit_item.item_packet_id,\r\n ) \r\n \r\n for commit_item in commit_items: \r\n send_item_id = db.logs_send_item.insert( logs_send_id = send_id,\r\n store_item_id = commit_item.inventory_store_item.id,\r\n quantity = commit_item.logs_commit_item.quantity,\r\n item_packet_id = commit_item.logs_commit_item.item_packet_id \r\n ) \r\n \r\n # Redirect to send\r\n redirect(URL(r = request,\r\n c = \"logs\",\r\n f = \"send\",\r\n args = [send_id]\r\n )\r\n )",
"def batch_transfer(self):\n ticket_range = self.zendesk.ticket_range()\n for i in range(1, ticket_range):\n tickets = self.zendesk.get_list_of_tickets(i)\n for ticket in tickets[\"tickets\"]:\n ticket_id = ticket[\"id\"]\n self.transfer_ticket(ticket_id)",
"def do_send(self, args):\n if not self._check_args(args):\n return\n else:\n params = args.split()\n result, reason, transactions = self.wallet.generate_transaction(from_address=params[0],\n to_address=params[1],\n value=params[2])\n if result:\n self.wallet.send_transactions(transactions)",
"def send_orders_created(order_ids):\n ids = [{\"id\": i} for i in order_ids]\n return make_response(jsonify({\"orders\": ids}), 201)",
"def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []",
"def jsonrpc_puttxn_batch(self, txns, broadcast = True):\n if ADD_NETWORK_DELAY:\n time.sleep(random.uniform(NETWORK_DELAY_MIN, NETWORK_DELAY_MAX))\n\n if self.node.storage.txns_received == 0:\n self.node.storage.time_measurement = time.time()\n self.node.storage.txns_received += 1\n if broadcast:\n self.node.storage.broadcast_txn_batch(txns)\n for txn in txns:\n self.jsonrpc_puttxn(txn, broadcast = False)",
"def insert_many_execute(self) -> None:\n self.connection.isolation_level = None\n self.cursor.execute('BEGIN TRANSACTION')\n for i in self.__sql_buffer.split(';'):\n self.cursor.execute(i)\n self.__sql_buffer = \"\"\n self.cursor.execute('COMMIT')",
"def SaveOrder(self, order, tenantId, userId):\n\t\tif order:\n\t\t\tif order[\"Id\"]:\n\t\t\t\torderid = order[\"Id\"]\n\t\t\t\to = self.GetOrderById(orderid, tenantId)\n\t\t\t\tif o:\n\t\t\t\t\t#o.TenantId = tenantId\n\t\t\t\t\to.CustomerId = order[\"CustomerId\"]\n\t\t\t\t\to.OrderAmount = order[\"OrderAmount\"]\n\t\t\t\t\to.PaidAmount = order[\"PaidAmount\"]\n\t\t\t\t\to.IpAddress = order['IpAddress']\n\t\t\t\t\tif order['DueDate'] and len(order['DueDate']) > 0:\n\t\t\t\t\t\to.DueDate = dateutil.parser.parse(order['DueDate'])\n\t\t\t\t\tif order['OrderDate'] and len(order['OrderDate']) > 0:\n\t\t\t\t\t\to.OrderDate = dateutil.parser.parse(order['OrderDate'])\n\t\t\t\t\to.UpdatedBy = userId\n\t\t\t\t\to.UpdatedOn = datetime.utcnow()\n\n\t\t\t\t\tlineitems = order[\"LineItems\"]\n\t\t\t\t\tif lineitems:\n\t\t\t\t\t\to.LineItemsCount = len(lineitems)\n\t\t\t\t\t\to.OrderAmount = sum([x[\"SellPrice\"] * x[\"Quantity\"] for x in lineitems])\n\t\t\t\t\t\tDBSession.query(LineItem).filter(LineItem.OrderId == orderid).delete()\n\t\t\t\t\t\tself.SaveOrderLineItems(o.Id, lineitems)\n\t\t\t\t\telse:\n\t\t\t\t\t\to.LineItemsCount = 0\n\t\t\t\t\t\to.OrderAmount = 0\n\t\t\t\t\t\tDBSession.query(LineItem).filter(LineItem.OrderId == orderid).delete()\n\n\t\t\t\t\tpayments = order[\"Payments\"]\n\t\t\t\t\tif payments:\n\t\t\t\t\t\to.PaidAmount = sum([x[\"PaidAmount\"] for x in payments])\n\t\t\t\t\t\tDBSession.query(OrderPayment).filter(OrderPayment.OrderId == orderid).delete()\n\t\t\t\t\t\tself.SaveOrderPayments(o.Id, payments, userId)\n\t\t\t\t\telse:\n\t\t\t\t\t\to.PaidAmount = 0\n\t\t\t\t\t\tDBSession.query(OrderPayment).filter(OrderPayment.OrderId == orderid).delete()\n\t\tpass",
"def _place_orders_onto_queue(self, order_list: List[OrderEvent]):\n for order_event in order_list:\n self._events.add_event(order_event)",
"def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")",
"def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n #3. Call PayPal to get the transaction\n response = self.client.execute(request)\n return response\n #4. Save the transaction in your database. Implement logic to save transaction to your database for future reference.",
"def _send_email_in_transaction():\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_mail(\n sender_name_email, recipient_email, email_subject,\n cleaned_plaintext_body, cleaned_html_body, bcc_admin,\n reply_to_id=reply_to_id)\n email_models.SentEmailModel.create(\n recipient_id, recipient_email, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())",
"def test_process_order(self):\n expected_contents = self.fill_session_cart()\n\n response = self.client.post(\n self.CHECKOUT_URL, self.build_checkout_form())\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your order was placed.\")\n\n placed_order = OrderInfo.objects.get()\n order_contents = placed_order.ordercontents_set.all()\n # arbitrary 5 seconds to account for some fault\n self.assertTrue(\n timezone.now() - placed_order.ordered < timedelta(seconds=5))\n self.assertEqual(len(expected_contents), len(order_contents))\n for expected in expected_contents:\n db_contents = order_contents.get(menu_item__id=expected['id'])\n dict_from_db = {\n 'id': db_contents.menu_item.id,\n 'name': db_contents.menu_item.name,\n 'price': db_contents.menu_item.price,\n 'amount': db_contents.amount,\n 'cost': db_contents.cost,\n }\n self.assertEqual(expected, dict_from_db)",
"def create_order(request, order, transaction_id):\n\n\n order.transaction_id = transaction_id\n print transaction_id\n #order.ip_address = request.META.get('REMOTE_ADDR')\n order.user = None\n #if request.user.is_authenticated():\n # order.user = request.user\n order.status = Order.SUBMITTED\n\n DBSession.add(order)\n\n\n if order:\n \"\"\" if the order save succeeded \"\"\"\n cart_items = cart.get_cart_items(request).all()\n print \"The items in the cart are: \", len(cart_items)\n\n for ci in cart_items:\n \"\"\" create order item for each cart item \"\"\"\n\n print \"The product is \", ci.product\n oi = OrderItem()\n oi.order_id = order.id\n oi.order = order\n oi.quantity = ci.quantity\n print \"The product id is \", ci.product.id\n oi.product_id = ci.product.id\n oi.product = ci.product\n\n oi.price = ci.price # now using @property\n DBSession.add(oi)\n\n # all set, clear the cart\n cart.empty_cart(request)\n\n ## save profile info for future orders\n #if request.user.is_authenticated():\n # from ecomstore.accounts import profile\n #\n # profile.set(request)\n\n return order",
"def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass",
"async def create_order(request):\n async with transaction(request.app) as trans:\n order_id = await trans.connection.scalar(\n tables.software_order.insert().values(\n purchaser_id=request['auth']['account']['id']\n )\n )\n await trans.commit()\n return web.json_response({'order_id': order_id})",
"def create_order():",
"def order_success(self, request):\n order = self.order_from_request(request)\n\n if not order:\n return self.order_new(request)\n\n if not order.balance_remaining:\n self.set_order_on_request(request, order=None)\n\n\n order_data = OrderData.objects.get(order=order)\n o_data = simplejson.loads(order_data.data)\n\n paymentData = {}\n paymentData['delivery_address2'] = o_data['delivery_address2']\n paymentData['billing_address2'] = o_data['billing_address2']\n paymentData['delivery_date'] = o_data['delivery_date']\n paymentData['delivery_state'] = o_data['delivery_state']\n paymentData['billing_state'] = o_data['billing_state']\n paymentData['salutation'] = o_data['salutation']\n paymentData['contact_number'] = o_data['billing_contact_number']\n\n #try:\n oPayment = OrderPayment.objects.get(order=order)\n oPayment.payment_method = o_data['order_payment_method']\n oPayment.data = simplejson.dumps(paymentData)\n oPayment.save()\n #except:\n # pass\n\n \"\"\"\n order update note\n \"\"\"\n notes = o_data['order_notes']\n order.notes = notes\n order.save()\n\n # st_save_helper(request, order)\n\n \"\"\"\n sbid = None\n\n if 'customer_styleboard' in request.session:\n sbid = request.session.get('customer_styleboard').id\n\n if 'personalize_id' in request.session:\n print \"There's a personalize_id\"\n \"\"\"\n\n current_user = User.objects.get(id=int(request.user.id))\n\n if 'ipn_emailed' in o_data and o_data['ipn_emailed']:\n\n pass\n \n else:\n\n emailed = send_email_order(order, current_user, notes, paymentData['contact_number'], self)\n\n logr.info('emailed order confirmation to : %s from order success' % current_user.email)\n\n\n order_data.delete() # not needed after saving to order payment\\\n \n clear_styleboard_session(request)\n\n try:\n del request.session['customer_styleboard']\n del request.session['personalize_id']\n except:\n pass\n\n return self.render(request, 'plata/shop_order_success.html',\n self.get_context(request, {\n 'order': order,\n 'progress': 'success',\n }))",
"async def process_orderbook(self, data):\n for item in data:\n symbol = item.get(\"symbol\")\n orderbook = {\n \"platform\": self._platform,\n \"symbol\": symbol,\n \"asks\": item.get(\"asks\"),\n \"bids\": item.get(\"bids\"),\n \"timestamp\": tools.utctime_str_to_mts(item[\"timestamp\"])\n }\n EventOrderbook(**orderbook).publish()\n logger.debug(\"symbol:\", symbol, \"orderbook:\", orderbook, caller=self)",
"def trackOrderRequest(self):\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Order.objects.filter(date_of_order__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\t\n\t\treturn lst"
] |
[
"0.6368061",
"0.6128186",
"0.6065711",
"0.6017387",
"0.59723914",
"0.58429277",
"0.5839083",
"0.5839083",
"0.58321863",
"0.5796558",
"0.5778104",
"0.57750577",
"0.5752862",
"0.57170993",
"0.56874627",
"0.5681104",
"0.56760603",
"0.5659921",
"0.5651157",
"0.5630337",
"0.56238633",
"0.56174487",
"0.5615142",
"0.5613755",
"0.5613218",
"0.5611159",
"0.5583063",
"0.5554663",
"0.5549845",
"0.5503124"
] |
0.78310764
|
0
|
Converts a char into the appropriate Position, if any exists.
|
def convert_to_position(char: str) -> Position:
    if char == 'PG':
        return Position.PG
    elif char == 'SG':
        return Position.SG
    elif char == 'SF':
        return Position.SF
    elif char == 'PF':
        return Position.PF
    elif char == 'C':
        return Position.C
    else:
        raise RuntimeError(f"Unrecognized position: {char}")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _charToIndex(self,ch): \n return self.char_dict[ch]",
"def alphabet_position(char):\n if type(char) != type(''):\n return -1\n if len(char) != 1:\n return -1\n if char.isalpha():\n return lowerLetters.find(char.lower())\n return -1",
"def _get_char(self):\r\n self._i += 1\r\n if self._i < len(self._text):\r\n self._char = self._text[self._i]\r\n else:\r\n self._char = -1",
"def location_to_pos(self,row, col):\r\n\r\n pos_row = str(row + 1)\r\n pos_col = chr(col + 97)\r\n return pos_col + pos_row",
"def to_index(self, char):\n return ord(char) - ord(\"A\") - 32",
"def char(self, aIndex, char):\n o = ord(char)\n c = -1\n # space\n if o == 32:\n c = 16\n # dash\n if o == 45:\n c = 17\n # uppercase A-F\n if 65 <= o <= 70:\n c = o - 55\n # lowercase a-f\n if 97 <= o <= 102:\n c = o - 87\n # 0-9\n if 48 <= o <= 57:\n c = o - 48\n\n self.digit(aIndex, c)",
"def updatePosition(char, position):\n line, col = position\n return (line + 1, 1) if (char == '\\n') else (line, col + 1)",
"def set_position():\n\n global character\n return character['Position']",
"def _get_index(self, character):\n OFFSET = 65 # ascii value of 'A' since the first element should be 'A'\n index = ord(character) - OFFSET\n return index",
"def getCharAtPos(self, row, col):\n return self.maze[row][col]",
"def return_index(character: str) -> int:\n if character.islower():\n return ord(character) - ord(\"a\")\n else:\n return ord(character) - ord(\"A\")",
"def convert_char(char):\n if char == 'F' or char == 'L':\n return 0\n \n return 1",
"def translate_level_char(self, c, x, y):\n\n if c == \".\":\n return None\n\n elif c == \",\":\n return GrassCellLight()\n\n elif c == \"p\":\n return PathCellSandy()\n\n elif c == \"#\":\n return WallCellStone()\n \n elif c == \"l\":\n return LavaCell()\n \n elif c == \"-\":\n return MountainCellFloor()\n\n elif c == \"w\":\n return WaterCellLight()\n\n elif c == \"s\":\n return SnowCell()\n\n elif c == \"<\":\n return GrassCellLong()\n\n elif c == \"P\":\n return PathCellStone()\n\n elif c == \">\":\n return GrassCellFlowers()\n\n elif c == \"*\":\n return SandCell()\n\n elif c == \"v\":\n return VolcanicCellFloor()\n\n elif c == \"_\":\n return StoneCellFloor()\n\n elif c == \"b\":\n return BoulderCell()\n\n elif c == \"@\":\n assert self.player == None\n self.player_spawn = (x, y)\n self.player = PlayerEnt(self, x, y, img_player_sprites_down_standing)\n self.ents.append(self.player)\n return GrassCellLight()\n\n else:\n raise Exception(\"invalid level char: %s\" % repr(c))",
"def _insChar(self, char, pos, color):\n char, vertices, glyph = self._extractGlyph(char, glm.vec4(color))\n if not self.text:\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n self.colors.insert(pos, [char, None])\n else:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.colors.insert(pos, [char, color])\n self.text += char\n else:\n self.logger.debug(\"Inserting %r at %d\" % (char, pos))\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n # Arrange vertices\n if pos < len(self.text):\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n color = None\n else:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices, vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n\n self.colors.insert(pos, [char, color])\n if pos < len(self.text):\n self.text = self.text[:pos] + char + self.text[pos:]\n self._updateGlyphs(pos, char)\n else:\n self.text += char",
"def get_position():\n\n return character['Position']",
"def parse_pos(self, pos):\r\n\r\n column = ord(pos[0]) - 97\r\n if len(pos) == 2:\r\n row = ord(pos[1]) - 49\r\n else:\r\n row = 9\r\n return [row, column]",
"def ordChar(self, char):\n char = char.upper()\n num = ord(char) - 65\n return num",
"def _extractGlyph(self, char):\n charno = ord(char)\n vertices = None\n currentGlyph = None\n\n if charno in self.extracted:\n currentGlyph = self.extracted[charno]\n else:\n if char in ('\\n', ):\n # No glyph for these chars\n pass\n else:\n glyph = self.font.getGlyph(charno, self.glyphs)\n if glyph is None:\n save_char = char\n save_charno = charno\n # Use '.notdef' glyph if it is defined in the font\n repcharno = None\n if self.glyphs != GlyphTypes.CBDT_COLOR:\n glyph = self.font.getGlyph(repcharno, self.glyphs)\n if glyph is None:\n # Use WHITE SQUARE gplyph: \\u25A1\n repcharno = 9633\n glyph = self.font.getGlyph(repcharno, self.glyphs)\n if glyph is None:\n # Still None? Replace character with blank\n repcharno = 32\n glyph = self.font.getGlyph(repcharno, self.glyphs)\n charno = 32\n char = chr(charno)\n if glyph is None:\n self.logger.error(\"Font %s has no space\"\n \" character!\" % self.font.fontFile)\n\n self.logger.warning(\"Char %r (%d) not found in\"\n \" font %s has been replaced with chr(%s)\"\n % (save_char, save_charno, self.font.fontFile,\n repcharno))\n\n currentGlyph = glyph\n self.extracted[charno] = currentGlyph\n\n if currentGlyph is not None and 'vertices' in currentGlyph:\n vertices = currentGlyph['vertices'].copy()\n\n return char, vertices, currentGlyph",
"def parsePosition(self, parse):\n\n if len(parse) == 2:\n ch1 = ord(parse[0].lower())\n ch2 = ord(parse[1].lower())\n\n maxNum = 48 + self.board.size # ascii of max row #\n\n # [Row#][ColLetter]] case\n if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size):\n return maxNum - ch1, ch2 - 97 # actual grid indexes of desired position\n\n # [ColLetter][Row#] case\n if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size):\n return maxNum - ch2, ch1 - 97 # actual grid indexes of desired position\n return False",
"def toPosition(self, pos):\n return [ord(pos[0])-ord('a'), int(pos[1])]",
"def toPosition(self, pos):\n return [ord(pos[0])-ord('a'), int(pos[1])]",
"def replace_char_candidate(self, char):\n for couple in self.char_couples:\n for i in range(2):\n if couple[i] == char:\n if i == 0:\n return couple[1]\n else:\n return couple[0]",
"def map_char(self, char):\n for key, pattern in self.char_map.items():\n if char in pattern:\n return key\n return 'U'",
"def _read_char(self):\n if self.read_pos >= len(self.data):\n self.char = \"\"\n else:\n self.char = self.data[self.read_pos]\n\n self.pos = self.read_pos\n self.read_pos += 1",
"def char_to_number(char):\n if not char.isalpha():\n return\n elif char.isupper():\n return (ord(char) - ord(\"A\"))\n else:\n return (ord(char) - ord(\"a\"))",
"def add_char(self, coord, char, modify=False):\n if modify:\n range_y, range_x = self._map_dims\n new_coord = [coord[0]+range_y[0]-1, coord[1]+range_x[0]-1]\n self._screen.addch(new_coord[0], new_coord[1], char)\n self._screen.refresh()\n return new_coord\n else:\n self._screen.addch(coord[0], coord[1], char)\n self._screen.refresh()\n return coord",
"def getchar(words,pos):\n\n\tif pos<0 or pos>=len(words): return None\n\n\treturn words[pos]",
"def text_to_position(string, is_black):\r\n # print(\"string: \", string)\r\n y = int(string[1]) - 1\r\n x = ord(string[0]) - ord('a')\r\n if is_black: #under perspective of black player, the position is flipped\r\n x = 7 - x\r\n y = 7 - y\r\n # print(\"x,y: \", (x, y))\r\n return (x, y)",
"def charToInt(char):\r\n return ord(char);",
"def write_char(self, char, token, string_index=None,\n set_cursor_position=False, z_index=False):\n assert len(char) == 1\n\n char_obj = Char(char, token, z_index)\n char_width = char_obj.get_width()\n\n # In case there is no more place left at this line, go first to the\n # following line. (Also in case of double-width characters.)\n if self._x + char_width > self.size.columns:\n self._y += 1\n self._x = 0\n\n insert_pos = self._y, self._x # XXX: make a Point of this?\n\n if string_index is not None:\n self._cursor_mappings[string_index] = insert_pos\n\n if set_cursor_position:\n self.cursor_position = Point(y=self._y, x=self._x)\n\n # Insertion of newline\n if char == '\\n':\n self._y += 1\n self._x = 0\n self._line_number += 1\n\n # Insertion of a 'visible' character.\n else:\n if char_obj.z_index >= self._buffer[self._y][self._x].z_index:\n self._buffer[self._y][self._x] = char_obj\n\n # When we have a double width character, store this byte in the\n # second cell. So that if this character gets deleted afterwarsd,\n # the ``output_screen_diff`` will notice that this byte is also\n # gone and redraw both cells.\n if char_width > 1:\n self._buffer[self._y][self._x+1] = Char(six.unichr(0))\n\n # Move position\n self._x += char_width\n\n return insert_pos"
] |
[
"0.6826631",
"0.68057626",
"0.66368306",
"0.6616373",
"0.649323",
"0.6439652",
"0.64255637",
"0.6351111",
"0.62918675",
"0.62909377",
"0.6249595",
"0.6187012",
"0.6185425",
"0.61581844",
"0.61502945",
"0.6104441",
"0.6099312",
"0.6068768",
"0.6057841",
"0.6031381",
"0.6031381",
"0.6009619",
"0.600301",
"0.600033",
"0.5982579",
"0.59613866",
"0.59285283",
"0.5927563",
"0.59142935",
"0.59100765"
] |
0.7934982
|
0
|
Test opening cache files in a subprocess (with a clean environment).
|
def test_reopen_cache():
    env = os.environ.copy()
    # Get the path to current directory
    path = os.path.dirname(os.path.realpath(__file__))
    # Set the COVERAGE_PROCESS_START env. variable.
    # Allows to cover files run in a subprocess
    # http://nedbatchelder.com/code/coverage/subprocess.html
    env["COVERAGE_PROCESS_START"] = path + "/../.coveragerc"
    p = subprocess.Popen(
        [sys.executable, "unittests/reopen_cache_tester.py"],
        stdout=subprocess.PIPE,
        env=env)
    print(p.stdout.read())
    p.wait()
    p.stdout.close()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_use_cache_missing_file():\n # Generate cached files\n cmd_list = [NETMIKO_GREP] + ['interface', 'all']\n _, full_dir = find_netmiko_dir()\n remove_file = 'bad_device.txt'\n remove_file_full = \"{}/{}\".format(full_dir, remove_file)\n if os.path.exists(remove_file_full) and os.path.isfile(remove_file_full):\n os.remove(remove_file_full)\n cmd_list = [NETMIKO_GREP] + ['--use-cache', '--display-runtime', 'interface', 'all']\n (output, std_err) = subprocess_handler(cmd_list)\n assert \"Some cache files are missing: unable to use --use-cache option.\" in std_err",
"def test_local_cache():",
"def test_ref_cache_with_tempfile(self):\n # update cache file from db\n self.host_updater.refresh_cache()\n # create temp_cache_file to test it doesnt broke system\n with open(self.host_updater.temp_cache_file, 'a'):\n pass\n self.host_updater.refresh_cache()\n self.assertFalse(os.path.exists(self.host_updater.temp_cache_file))",
"def test_use_cache():\n # Generate cached files\n cmd_list = [NETMIKO_GREP] + ['interface', 'all']\n subprocess_handler(cmd_list)\n cmd_list = [NETMIKO_GREP] + ['--use-cache', '--display-runtime', 'interface', 'all']\n (output, std_err) = subprocess_handler(cmd_list)\n match = re.search(r\"Total time: (0:.*)\", output)\n time = match.group(1)\n _, _, seconds = time.split(\":\")\n seconds = float(seconds)\n assert seconds <= 1\n assert 'pynet_rtr1.txt:interface FastEthernet0' in output",
"def test_clear_cache_silent_fail():\n shutil.rmtree(yvs.cache.LOCAL_CACHE_DIR_PATH)\n yvs.main()\n case.assertFalse(\n os.path.exists(yvs.cache.LOCAL_CACHE_DIR_PATH),\n 'local cache directory exists')",
"def test_clear_cache():\n yvs.main()\n case.assertFalse(\n os.path.exists(yvs.cache.LOCAL_CACHE_DIR_PATH),\n 'local cache directory exists')",
"def test_clear_cache(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '> cl']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[0].arg, SETTINGS['CLEAR_CACHE']['arg'])\n self.assertTrue(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '> clear C']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[0].arg, SETTINGS['CLEAR_CACHE']['arg'])\n self.assertTrue(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '>cl']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[0].arg, SETTINGS['CLEAR_CACHE']['arg'])\n self.assertTrue(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '> clear cache']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[0].arg, SETTINGS['CLEAR_CACHE']['arg'])\n self.assertTrue(wf._items[0].valid)\n wf._items = []",
"def test_cache_create(self):\n self.assertTrue(self.host_updater.refresh_cache())\n self.assertTrue(os.path.exists(self.host_updater.cache_file))",
"def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called",
"def test_set_cache(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '> set']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n self.assertFalse(wf._items[0].arg)\n self.assertFalse(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '> set C']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n self.assertFalse(wf._items[0].arg)\n self.assertFalse(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '>se']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n self.assertFalse(wf._items[0].arg)\n self.assertFalse(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '> set cache']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n self.assertFalse(wf._items[0].arg)\n self.assertFalse(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '> Set cache length 1']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['SET_CACHE']['title'] % '1 second')\n self.assertEqual(wf._items[0].arg, SETTINGS['SET_CACHE']['arg'] % str(1))\n self.assertTrue(wf._items[0].valid)\n wf._items = []\n\n sys.argv = ['drive.py', '> Set cache length 12']\n main(None)\n self.assertEqual(len(wf._items), 1)\n self.assertEqual(wf._items[0].title, SETTINGS['SET_CACHE']['title'] % '12 seconds')\n self.assertEqual(wf._items[0].arg, SETTINGS['SET_CACHE']['arg'] % str(12))\n self.assertTrue(wf._items[0].valid)\n wf._items = []",
"def test_apt_cache_is_clean(self):\n self.assertSetEqual(\n set(self.host.file(\"/var/cache/apt/archives\").listdir()),\n {\"lock\", \"partial\"},\n )",
"def test_no_io_on_bool():\n file = get_image_cache_file()\n bool(file)\n assert not file.storage.exists.called\n assert not file.storage.open.called",
"def test_clean_old_jobs_no_jid_root():\n with patch(\"os.path.exists\", MagicMock(return_value=False)):\n assert local_cache.clean_old_jobs() is None",
"def test_13_find_exec(self, mock_mktmp, mock_getdata,\n mock_remove, mock_call):\n mock_mktmp.return_value.mktmp.return_value = \"/tmp/tmpfile\"\n with mock.patch(BUILTINS + '.open', mock.mock_open()):\n # executable found\n mock_getdata.return_value = \"/bin/executable\"\n filename = udocker.FileUtil(\"executable\").find_exec()\n self.assertEqual(filename, \"/bin/executable\")\n # executable not found\n mock_getdata.return_value = \"not found\"\n filename = udocker.FileUtil(\"executable\").find_exec()\n self.assertEqual(filename, \"\")\n # executable not found\n mock_getdata.return_value = \"xxxxx\"\n filename = udocker.FileUtil(\"executable\").find_exec()\n self.assertEqual(filename, \"\")",
"def test_retrieve_files_with_pre_hook(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n os.makedirs('/tmp/remote_pacha/localhost/pacha_pre')\n touch_script = open('/tmp/remote_pacha/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/remote_pacha/localhost/pre_got_executed.txt''')\n touch_script.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/pre_got_executed.txt'))",
"def find_cache(hw, r):\n\n cmd = \"find . -name CMakeCache.txt\"\n p = Popen(cmd, shell=True, stdout=PIPE)\n out = p.stdout.readlines()\n p.stdout.close()\n p.wait()\n \n # Transform the output into something readable.\n for i in out:\n found = i.split(':')\n \n # Scrub the path name\n path = os.path.dirname(found[0])[2:]\n if path == \"__build__\":\n continue\n r.note(\"found build files in '{0}'\".format(path))",
"def use_cached_files(self, cache_key):\r\n pass",
"def __read_cache_file_if_exists(self) -> None:\n if os.path.exists(self.__cache_file):\n self.__config.open_file(self.__cache_file, \"r\", self.__process_cache)",
"def test_file_list_cache():\n from nose.tools import raises\n\n tmp = FileListCache()\n\n @raises(TypeError)\n def test_tmp():\n \"\"\" nost test \"\"\"\n tmp.cache_file_list_dict = 0\n\n test_tmp()",
"def _run_command(self, cmd_args, return_code=None):\n testargs = ['cache_manage']\n testargs.extend(cmd_args)\n with mock.patch.object(sys, 'exit') as mock_exit:\n with mock.patch.object(sys, 'argv', testargs):\n try:\n cache_manage.main()\n except Exception:\n # See if we expected this failure\n if return_code is None:\n raise\n\n if return_code is not None:\n mock_exit.called_with(return_code)",
"def test_call_and_shelve():\r\n\r\n for func, Result in zip((MemorizedFunc(f, env['dir']),\r\n NotMemorizedFunc(f),\r\n Memory(cachedir=env['dir']).cache(f),\r\n Memory(cachedir=None).cache(f),\r\n ),\r\n (MemorizedResult, NotMemorizedResult,\r\n MemorizedResult, NotMemorizedResult)):\r\n nose.tools.assert_equal(func(2), 5)\r\n result = func.call_and_shelve(2)\r\n nose.tools.assert_true(isinstance(result, Result))\r\n nose.tools.assert_equal(result.get(), 5)\r\n\r\n result.clear()\r\n nose.tools.assert_raises(KeyError, result.get)\r\n result.clear() # Do nothing if there is no cache.\r",
"def test_refresh_error_create_cache(self):\n mock_method_path = ('dbtobindzone.updaters.host_updater'\n '.HostUpdater.cache_file')\n patch = mock.patch(mock_method_path, new_callable=mock.PropertyMock)\n with patch as mock_method:\n mock_method.return_value = '/TMP/DIR/NOT/EXISTS'\n result = self.host_updater.refresh_cache()\n self.assertFalse(result)",
"def test_memleaks():\n build()\n sh(\"%s psutil\\\\tests\\\\test_memleaks.py\" % PYTHON)",
"def test_cache_pollution(self):\n with self._test_checksum_setup(self.tempdir.name) as setupdata:\n filename, data, expected_checksum = setupdata\n\n # corrupt the file\n with open(os.path.join(self.tempdir.name, filename), \"r+b\") as fh:\n fh.seek(0)\n real_first_byte = fh.read(1).decode(\"latin-1\")\n fh.seek(0)\n fh.write(chr(ord(real_first_byte) ^ 0xff).encode(\"latin-1\"))\n\n with self.assertRaises(ChecksumValidationError):\n with self.caching_backend.read_contextmanager(filename, expected_checksum) as cm:\n self.assertEqual(cm.read(), data)\n\n # un-corrupt the file\n with open(os.path.join(self.tempdir.name, filename), \"r+b\") as fh:\n fh.seek(0)\n real_first_byte = fh.read(1).decode(\"latin-1\")\n fh.seek(0)\n fh.write(chr(ord(real_first_byte) ^ 0xff).encode(\"latin-1\"))\n\n with self.caching_backend.read_contextmanager(filename, expected_checksum) as cm:\n self.assertEqual(cm.read(), data)",
"def test__cache(self):\n # Access to a protected member _cache of a client class\n # pylint: disable=W0212\n treadmill.zkutils.get.return_value = {}\n\n zkclient = kazoo.client.KazooClient()\n self.evmgr._cache(zkclient, 'foo#001')\n\n appcache = os.path.join(self.cache, 'foo#001')\n self.assertTrue(os.path.exists(appcache))",
"def test_file_open_bug():\n \n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n \n try:\n os.remove(value.namespacemanager.file)\n except OSError:\n pass\n \n value.set_value(\"x\")\n\n f = open(value.namespacemanager.file, 'w')\n f.write(\"BLAH BLAH BLAH\")\n f.close()\n \n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"y\")\n assert False\n except:\n pass\n \n _synchronizers.clear()\n context.clear()\n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n\n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"z\")\n assert False\n except:\n pass",
"def test_lock_missing_cache_entries_gets_all_hashes(PipenvInstance, tmpdir):\n\n with temp_environ():\n os.environ[\"PIPENV_CACHE_DIR\"] = str(tmpdir.strpath)\n with PipenvInstance(chdir=True) as p:\n p._pipfile.add(\"pathlib2\", \"*\")\n assert \"pathlib2\" in p.pipfile[\"packages\"]\n c = p.pipenv(\"install\")\n assert c.return_code == 0, (c.err, (\"\\n\".join([\"{0}: {1}\\n\".format(k, v) for k, v in os.environ.items()])))\n c = p.pipenv(\"lock --clear\")\n assert c.return_code == 0, c.err\n assert \"pathlib2\" in p.lockfile[\"default\"]\n assert \"scandir\" in p.lockfile[\"default\"]\n assert isinstance(p.lockfile[\"default\"][\"scandir\"][\"hashes\"], list)\n assert len(p.lockfile[\"default\"][\"scandir\"][\"hashes\"]) > 1",
"def test_normal_execution(self, mock_path, mock_open, mock_warn):\n # Set the mocked functions returned values\n mock_path.isfile.side_effect = [True]\n mock_context_manager = mock.Mock()\n mock_open.return_value = mock_context_manager\n mock_file = mock.Mock()\n mock_file.read.return_value = \"PTK_RELEASE=4.0\\n\"\n mock_enter = mock.Mock()\n mock_enter.return_value = mock_file\n mock_exit = mock.Mock()\n setattr(mock_context_manager, \"__enter__\", mock_enter)\n setattr(mock_context_manager, \"__exit__\", mock_exit)\n\n # Test execution\n ConnectomistWrapper._connectomist_version_check(\"/my/path/mock_conf\")\n self.assertEqual(len(mock_warn.call_args_list), 1)",
"def test_prep_file(self, mock_open):\n path = \"/tmp/foo\"\n request = DownloadRequest(None, None, None, path)\n download_threads._MultithreadedDownloader._prep_file(request)\n mock_open.assert_called_once_with(path, \"wb\")\n\n mock_open.return_value.close.assert_called_once_with()",
"def testReplayState2(t, env):\n c = env.c1\n c.init_connection()\n c.maketree([t.code])\n ops = c.use_obj(c.homedir)\n ops += [c.open(t.code), c.getfh_op()]\n _replay(c, ops, NFS4ERR_ISDIR)"
] |
[
"0.69525003",
"0.6447687",
"0.636721",
"0.6286523",
"0.62074196",
"0.61756605",
"0.6045512",
"0.600041",
"0.58595663",
"0.5831782",
"0.5773257",
"0.5741478",
"0.5648182",
"0.5604841",
"0.55939627",
"0.5588715",
"0.55886006",
"0.5573728",
"0.5554118",
"0.55458426",
"0.5485352",
"0.54678303",
"0.5466327",
"0.5464726",
"0.5438413",
"0.54111964",
"0.54040676",
"0.537691",
"0.53744656",
"0.53696024"
] |
0.7429855
|
0
|
run encounter from start; introduce NPCs, present interaction choices, and start social/combat encounter based on choices
|
def begin_encounter(self):
    #introduce NPCs - run all introduce methods, unless the NPCs have the same name
    for i in range(len(self.npc_names)):
        for _npc in self.npc_list:
            if _npc.name == self.npc_names[i]:
                _npc.introduce(self.npc_quantities[i], self.location)
                break
    #list visible enemies
    self.display_npcs()
    #check close proximity - if hostile enemy within 10 ft, don't go to interact menu
    hostile_close_proximity = False
    for m in range(len(self.npc_distances)):
        if self.npc_distances[m] < 10:
            if self.npc_list[m].hostility == utils.HostilityLevel.HOSTILE:
                hostile_close_proximity = True
                multiple = self.npc_quantities[self.npc_names.index(self.npc_list[m].name)] > 1
                self.npc_list[m].alert_close_proximity(multiple)
                break
    interaction_result = NextState
    if hostile_close_proximity:
        #start combat
        interaction_result = NextState.COMBAT
    else:
        #run interaction choice menu - interactions may return flags that spawn social/combat encounters
        print("Select NPC to interact with:")
        for l in range(len(self.npc_list)):
            print(str(l + 1) + ". " + self.npc_list[l].name + " (Distance: " + str(self.npc_distances[l]) + "ft.)")
        choice = 0
        while choice < 1 or choice > len(self.npc_list):
            try:
                choice = int(input("Make selection: "))
            except ValueError:
                print("Enter an integer between 1 and " + str(len(self.npc_list)))
        interaction_result = self.npc_list[choice - 1].interact(self, choice - 1, self.main_player)
    #spawn social/combat encounter
    #if combat, pass npc list to generate turn order
    if interaction_result.name == "COMBAT":
        #spawn combat encounter
        print("Starting combat")
        new_combat = combat.CombatEncounter(self.main_player, self.npc_list, self.npc_distances, self.npc_quantities)
    elif interaction_result.name == "SOCIAL":
        #spawn social encounter
        print("Starting social encounter")
    elif interaction_result.name == "FINISHED":
        #present next choices, award loot from area
        #allow player to interact with any remaining/new NPCs
        print("Encounter finished")
    elif interaction_result.name == "DEATH":
        #kill the player and end the game
        print("Player dead")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parley(self):\n wait_times = constants.TUTORIAL_WAIT_TIMES\n self.introduce_chat_interface()\n self.wait_for_response(\n message='Please type a greeting message to continue.',\n delay_time=wait_times['chat-interface'],\n )\n self.introduce_knowledgeable_entity()\n self.wait_for_response(\n message=constants.ONBOARDING_ACKNOWLEDGE_UNDERSTOOD,\n delay_time=wait_times['chat-interface'],\n )\n self.introduce_search()\n self.wait_for_response(\n message=constants.ONBOARDING_ACKNOWLEDGE_UNDERSTOOD,\n delay_time=wait_times['knowledge'],\n )\n self.try_search()\n self.wait_for_response_with_search()\n self.introduce_persona()\n self.wait_for_response_with_search()\n self.go_for_start()\n self.episodeDone = True",
"def parley(self):\n wait_times = constants.TUTORIAL_WAIT_TIMES\n self.introduce_chat_interface()\n self.wait_for_response(\n message='Please type a greeting message to continue.',\n delay_time=wait_times['chat-interface'],\n )\n self.introduce_persona()\n self.wait_for_response(\n message=constants.APPRENTICE_PERSONA_ROLE_INSTRUCTION,\n delay_time=wait_times['persona'],\n )\n self.introduce_partner_entity()\n self.wait_for_response(\n message=constants.APPRENTICE_CHITCHAT_INSTRUCTION,\n delay_time=wait_times['persona'],\n )\n self.introduce_partner_knowledge()\n self.wait_for_response(\n message=constants.APPRENTICE_PERSONA_MSG_INSTRUCTION,\n delay_time=wait_times['knowledge'],\n )\n self.go_for_start()\n self.episodeDone = True",
"def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()",
"def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation",
"def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)",
"def start():\r\n introduction()\r\n score = duck_shooting1()\r\n dogs()\r\n play_again(score)",
"def start_combat(self):\n super(HonourAutoCombatHandler, self).start_combat()\n\n # All characters auto cast skills.\n for char in self.characters.values():\n character = char[\"char\"]\n character.start_auto_combat_skill()",
"def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()",
"def exec(self):\n for player_id in range(0, 6):\n ai_command = self.info_manager.get_player_tactic(player_id).exec()\n self.info_manager.set_player_next_action(player_id, ai_command)",
"def do_initiate(bot, msg, **kwargs):\n channel = kwargs.get('event').get('channel')\n instructions = textwrap.dedent(\n '''\n :cop:I am *{name}*, your election police.\n\n \n :grey_question:*How to Vote:*\n Voting in here is simple. Each candidate's profile is listed with a white-on-green checkmark beneath their profile. All you have to do is *click the checkmark once* for your preferred candidate.\n\n\n :warning:*Rules*:\n 1. *Only your first vote counts*. Regardless of the count on checkmark, only your first vote is valid and recorded. Subsequent votes or attemps to remove already cast ballots would be ignored.\n\n 2. *Do not try to post any messages in this channel* as such messages would be deleted immediately.\n\n Now...\n > _Be Nice, Be Respectful, Be Civil_ :simple_smile:\n\n\n I will now list the candidates. Happy Voting :simple_smile:\n > One more thing: _You can vote for yourself._\n\n '''.format(name=bot.username)\n )\n\n # Clear channel\n bot.clear_channel(channel)\n \n print 'Begin Inviting...'\n if 'DEBUG' in dir(bot.config) or 'TESTING' in dir(bot.config):\n print 'test invites'\n # for userid in bot.masters.values():\n # bot.invite_user_to_channel(channel, userid)\n else:\n for member in bot.team_members:\n bot.invite_user_to_channel(channel, member.get('id'))\n print 'End Inviting...'\n\n # Set channel topic\n bot.set_channel_topic(bot.stats.get(channel).get('topic'), channel)\n # Show instructions\n instruction_response = bot.post_msg(text=instructions, channel_name_or_id=channel)\n # Set channel purpose\n bot.set_channel_purpose(bot.stats.get(channel).get('purpose'), channel)\n # Pin message to channel\n bot.pin_msg_to_channel(channel, instruction_response.get('ts'))\n\n help_response = do_help(bot, **kwargs)\n bot.pin_msg_to_channel(channel, help_response.get('ts'))\n\n # Add candidates for this office\n for userid, data in bot.stats.get(channel).get('candidates').iteritems():\n bot.add_candidate(userid, channel)\n bot.vote_for(userid, channel)\n #bot.update_live_stats(channel)\n\n live_stats = bot.get_stats(channel)\n if live_stats is not None:\n response = bot.post_msg(\n text=live_stats,\n channel_name_or_id=channel\n )\n bot.stats.get(channel)['live_ts'] = response.get('ts')\n bot.db.session.query(bot.db.Office).filter_by(channel=channel).first().live_ts=response.get('ts')\n\n response = bot.post_msg(\n text='*NO ONGOING ELECTIONS IN THIS CHANNEL*',\n channel_name_or_id=channel\n )\n bot.stats.get(channel)['election_status_ts'] = response.get('ts')\n bot.db.session.query(bot.db.Office).filter_by(channel=channel).first().election_status_ts=response.get('ts')\n bot.stats.get(channel)['election_status'] = False\n bot.db.session.query(bot.db.Office).filter_by(channel=channel).first().election_status= False\n bot.db.session.commit()\n\n bot.log_msg('Channel{} prepared for voting.'.format(channel), channel)\n \n return True\n #return Response(bot.about)",
"def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()",
"def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()",
"def main():\n global repeat\n regime = collect()\n start = int(raw_input(\"Which line of the exercise script would you like to begin with? \")) - 1\n regime = regime[start:]\n say(\"Ready?\")\n time.sleep(1)\n for exercise in regime:\n coach(exercise[:-1])\n while repeat:\n repeat = False\n coach(exercise[:-1])\n say(\"Session complete.\")",
"def start_game(self):\n while self.can_deal:\n self.take_turn()",
"def start_interaction():\r\n\r\n # Loop infinitely\r\n while True:\r\n # Prints 'Say something: ' and then waits for user input\r\n # Note: line gets a string value\r\n line = input('Say something: ')\r\n\r\n # Right now, not very interesting...?\r\n if line == EASTER_EGG:\r\n print(EASTER_EGG_RESPONSE)\r\n else:\r\n print(repeat(line))",
"def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()",
"def __advance(self):\n # If the game is being prepared.\n if self.__current_phase == self.PHASE_PREPARE:\n # If both players are ready.\n if self.__get_current_player().pre_game_prepare() and self.__get_other_player().pre_game_prepare():\n # Start the turn.\n self.__current_phase = self.PHASE_START_TURN\n\n # Begin the game for each player.\n self.__get_current_player().start_game()\n self.__get_other_player().start_game()\n\n # If the game is being set up.\n elif self.__current_phase == self.PHASE_START_TURN:\n # Advance onto the request fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n\n # Call the start turn method for both players.\n self.__get_current_player().start_turn()\n self.__get_other_player().start_turn()\n\n # If the game requires the user to shoot.\n elif self.__current_phase == self.PHASE_REQUEST_SHOT:\n # Advance onto the fire phase.\n self.__current_phase = self.PHASE_FIRE\n\n # Call the shoot method of the user.\n self.__get_current_player().request_shot()\n\n # If the game requires the other user to be hit.\n elif self.__current_phase == self.PHASE_REQUEST_HIT:\n # Advance onto the hit phase.\n self.__current_phase = self.PHASE_HIT\n\n # Call the other player's request hit method.\n self.__get_other_player().request_hit(self.__current_fire_location)\n\n # If the game shows the hit result.\n elif self.__current_phase == self.PHASE_SHOW_HIT:\n # Advance onto the await phase.\n self.__current_phase = self.PHASE_AWAIT_OPPONENT_SHOT\n\n # Call the player's show hit method.\n self.__get_current_player().show_hit(self.__current_fire_location, self.__current_fire_effect)\n\n # If the game awaits the next shot.\n elif self.__current_phase == self.PHASE_AWAIT_OPPONENT_SHOT:\n # If the opponent has lost.\n if self.__current_fire_effect == Player.SHOT_HIT_TYPE_GAME_OVER:\n # Store the winner's index.\n engine.Engine.game_manager.winner = self.current_player_index\n # Move to the game over phase.\n engine.Engine.load_level(\"GameOver\")\n else:\n # Call the player's await hit method.\n self.__get_current_player().await_opponent_shot()\n\n # If the turn is over.\n if self.current_player_index == 1:\n # Advance to the next turn.\n self.__current_phase = self.PHASE_END_TURN\n else:\n # Advance onto the next fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n # Increment the user counter.\n self.current_player_index = 1\n\n elif self.__current_phase == self.PHASE_END_TURN:\n # Start a new turn.\n self.__current_phase = self.PHASE_START_TURN\n # Decrement the user counter.\n self.current_player_index = 0\n\n # Call the end turn methods.\n self.__get_current_player().end_turn()\n self.__get_other_player().end_turn()",
"def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()",
"def play_game():\n\n # Welcome character\n game_start = game_menu()\n while game_start != 'newgame' and game_start != 'loadgame':\n if game_start == 'walkthrough':\n game_walkthrough()\n print \" Please enter newgame, loadgame, or walkthrough\"\n game_start = game_menu()\n\n if game_start == 'newgame':\n # Initialize character and map\n character = Character.Character()\n game_intro()\n game_map = create_map(None)\n\n # Setup the starting room\n current_room = game_map['Dungeon Entrance']\n\n # Setup the character\n character_name = raw_input('Enter character name > ')\n character.set_name(character_name)\n character.set_current_room(current_room)\n character.set_game_map(game_map)\n\n elif game_start == 'loadgame':\n confirm_load = raw_input('Are you sure? (y or n) > ')\n if confirm_load == 'y':\n # Initialize character and map\n character = Character.Character()\n saved_game_data = load_game()\n if saved_game_data != False:\n game_map = create_map(saved_game_data['json_game_map'])\n\n # Set current room and character name based on saved JSON\n current_room = game_map[saved_game_data['current_room']]\n character_name = saved_game_data['character_name']\n character.set_name(character_name)\n character.set_current_room(current_room)\n character.set_game_map(game_map)\n\n # Add correct items to inventory\n for item in saved_game_data['json_inventory']:\n new_item = Item.Item()\n new_item.set_name(saved_game_data['json_inventory'][item]['Name'])\n new_item.set_description(saved_game_data['json_inventory'][item]['Description'])\n character.add_to_inventory(item, new_item)\n else:\n print \"\\nERROR: saved_game.json does not exist. You must start a new game.\"\n play_game()\n else:\n play_game()\n\n #handle commands\n new_command = user_input()\n while new_command != 'quit':\n handle_commands(new_command, character, game_map)\n new_command = user_input()",
"def step(self):\n\n \"\"\" First updates the variables values of the current time form the environment \"\"\"\n self.update_crispval(self.env.context)\n\n \"\"\"\n here the decision making of the agent\n to determine which activity to suggest to the patient\n i apply the creative controller to the current context\n \"\"\"\n curr_input = sample_inputs(False, 0, self.curr_interaction, self.variables_default_val, self.action_var,\n self.fuzzysets_values, self.variables_universe)\n c_out, rules_activations, is_cc_exception = self.creative_controller.computeOutput(curr_input, False)\n\n \"\"\" i obtain a number of ouput crisp values.\n i determine which one achieves the max expected output w.r.t. the a-rules \"\"\"\n best_a = None\n best_a_val = -1000\n best_a_exphapp = 5\n if self.verbose > Constants.VERBOSE_BASIC:\n print(\"rules activations\")\n for a in rules_activations:\n if rules_activations[a] > 0:\n print(str(a) + \"\\n\\t\\t\\t-> \" + str(rules_activations[a]))\n for item in c_out.items(): # for each pair <activity, crisp output>\n if self.verbose > Constants.VERBOSE_BASIC:\n print(item)\n if not item[\n 0] in self.curr_iter_suggestions: # if i didn't suggest the same activity already in the same interaction\n inputs = dict(curr_input) # I create a copy fo the dict\n inputs[item[0]] = item[1]\n assessor_id = self.actions_to_ti[item[0]]\n self.assessors[assessor_id].feed_inputs(inputs)\n is_ac_exception = False\n assout = []\n try:\n a_out, a_rules_activations, is_ac_exception = self.assessors[assessor_id].compute(verbose=False)\n assout = [a_out[ao] for ao in a_out]\n except:\n is_ac_exception = True\n traceback.print_exc()\n # todo the following assumes that every assessor controller has same eval var\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n if len(assout) == 0:\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n w_ta = self.weights_therapeutic_interventions[self.actions_to_ti[item[0]]]\n\n avg_credit_rules_that_suggested_action = 1.0\n nr_rules_that_suggested_action = 0\n for r in rules_activations:\n if (rules_activations[r] > 0) and (str(item[0]) in str(r)):\n avg_credit_rules_that_suggested_action = avg_credit_rules_that_suggested_action + \\\n self.rules_credits[str(r)]\n nr_rules_that_suggested_action = nr_rules_that_suggested_action + 1\n if nr_rules_that_suggested_action > 0:\n avg_credit_rules_that_suggested_action = (\n avg_credit_rules_that_suggested_action - 1.0) / nr_rules_that_suggested_action\n repetition_cost = 1.0\n a_val = (mean(assout) * w_ta * avg_credit_rules_that_suggested_action) / repetition_cost\n if (a_val > best_a_val) and (\n item[1] >= (self.variables_default_val[item[0]] + self.range_step[item[0]])):\n best_a = item\n best_a_val = a_val\n best_a_exphapp = mean(assout)\n\n \"\"\"I suggest the activity with best expected outcome and store the information to populate the interactions \n memory \"\"\"\n self.proposeActivity(best_a)\n if not best_a is None:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"proposing activity\" + str(best_a) + \" which has expected feedback: \" + str(\n best_a_exphapp) + \", which weighted is \" + str(best_a_val))\n self.curr_iter_suggestions.append(best_a[0])\n self.last_suggestion = best_a\n else:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"the activity proposed is \" + str(\n best_a) + \" so I don't suggest anything. 
I will ask a question instead\")\n self.last_suggestion = []\n self.expected_feedback = best_a_exphapp\n self.last_context = self.env.context.copy()\n self.last_rules_activations = rules_activations",
"def start():\n commands = {\"new tournament\": Controller.new_tournament,\n \"new round\": Controller.new_round,\n \"new player\": Controller.new_player,\n\n \"set round\": Controller.set_round,\n \"set player\": Controller.set_player,\n\n \"get players -all -alpha\": Controller.get_all_players_alpha,\n \"get players -all -rank\": Controller.get_all_players_rank,\n \"get players -alpha\": Controller.get_players_alpha,\n \"get players -rank\": Controller.get_players_rank,\n\n \"get tournament -all\": Controller.get_all_tournaments,\n \"get tournament\": Controller.get_tournament,\n\n \"get round -all\": Controller.get_all_rounds,\n \"get round\": Controller.get_round,\n\n \"get match -all\": Controller.get_all_matches,\n \"get match\": Controller.get_match,\n\n \"load\": Controller.load,\n\n \"exit\": Controller.close_app\n }\n\n # At the beginning of the program, load all data from a data_base.\n Controller.load()\n print(\"Need help? Type 'commands' to see all commands and there purposes.\")\n\n while True:\n instruction = str(input(\"ChessManager >>> \"))\n try:\n commands[instruction]()\n except KeyError:\n print(\"Wrong Command.\")",
"def StartCombat(self):\n\n print(\"------------------------------------\")\n self.fighter1.PrintInfo()\n self.fighter2.PrintInfo()\n print(\"------------------------------------\")\n self.combatInProgress = True\n while self.combatInProgress and self.currentTurn < self.maxTurns:\n print(\"Turn {}:\".format(self.currentTurn))\n self.CombatAttack()\n if self.EndTurn():\n print(\"------------------------------------\")\n break\n print(\"------------------------------------\")\n time.sleep(3)",
"def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")",
"def run_episode(self):\n self.pygame_clock = pygame.time.Clock()\n while True:\n pygame.event.pump()\n is_human_agent = isinstance(self.agents[self.env.turn], HumanAgent)\n\n # handle exit event\n self.handle_input_event()\n\n # pick the next action\n if is_human_agent:\n self.handle_input_event()\n else:\n self.ai_event()\n self.place_a_disk()\n self.render()\n\n if self.event == Event.END_GAME:\n pygame.time.wait(self.END_GAME_DELAY)\n\n if self.event == Event.END_GAME_VIEW:\n pygame.time.wait(self.END_GAME_VIEW_DELAY)\n break",
"def start_episode(self):\n self.last_sensation = self.env()\n self.next_action = self.agent(self.last_sensation)",
"async def start(self):\n await self.on_start()\n valid = await self.get_participants()\n\n if valid:\n await asyncio.sleep(1)\n await self.prepare()\n await self.game()\n\n del started[started.index(self.channel.id)]",
"def start_interaction(self):\n self.__interact()",
"def main():\n player = Player(LivingRoom())\n escaping = True\n\n print('Alright kid, it\\'s you and me on a grand adventure. We\\'re '\n 'currently in the {}, and I can see {} possible exits. You can '\n 'search the room or try exploring, if you like.'\n .format(player.location.name, player.location.exits))\n\n while escaping:\n # need to replace hard list with extract from player.actions\n action = input('\\nWhat now?\\n\\n1. Search\\t2. Grab\\t3. Gurgle\\n>')\n\n if action in player.actions.keys():\n player.actions[action]()",
"def startEpisode(self):\n self.lastState = None\n self.lastAction = None\n self.episodeRewards = 0.0\n\n print(\"Agent Start Episode #\" + str(self.episodesSoFar+1))",
"def start_game(self):\n\n\t\tpass"
] |
[
"0.6173418",
"0.6145519",
"0.599449",
"0.59839225",
"0.59169596",
"0.58977705",
"0.5870836",
"0.5825354",
"0.57647556",
"0.5661851",
"0.5659896",
"0.5659896",
"0.56488043",
"0.5643661",
"0.5642012",
"0.56370825",
"0.5635088",
"0.5627784",
"0.56082624",
"0.5599039",
"0.5566845",
"0.55535877",
"0.55282724",
"0.5523942",
"0.5518464",
"0.5498236",
"0.549594",
"0.5450132",
"0.54249865",
"0.54147387"
] |
0.8137266
|
0
|
Quick description of NPCs in the current location.
|
def display_npcs(self):
if self.location == world.LocationType.INDOORS:
print("In the room before you, you see:")
for i in range(len(self.npc_list)):
print("A " + self.npc_list[i].name + " (Distance: " + str(self.npc_distances[i]) + "ft.)")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_npc(loc_npc):\r\n func.clear_screen()\r\n print('В текущей локации находятся:')\r\n for i in loc_npc:\r\n print('NPC ', i)\r\n print(npc[i].about())\r\n print('Настроение:', npc[i].mood, '\\n')\r\n input(\"Нажми enter для продолжения...\")",
"def look_at(self):\n if self.visible == True:\n print('You look at the %s:' % self.alias)\n print(self.desc)",
"def describe_locations():\n pass",
"def get_description(self):\n print(\"This Iron door.\")",
"def describe_restaurant(self):\r\n print(\"\\n==========This is our restaurant \" + self.restaurant.title() + \"===============\")\r\n print(\"We serve you amazing \" + self.cuisine + \" 's cusine\")",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health) + ' magic is: ' + str(self.__magic)).title()\n print(description)",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health)).title()\n print(description)",
"def describe_restaurant(self):\n print(self.name.title() + \" is known for it's \" + self.cuisine.title() + \".\")",
"def reveal_occupants(idx, huts):\n msg = \"\"\n print(\"展示小屋内部情况...\")\n for i in range(len(huts)):\n occupant_info = \"<%d:%s>\" % (i + 1, huts[i])\n if i + 1 == idx:\n occupant_info = \"\\033[1m\" + occupant_info + \"\\033[0m\"\n msg += occupant_info + \" \"\n\n print(\"\\t\" + msg)\n print_dotted_line()",
"def __str__(self):\n\n descr = \"You are in the \" + self.name + \"\\n\"\n for key in self.exits:\n descr += \"You can go \" + key + \" to the \" + self.exits[key].name + \"\\n\"\n for item in self.inventory:\n descr += \"There is a \" + item.name + \" here.\\n\"\n for item in self.objects:\n descr += item.name + \" is here.\"\n return descr",
"def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())",
"def get_details(self):\n print(self.name)\n print(10 * \"-\" + \"\\n\")\n print(self.description)\n for direction in self.linked_rooms:\n room = self.linked_rooms[direction]\n print(\"The \" + room.get_name() + \" is \" + direction)\n print(\"\\n\")",
"def desc(self):\n return LandCell.desc(self) + \"; plant=\" + str(self.plant)",
"def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")",
"def describe_restaurant(self):\n msg = f\"{self.name} serves wonderful {self.cuisine_type}.\"\n print(f\"\\n{msg}\")",
"def print_agent(agent):\n agent_string = \"FOUND:\\n\"\n for key in place_detail_keys:\n agent_string += \"\\t%s: %s\\n\" % (key, agent[key])\n log.debug(agent_string)",
"async def info(self):\n # [p]info\n\n await self.bot.say(strings.info.format(\n CacheAPI.get(key='dwarf_repository'),\n CacheAPI.get(key='dwarf_invite_link')))",
"def description():",
"async def government(self, ctx):\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **eboard**\\n\\n**eboard seats** - shows a list of all '\n 'government positions and their corresponding officers.\\n\\n**eboard position \\\"<position>\\\"** - shows the '\n 'current officer that fills this position and a description of the position.')",
"def description(self):",
"def helpme(self):\n\n print(\"{}{}{}\".format(' ', 'Commands', ' '))\n print(\"{}{}{}\".format(' ', '--------', ' '))\n print(\"{} {} {}\".format('help ', '|', 'Display all usable commands'))\n print(\"{} {} {}\".format('look ', '|', 'Explore the room to find current location, exits and potential items.'))\n print(\"{} {} {}\".format('go ', '|', 'The prefix required to navigate your player.'))\n print(\"{} {} {}\".format('get ', '|', 'The prefix for picking up items.'))\n print(\"{} {} {}\".format('drop ', '|', 'The prefix for dropping items.'))\n print(\"{} {} {}\".format('inv ', '|', 'Displays the player inventory'))\n print(\"{} {} {}\".format('health ', '|', 'Displays player health'))\n print(\"{} {} {}\".format('eat ', '|', 'Allows the player to use consumables to gain health'))\n print(\"{} {} {}\".format('equip ', '|', 'Equip a weapon in your inventory'))\n print(\"{} {} {}\".format('unequip', '|', 'Unequip a current weapon'))\n print(\"{} {} {}\".format('attack ', '|', 'Allows the player to attack a non-player'))\n print(\"{} {} {}\".format('push ', '|', 'Returns NPC to spawn'))\n print(\"{} {} {}\".format('save ', '|', 'Save current player progress'))\n print(\"{} {} {}\".format('load ', '|', 'Load a previous character'))",
"def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")",
"def describe_restaurant(self):\n\t\tprint(f\"{self.restaurant_name.title()} serves {self.cuisine_type}.\")",
"def get_description(self):",
"def description(self):\n\t\treturn \"%s, %s\" % (self.name, self.country)",
"def info(self):",
"def info(self):",
"def get_info(self):\n if self.own_home:\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. Currently I have {self.own_home} house')\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. I don\\'t have any home now!')",
"def describe(self):\r\n print( self.name + \" is here!\" )\r\n print( self.description )",
"def visit_npc(self, npc):\n self.visit_character(npc)"
] |
[
"0.67704767",
"0.6305048",
"0.6275345",
"0.6239086",
"0.62250483",
"0.61454546",
"0.6121275",
"0.60830915",
"0.606883",
"0.6023328",
"0.5991017",
"0.5978248",
"0.59666777",
"0.5941092",
"0.5914425",
"0.5826963",
"0.5782244",
"0.57809716",
"0.57723576",
"0.57674545",
"0.5761818",
"0.5750552",
"0.5739417",
"0.571947",
"0.57145584",
"0.5682612",
"0.5682612",
"0.5673385",
"0.56627405",
"0.5650127"
] |
0.72753835
|
0
|
A method to find all saved scores from the database.
|
def find_all(self):
cursor = self._connection.cursor()
cursor.execute('SELECT * FROM scores ORDER BY level')
all_scores = cursor.fetchall()
return all_scores
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_all_saved():\n return saved.find()",
"def getScores(self,query):\n pass",
"def find_all_by_level(self, level):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE level=? ORDER BY score'\n cursor.execute(command, [level])\n all_scores_by_level = cursor.fetchall()\n return all_scores_by_level",
"def getScores():\r\n results = \"\"\r\n with sqlite3.connect(database_file) as conn:\r\n cursor = conn.cursor()\r\n team_scores = cursor.execute(\"\"\" SELECT * FROM scores;\"\"\")\r\n\r\n for row in team_scores.fetchall():\r\n teamname, auto, rc, spirit, video = row\r\n results += result_string.format(teamname, auto, rc, spirit, video) + \"\\n\"\r\n return results",
"def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.child_history[i].get('score') for i in xrange(0, len(self.child_history))]",
"def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]",
"def save_scores(self):\n\n with open('.scores.pickle', 'wb') as f:\n pickle.dump(self.scores, f)",
"def get_scores(self):\n return self.score",
"def save_scores(self) -> None:\n # generate scores for this round for all players and save it in the Match instances and create end_time\n # of round order = {'order': <order>, 'left_window_value': <value to display>}\n order = ct.Controls.save_scores(self.lignes)\n # order = ct.Controls.end_round(self.lignes)\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(\n self.master.master.list_instances_menus_tournament)\n self.master.master.launch()\n # self.destroy_window()\n self.destroy_widgets()\n self.display_rounds_result()",
"def retrieve_all_scores(database_connection: mysql.connector.connect\n ) -> List[int]:\n cursor = database_connection.cursor()\n query = (\"SELECT pm.panelistscore FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"AND pm.panelistscore IS NOT NULL \"\n \"ORDER BY pm.panelistscore ASC;\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return None\n\n scores = []\n for row in result:\n scores.append(row[0])\n\n return scores",
"def find_all_by_player(self, player):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE player=? ORDER BY level'\n cursor.execute(command, [player])\n return cursor.fetchall()",
"def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score",
"def get_scores(self):\n return pd.DataFrame(self._scores)",
"def get_all():\n return SavedQuery.get_all()",
"def get_highscores(self):\n return self.database.get_high_scores(self.difficulty)",
"def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores",
"def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]",
"def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()",
"def find_all():\r\n data = store.read().items()\r\n return [Game(id=id, **value) for id,value in data]",
"def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList",
"def get_scores(self, tournament: Tournament):\n self.model.eval()\n # collate_fn = lambda x: collate_teams(x, tournament.max_members)\n dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)\n iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)\n scores = []\n for i, team in enumerate(iterator):\n score = self.model.get_team_score(team.to(self.device))\n scores.append(score.cpu().numpy())\n\n scores = np.concatenate(scores)\n return scores.flatten()",
"def query_all():\n\tstudents = session.query(Student).all()\n\treturn students",
"def get_score_history(self):\n return self._score_history",
"def get_highscores(self):\n return self.filter(active=False, finished=True).order_by(\"-score\")",
"def get_scores(self, query):\n self.init()\n tokens = self.tokenizer.tokenize(query)\n return self.bm25_instance.get_scores(query=tokens)",
"def score(self):\n return self.client.call('GET', self.name + 'score')",
"def scores_(self):\n return self.predictor.scores_",
"def get_all_f_blast_x_scores(self):\n listOfScoreBlast = []\n sqlObj = _F_score_blast_X_sql()\n results = sqlObj.select_all_score_blast_x_all_attributes()\n for element in results:\n listOfScoreBlast.append(F_score_blast_X(element[0], element[1], element[2], element[3], element[4], element[5], element[6], element[7], element[8], element[9], element[10], element[11], element[12], element[13], element[14]))\n return listOfScoreBlast",
"def get_high_scores(self,request):\n if request.number_of_results:\n scores = Score.query(Score.won == True).order(Score.attempts_allowed,Score.guesses).fetch(request.number_of_results)\n else:\n scores = Score.query(Score.won == True).order(Score.attempts_allowed,Score.guesses).fetch()\n return ScoreForms(items=[score.to_form() for score in scores])",
"def print_scores(self):\n print(\"scores: \", self.get_scores())"
] |
[
"0.6841076",
"0.6711554",
"0.6601322",
"0.657444",
"0.65719324",
"0.65098506",
"0.6496116",
"0.6478606",
"0.6399739",
"0.6348333",
"0.62986887",
"0.62947536",
"0.61703575",
"0.6157236",
"0.6095744",
"0.6060185",
"0.6047248",
"0.60408145",
"0.5990906",
"0.58930206",
"0.5884905",
"0.58688474",
"0.5852008",
"0.5823315",
"0.58120346",
"0.58102214",
"0.57680964",
"0.57670784",
"0.57535326",
"0.5726103"
] |
0.81065226
|
0
|
A method to find all saved scores on a specific level.
|
def find_all_by_level(self, level):
cursor = self._connection.cursor()
command = 'SELECT * FROM scores WHERE level=? ORDER BY score'
cursor.execute(command, [level])
all_scores_by_level = cursor.fetchall()
return all_scores_by_level
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_all(self):\n cursor = self._connection.cursor()\n cursor.execute('SELECT * FROM scores ORDER BY level')\n all_scores = cursor.fetchall()\n return all_scores",
"def find_all_by_player(self, player):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE player=? ORDER BY level'\n cursor.execute(command, [player])\n return cursor.fetchall()",
"def get_high_score(self, level):\n score_board = {} # a dic used to store info\n heading = None\n path = os.path.abspath(os.path.dirname(sys.argv[0])) + \"\\\\HighScores\\\\\"\n if not os.path.exists(path): # check whether there's a 'HighScores' folder\n os.makedirs(path) # if not, create and return nothing\n return\n # check whether this level has a high score storage\n elif not os.path.exists(path + level):\n return # if don't have, return nothing\n else: # if do have a storage\n with open(path + level) as fin: # open the file\n for line in fin:\n line = line.strip() # cut the tail\n if line.startswith('==') and line.endswith('=='): # detect headings\n heading = line[2:-2] # heading line\n elif line.count(':') == 1 and heading == level: # detect attribute\n name, _, score = line.partition(':') # get attribute value\n # this storage have all the records, for a player find the highest score\n if name in score_board.keys(): # if already has this player in dic\n if int(score) > int(score_board[name]): # compare score\n score_board[name] = score # if bigger, update\n else:\n score_board[name] = score # new player, update\n # sort the list and take the top 10\n return sorted(score_board.items(), key=lambda x: x[1], reverse=True)[:10]",
"def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.child_history[i].get('score') for i in xrange(0, len(self.child_history))]",
"def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]",
"def save_scores(self):\n\n with open('.scores.pickle', 'wb') as f:\n pickle.dump(self.scores, f)",
"def getScores(self,query):\n pass",
"def save_scores(self) -> None:\n # generate scores for this round for all players and save it in the Match instances and create end_time\n # of round order = {'order': <order>, 'left_window_value': <value to display>}\n order = ct.Controls.save_scores(self.lignes)\n # order = ct.Controls.end_round(self.lignes)\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(\n self.master.master.list_instances_menus_tournament)\n self.master.master.launch()\n # self.destroy_window()\n self.destroy_widgets()\n self.display_rounds_result()",
"def retrieve_scores(num_letters, language, folder, min_score, max_score):\n if (num_letters == 1):\n return retrieve_letter_scores(1, language, folder, min_score, max_score)\n elif (num_letters == 2):\n return retrieve_syllable_scores(num_letters, 1, language, folder, min_score, max_score)\n elif (num_letters == 3):\n return retrieve_syllable_scores(num_letters, 3, language, folder, min_score, max_score)\n else:\n print(\"Error: incorrect number of letters. Value ranges from 1 to 3.\\n\")",
"def get_scores(self):\n return self.score",
"def _get_scores_list(self):\n self.scores = dict()\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n score = open('{0}/{1}/{2}/extract_all.sort.uniq.txt'.format(self.path, subdir, DOCKING_RUN_FILES),'r').read().split()[-1]\n self.scores[subdir] = float(score.strip())\n except:\n pass",
"def get_all_saved():\n return saved.find()",
"def get_highscores(self):\n return self.database.get_high_scores(self.difficulty)",
"def load_scores(score_dir):\n score_files = fi.find_files(score_dir, 'sc')\n scores = {\n get_target_name(f):\n pd.read_csv(f, delimiter='\\s*', index_col='description',\n engine='python')\n for f in score_files\n }\n # If duplicate structures present, remove all but first.\n for x, y in scores.items():\n scores[x] = y.loc[~y.index.duplicated(keep='first')]\n return scores",
"def get_levels(self):\n return self.levels[self.game]",
"def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score",
"def save_scores(self, score_file_name):\n with open(score_file_name, 'wb') as score_file:\n pickle.dump(self.scores, score_file)",
"def score(self):\n return self.client.call('GET', self.name + 'score')",
"def load_all_scores(path=None, use_cache=True):\n cache_path = SAVE_ROOT / 'feature' / 'dataframe_map.pkl'\n make_parent_dirs(cache_path)\n if cache_path.is_file() and use_cache:\n logging.info('loading score_data from cache file: {}'.format(cache_path))\n return pickle.load(cache_path.open('rb'))\n score_db_index = load_score_db_index(path)\n score_data = make_all_score_matrices(score_db_index)\n pickle.dump(score_data, cache_path.open('wb'))\n return score_data",
"def save_steady_scores(self):\n \n assert (hasattr(self,'re_scores') and hasattr(self,'ri_scores') and hasattr(self,'he_scores') )\n data=np.load(self.data_path,allow_pickle=True)\n dataMap=dict(data.items())\n scores_attrs=['re_scores','ri_scores','he_scores']\n \n for scores_attr in scores_attrs:\n \n assert(hasattr(self,scores_attr)),'%s is not a field'%scores_attr\n dataMap[scores_attr]=getattr(self,scores_attr)\n\n np.savez(self.data_path,**dataMap)",
"def list_of_scores(self):\n with open(Constant.this_score, \"r\") as f:\n final= f.read()\n final = int(final)\n with open(Constant.list_scores, \"r\") as li:\n points_file = json.load(li)\n points = points_file['HighScores']\n\n with open(Constant.list_scores,\"w\") as files:\n\n if len(points) == 5:\n if points[0] < final:\n for i in range(len(points)-1,0,-1):\n points[i] = points[i-1]\n points[0] = final\n json.dump(points_file,files)\n elif points[0] > final:\n if points[1] < final:\n for i in range(len(points)-1,1,-1):\n points[i] = points[i-1]\n points[1] = final\n json.dump(points_file,files)\n elif points[1] > final:\n if points[2] < final:\n for i in range(len(points)-1,2,-1):\n points[i] = points[i-1]\n points[2] = final\n json.dump(points_file,files)\n elif points[2] > final:\n if points[3] < final:\n for i in range(len(points)-1,3,-1):\n points[i] = points[i-1]\n points[3] = final\n json.dump(points_file,files)\n elif points[3] > final:\n if points[4] < final:\n points[4] = final\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)",
"def get_score_history(self):\n return self._score_history",
"def get_all_game_levels(self):\n\n self._katakana_database.create_katakana_database_connection()\n\n raw_game_levels = self._katakana_database.katakana_db.execute(\n \"\"\"SELECT game_level FROM Games\"\"\").fetchall()\n\n all_game_levels = []\n for i in raw_game_levels:\n all_game_levels.append(i[0])\n return all_game_levels",
"def get_scores(tmp_dir, profile_file_directory, stderr_log_output):\r\n\t\r\n\tranges = pickle.load(open(os.path.join(tmp_dir, \"ranges.pkl\")))\r\n\t\r\n\t(database, locusList,list_of_all_allele_numbers_tuple) = try_and_except(stderr_log_output,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tget_profiles,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tprofile_file_directory)\r\n\t\r\n\tresults = try_and_except(stderr_log_output,\r\n\t\t\t\t\t\t\t score,\r\n\t\t\t\t\t\t\t tmp_dir,\r\n\t\t\t\t\t\t\t locusList,\r\n\t\t\t\t\t\t\t ranges)\r\n\t\r\n\treturn results, database, locusList, list_of_all_allele_numbers_tuple",
"def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList",
"def save(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'w') as rank_file:\n for team, score in self.score.items():\n rank_file.write('%s,%s\\n' % (team, score))",
"def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]",
"def get_highscores(self):\n return self.filter(active=False, finished=True).order_by(\"-score\")",
"def scores_for(self, board):\r\n scores = [1]*board.width\r\n for i in range(board.width):\r\n if not board.can_add_to(i):\r\n scores[i] = -1\r\n elif board.is_win_for(self.checker):\r\n scores[i] = 100\r\n elif board.is_win_for(self.opponent_checker()):\r\n scores[i] = 0\r\n elif self.lookahead == 0:\r\n scores[i] = 50\r\n else:\r\n board.add_checker(self.checker, i)\r\n other = AIPlayer(self.opponent_checker(), self.tiebreak, self.lookahead-1)\r\n other_scores = other.scores_for(board)\r\n if max(other_scores) == 100:\r\n scores[i] = 0\r\n elif max(other_scores) == 50:\r\n scores[i] = 50\r\n elif max(other_scores) == 0:\r\n scores[i] = 100\r\n board.remove_checker(i)\r\n return scores",
"def get_rules_at(self, level):\n if level >= len(self._Psi):\n return []\n return self._Psi[level]"
] |
[
"0.7215508",
"0.6560095",
"0.6534747",
"0.6025395",
"0.58823144",
"0.5759367",
"0.57350314",
"0.5605739",
"0.55363613",
"0.5535136",
"0.5493144",
"0.54914767",
"0.54653996",
"0.5458332",
"0.537102",
"0.5313048",
"0.5254255",
"0.5236667",
"0.5226593",
"0.52190083",
"0.5205779",
"0.5178574",
"0.5154849",
"0.5153175",
"0.514329",
"0.5137505",
"0.5125312",
"0.5122351",
"0.5119391",
"0.5113699"
] |
0.8105305
|
0
|
A method to find all scores by a specific player.
|
def find_all_by_player(self, player):
cursor = self._connection.cursor()
command = 'SELECT * FROM scores WHERE player=? ORDER BY level'
cursor.execute(command, [player])
return cursor.fetchall()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise Exception(\"Player not in score list\")",
"def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores",
"def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]",
"def get_scores_in_order_of_players(self):\n \n players = self.referee.get_current_state().get_players()\n\n player_scores = []\n for player_color in self.player_color_order:\n for player in players:\n if player_color == player.get_color():\n player_scores.append(player.get_score())\n break\n\n return player_scores",
"def find_all(self):\n cursor = self._connection.cursor()\n cursor.execute('SELECT * FROM scores ORDER BY level')\n all_scores = cursor.fetchall()\n return all_scores",
"def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players",
"def perform_get_scores(responder, options):\n match = options['<match-id>']\n all_scores = scores.get_match_scores(match)\n\n if options.get(yaml_opt, False):\n responder(yaml.dump({'scores': all_scores}))\n else:\n if all_scores is None:\n responder('No scores available for match {0}'.format(match))\n else:\n for tla, score in all_scores.iteritems():\n responder('Team {0} scored {1} in match {2}'.format(tla, score, match))",
"def get_current_score(self, game_id: int, player_id: int) -> int:\n with self.eng.session_mgr() as session:\n return session.query(\n func.sum(TablePlayerRound.score)\n ).filter(and_(\n TablePlayerRound.player_key == player_id,\n TablePlayerRound.game_key == game_id\n )).scalar()",
"def get_score(self, player):\n\n df = pd.read_csv('RPSscores.csv')\n if not str(player) in df['Name'].to_dict().values():\n df.loc[len(df.index)] = [str(player),\n 0, 0, 0]\n player_index = int(df.loc[df['Name'] == str(player)].index[0])\n result = 'wins: ' + str(df.iloc[player_index, 1]) + '\\n' + \\\n 'draws: ' + str(df.iloc[player_index, 2]) + '\\n' + \\\n 'losses: ' + str(df.iloc[player_index, 3])\n return result",
"def find_all_by_level(self, level):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE level=? ORDER BY score'\n cursor.execute(command, [level])\n all_scores_by_level = cursor.fetchall()\n return all_scores_by_level",
"def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores",
"def playerStandings(matchid):\n DB = dbc()\n c = DB.cursor()\n c.execute('SELECT matches.playerid, name, win, total_matches, \\\n score, played, bye \\\n FROM matches JOIN players \\\n ON matches.playerid = players.playerid \\\n WHERE matches.matchid = %s \\\n ORDER BY matches.score DESC', (matchid,))\n player_stats = c.fetchall()\n DB.close()\n return player_stats",
"def find_players_for_team(self, team, src='roster', season=None):\n # creating class wide variable to hold current team\n if type(team) is str:\n team = Team.find(team)\n\n print(\"+ Searching %s players for %s\" % (src, team))\n\n if src == 'roster':\n players = self.get_roster_players_via_api(team, season)\n elif src == 'system':\n players = self.get_system_players(team)\n elif src == 'contract':\n players = self.get_contracted_players(team)\n\n return players",
"def drawsheet_get_score(player, scores):\n def distance(score, player):\n dx = float(score[0] - player[0]) / 5\n dy = float(score[1] - player[1])\n if dy < 0:\n dy *= 3\n\n return math.sqrt(dx * dx + dy * dy)\n\n if len(scores) == 0:\n return None\n\n scores.sort(key=lambda s: distance(s[1], player[1]))\n #print([(k, distance(k[1], player[1])) for k in scores[:3]])\n score = scores[0]\n del scores[0]\n\n return score[0]",
"def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score",
"def look_for_players(self):\n log.debug(\"Start looking for players [\" + self.team_link + \"]\")\n\n players_found = {}\n table = self.soup.find('table', {\"class\": \"table table-striped table-hover no-footer\"})\n for tr in table.find_all(\"tr\"):\n a = tr.find(\"a\")\n if a:\n # tag a container of the name player found\n player_name = str(a.string).strip()\n link = self.host_url + a[\"href\"]\n players_found[link] = player_name\n\n return players_found",
"def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)",
"def search_player_by_name(players_table, name):\r\n result = players_table.search(Query().Nom == name)\r\n print(result)",
"def players(self):\n return Player.objects.filter(team=self)",
"def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result",
"def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()",
"def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))",
"def get_scores(self, tournament: Tournament):\n self.model.eval()\n # collate_fn = lambda x: collate_teams(x, tournament.max_members)\n dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)\n iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)\n scores = []\n for i, team in enumerate(iterator):\n score = self.model.get_team_score(team.to(self.device))\n scores.append(score.cpu().numpy())\n\n scores = np.concatenate(scores)\n return scores.flatten()",
"def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores",
"def get_players_by_rank(self):\n return sorted(self.participants, key=lambda p: p.tournament_score, reverse=True)",
"def getpoints(self, player):\n return self.Points[player]",
"def scores_for(self, board):\r\n scores = [1]*board.width\r\n for i in range(board.width):\r\n if not board.can_add_to(i):\r\n scores[i] = -1\r\n elif board.is_win_for(self.checker):\r\n scores[i] = 100\r\n elif board.is_win_for(self.opponent_checker()):\r\n scores[i] = 0\r\n elif self.lookahead == 0:\r\n scores[i] = 50\r\n else:\r\n board.add_checker(self.checker, i)\r\n other = AIPlayer(self.opponent_checker(), self.tiebreak, self.lookahead-1)\r\n other_scores = other.scores_for(board)\r\n if max(other_scores) == 100:\r\n scores[i] = 0\r\n elif max(other_scores) == 50:\r\n scores[i] = 50\r\n elif max(other_scores) == 0:\r\n scores[i] = 100\r\n board.remove_checker(i)\r\n return scores",
"def get_player_stats_from_game(team, year, week):",
"def get_players(self):\r\n return self.players.values()",
"def get_player_games(self, year, use_local=True):"
] |
[
"0.7240055",
"0.69891363",
"0.67218506",
"0.6526238",
"0.64880234",
"0.6344628",
"0.6267268",
"0.6236462",
"0.62285614",
"0.6221099",
"0.6183751",
"0.6143489",
"0.6104288",
"0.6069036",
"0.6060385",
"0.6030242",
"0.6006022",
"0.5981791",
"0.59692144",
"0.5958714",
"0.5953676",
"0.5932083",
"0.59267634",
"0.5909497",
"0.5900597",
"0.5890038",
"0.58842015",
"0.58768064",
"0.5869529",
"0.58693206"
] |
0.84626925
|
0
|
A method to add new scores to the database. A new score is added after every game played.
|
def add_score(self, player, level, score):
cursor = self._connection.cursor()
command = 'INSERT INTO scores (player, level, score) VALUES (?, ?, ?)'
cursor.execute(command, [player, level, score])
self._connection.commit()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_score(self, data):\n # sql_score_add = \"\"\"update $s SET\n for team_id, score in data.items():\n if int(score) in range(0, 14):\n column = \"score_\" + (score)\n sql_cmd = (\"UPDATE %s SET %s=1 WHERE team_id=%s\" % (self.dbtable, column, team_id))\n print sql_cmd\n try:\n self._db_cur.execute(sql_cmd)\n self._db_conn.commit()\n except sqlite3.Error as er:\n print er",
"def add_score(self, score):\n self._score += score",
"def add_score(self, difficulty, time, name):\n self.database[difficulty].insert_one({'time': time, 'name': name})",
"def add_score(score):\n global SCORE\n SCORE = SCORE + score\n # update the display\n mvaddstr(1, 2, \"Score:\", color_pair(HEADING_COLOUR) | A_BOLD)\n mvaddstr(1, 9, \"%d\" % SCORE, color_pair(TEXT_COLOUR) | A_BOLD)",
"def appendScore(self, l):\r\n score = Highscore(l[0], l[1])\r\n self.scores.append(score)",
"def increase_score(self):\n\n old_score = self.get_score()\n new_score = old_score + 1\n sql = \"UPDATE Users SET score = ? WHERE username = ?\"\n self.conn.execute(sql, (new_score, self.username))\n self.conn.commit()",
"def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()",
"def update_scores(self, score):\n self.result_list.append(score)\n\n if self.best_score == 0 and self.worst_score == 0:\n self.best_score = score\n self.worst_score = score\n\n if score < self.best_score:\n self.best_score = score\n\n if score > self.worst_score:\n self.worst_score = score",
"def insert_player(self, name, score):\r\n command = \"UPDATE %s \" % self.table_name_players\r\n command += \"SET name_player = '%s', score = %d \" % (name, score)\r\n command += \"WHERE name_player = ( \"\r\n command += \"SELECT name_player \"\r\n command += \"FROM %s \" % self.table_name_players\r\n command += \"WHERE score < %d \" % score\r\n command += \"ORDER BY score ASC \"\r\n command += \"LIMIT 1 );\"\r\n\r\n self.cursor.execute(command)\r\n self.conn.commit()",
"def _fcn_add_score_row(self):\n # Increase length :\n self._scoreTable.setRowCount(self._scoreTable.rowCount() + 1)",
"def add_score():\n json_data = request.get_json()\n exercise_id = json_data.get(\"exercise_id\")\n score = json_data.get(\"score\")\n user_id = session.get(\"email\")\n fm.add_attempt(exercise_id, score, user_id)\n\n msg = \"Attempt added. Exercise ID: {} Score: {}\"\\\n .format(exercise_id, score)\n app.logger.info(msg)\n return jsonify(dict(result=\"success\"))",
"def add(self, aVal, bVal, score):\n Relation.add(self, aVal, bVal)\n self._scores[(aVal, bVal)] = score\n return self",
"def enter_game_scores():\n pass",
"def set_scores(self, scores):\n self.score = {k: v for k, v in scores.items()}",
"def add_score(self, config_id, score_list):\n tmp_column = self.sieve_columns.copy()\n tmp_column.remove('rung_id')\n tmp_column.remove('config_id')\n self.sieve_board.loc[\n (self.sieve_board['config_id'] == config_id),\n tmp_column\n ] = [StatusType.FINISHED] + [score_list]\n\n self.is_completed = self._check_completed()\n return",
"def addScore(self, index, lstGross):\n # update gross score for hole\n for player, gross in zip(self.scores, lstGross):\n player.gross[index] = gross\n # update team score\n for team in self.team_list:\n team.calculate_score(index)\n # update team points using other team score\n self.team_list[0].update_points(index, self.team_list[1])\n self.team_list[1].update_points(index, self.team_list[0])",
"def save_scores(self) -> None:\n # generate scores for this round for all players and save it in the Match instances and create end_time\n # of round order = {'order': <order>, 'left_window_value': <value to display>}\n order = ct.Controls.save_scores(self.lignes)\n # order = ct.Controls.end_round(self.lignes)\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(\n self.master.master.list_instances_menus_tournament)\n self.master.master.launch()\n # self.destroy_window()\n self.destroy_widgets()\n self.display_rounds_result()",
"def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]",
"def add_played_game(play):\n games = Play.objects.filter(game=play.game)\n total = 0\n for game in list(games):\n total += game.score\n average_score = total/len(games)\n if ((play.score - average_score) > average_score/2.1):\n play.user.level += 1\n elif ((average_score - play.score) > average_score/1.5):\n play.user.level -= 1\n play.user.save()",
"def add_to_score(self, to_add):\n self.score += to_add",
"def update_score():\n pass",
"def add_game(user, date_played, level, was_won, score, time_taken):\n\n game = Game.objects.get_or_create(user=user, date_played=date_played)[0]\n game.level = level\n game.was_won = was_won\n game.score = score\n game.time_taken = time_taken\n\n game.save()\n return game",
"def record_latest_score(self, score):\r\n self.child_history[-1]['score'] = score",
"def update_score_board(self):\n score = ''\n for key, value in self.model.game_score.items():\n score += key + \"-\" + str(value) + ':'\n if self.view.score_board_entry.get():\n self.view.score_board_entry.delete(0, tkinter.END)\n self.view.score_board_entry.insert('1', score)",
"def add_score(self, points: int) -> None:\n self.__score += points\n\n for rank in self.__ranks.keys():\n if self.__score >= rank:\n self.__level = self.__ranks[rank]\n else:\n break",
"def add_score(self, score_to_add):\n self.score += score_to_add\n if self.score // self.level >= 20:\n self.level += 1\n self.speed *= self.factor\n # Also redraw all pixels because they now change color\n self.screen.grid()\n self.screen.block()\n self.screen.next()\n # Refresh the data on screen\n self.screen.data()",
"def scores(self, value):\n self._scores = value",
"def test_enter_score(self):\n entry = TournamentEntry.query.filter_by(\n player_id=self.player, tournament_id=self.tourn_1).first()\n tourn = Tournament(self.tourn_1)\n\n # a one-off score\n Score(category=self.cat_1, tournament=tourn, entry_id=entry.id,\n score=0).write()\n scores = TournamentScore.query.\\\n filter_by(entry_id=entry.id, tournament_id=tourn.get_dao().id).all()\n compare(len(scores), 1)\n compare(scores[0].score.value, 0)\n\n # score already entered\n score = Score(tournament=tourn, entry_id=entry.id, score=100,\n category=self.cat_1)\n self.assertRaises(ValueError, score.write)",
"def commit_score(self):\n\n # Update the player's total score and total roll count\n self._total_score += self._current_score",
"def _append_score(self, score_field, score):\n if score_field is not None:\n self.args.append(\"SCORE_FIELD\")\n self.args.append(score_field)\n if score is not None:\n self.args.append(\"SCORE\")\n self.args.append(score)"
] |
[
"0.7908688",
"0.68989253",
"0.68800557",
"0.67670214",
"0.66604817",
"0.6655626",
"0.6654588",
"0.6644192",
"0.664415",
"0.6584629",
"0.6540392",
"0.65298015",
"0.6486854",
"0.64767516",
"0.64708185",
"0.64389807",
"0.6434606",
"0.64264286",
"0.642309",
"0.63963497",
"0.63225037",
"0.6314895",
"0.6307863",
"0.62780344",
"0.62740743",
"0.62713104",
"0.62523407",
"0.6240569",
"0.6226504",
"0.62228703"
] |
0.7622393
|
1
|
Simple helper function to get the page URL.
|
def get_url(self, page):
return self.server_url + page
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_url(self) -> str:\n\n return self.__page_url",
"def url(self):\r\n return self.urlparts.geturl()",
"def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())",
"def url(self):\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''",
"def get_absolute_url(self):\n\n url = reverse('comicsite.views.page', args=[self.comicsite.short_name,self.title])\n return url",
"def _get_url(self, absolute):",
"def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url",
"def get_page_url(href):\n # type: (str) -> str\n return \"{}{}\".format(JAFC_URI, href.lstrip(\"/\"))",
"def Url(self) -> str:",
"def get_current_url():\n return current_url",
"def getHomePage(self):\n return self.home_url",
"def url(self):\n _, body = self.request('/v1.1/url', 'GET')\n return body.get('url', None)",
"def path_func(page_name):\n root = get_application().cfg['blog_url']\n if not root.endswith('/'):\n root += '/'\n return urljoin(root, url_quote(page_name))",
"def get_url(server_url, wiki_space, page_title):\r\n return '%s/display/%s/%s' % (server_url, wiki_space, urllib.quote_plus(page_title))",
"def url(request):\n return request.config.getoption(\"--url\")",
"def get_url(mods):\n url = mods.find(\"{{{0}}}location/{{{0}}}url\".format(common.MODS_NS))\n return url.text",
"def get_url(self):\n return self.url",
"def get_url(self):\n return self.url",
"def get_url(self):\n url = self.driver.current_url\n return url",
"def getURLForThing(thing):",
"def last_url(self):\r\n return '{0}page{1}/'.format(self.get_short_url(), self.page_count)",
"def get_short_url_base():",
"def url(self):\n self._current_page += 1\n return URL_TPL.format(self._uid, self._current_page)",
"def tandc_url(request):\n# try:\n# url = Page.objects.get(\n# reverse_id='tandc', publisher_is_draft=False).get_absolute_url()\n# except Page.DoesNotExist:\n\n url = ''\n\n return {\n 'tandc_url': url,\n }",
"def page_url(self):\n url = '/plaque/%s' % self.key.urlsafe()\n return url",
"def _get_url(self, *args):\n if self._baseUrl not in args:\n args.insert(0, self._baseUrl)\n args = filter(lambda item: item is not None, args)\n return \"/\".join(args)",
"def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)",
"def geturl(self):\n return self.__url",
"def _get_page_url(self, page_num):\n\n # for city comes with 2 words, replace the space with -\n # e.g. 'new york' -> 'new-york'\n city = self._city.lower().replace(' ', '-')\n state = self._state.lower().replace(' ', '-')\n page = f'{self._overhead}/{state}/{city}/apartments_condos_houses_townhouses?page={page_num}'\n return page",
"def url():\n ..."
] |
[
"0.76338345",
"0.7173302",
"0.7136625",
"0.71100295",
"0.70039535",
"0.6974125",
"0.6918383",
"0.6789336",
"0.67570287",
"0.6727007",
"0.6721156",
"0.6717571",
"0.6681439",
"0.6677521",
"0.6657421",
"0.66332924",
"0.6630717",
"0.6630717",
"0.6628256",
"0.66132647",
"0.6607386",
"0.6600049",
"0.65595627",
"0.6556276",
"0.65524656",
"0.6548373",
"0.65291226",
"0.6524777",
"0.651307",
"0.64769787"
] |
0.78212947
|
0
|
Get plant details for specified timespan.
|
def plant_detail(self, plant_id, timespan, date):
assert timespan in Timespan
if timespan == Timespan.day:
date_str = date.strftime('%Y-%m-%d')
elif timespan == Timespan.month:
date_str = date.strftime('%Y-%m')
response = self.session.get(self.get_url('PlantDetailAPI.do'), params={
'plantId': plant_id,
'type': timespan.value,
'date': date_str
})
data = json.loads(response.content.decode('utf-8'))
return data['back']
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def timespan(self, timespan=None, timezone=None):\r\n url = '{0}/{1}'.format(self.get_pull_url(), 'timespan')\r\n params = base.get_params(('timespan', 'timezone'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json",
"def get_hours_per_unit_snap(now):\n print(\"/\"*50)\n print(\"GET hours_per_unit SNAP\")\n print(\"/\"*50)\n plant_settings = PlantSetting.objects.latest('timestamp')\n # print(\"SETINGS\",settings.timestamp)\n print(\"NOW: \", now)\n # preventing processing data before start of defined shift\n start, shift = get_shift_info(plant_settings, now)\n print(\"Start: \", start)\n print(\"Shift: \", shift)\n\n if start > now:\n print(\"NOT IN SHIFT\")\n return\n hours_per_unit_dict = main(start, now)\n hours_per_unit_dict['shift'] = shift\n\n return hours_per_unit_dict",
"def _get_floorplans(self, url):\n \n try:\n jdict = self._load_json(url)\n floorplans_groups = jdict['props']['homeDetails']['floorPlans']['floorPlanGroups']\n address_data = list(self._get_address(jdict))\n rental_data = []\n \n # different floorplans, e.g. studio, 1 bedroom 1 bathroom etc.\n for floorplans in floorplans_groups:\n plans = floorplans['plans']\n for section in plans:\n # this is the header \n section_data = self._get_section_data(section)\n rental_data.append(address_data+section_data+[url])\n units = section['units']\n # these are all the units under that header \n for unit in units:\n unit_data = self._get_section_data(unit)\n rental_data.append(address_data+unit_data+[url])\n return rental_data\n except:\n return None",
"def get_by_time(location_data, time: str) -> tracker.Report:\n datetime = time[:10] + \"T00:00:00Z\"\n LOGGER.warning(f\"Getting cases on date: {datetime}\")\n data = location_data.timelines\n try:\n return tracker.Report(\n confirmed=data.confirmed.timeline.get(datetime),\n deaths=data.deaths.timeline.get(datetime),\n # recovered=data.recovered.timeline.get(datetime, 0),\n )\n except ValidationError:\n return None",
"def plant_info(self, plant_id):\n response = self.session.get(self.get_url('newTwoPlantAPI.do'), params={\n 'op': 'getAllDeviceList',\n 'plantId': plant_id,\n 'pageNum': 1,\n 'pageSize': 1\n })\n\n data = json.loads(response.content.decode('utf-8'))\n return data",
"def get_punch(client, company_id, punch_id):\n response = client.read(ENDPOINT.format(company_id=company_id), punch_id)\n try:\n punch_id = response['data']['time_punch']\n return TimePunch(**response['data'])\n except KeyError:\n raise exceptions.EntityNotFoundError('Time Punch', punch_id)",
"def get_plants(self, pi_num):\n plants_container = []\n try:\n with self.connection.cursor() as cursor:\n\n sql = 'select plants.plant_id, plants.target_weight,\\\n plants_to_balance.balance_id, balances.address, \\\n gpio_pin, balances.pi_assigned, experiment_id from plants\\\n left join plants_to_balance using(plant_id)\\\n left join balances using(balance_id)\\\n left join watering_valves using(balance_id)\\\n where pi_assigned = {0} and end_date IS NULL or end_date > curdate()'.format(pi_num)\n\n cursor.execute(sql)\n result = cursor.fetchall()\n\n for row in result:\n # Assign details of a plant here\n tmp_plant = plant_data(row[\"plant_id\"], row[\"balance_id\"],\n row[\"address\"], row[\"gpio_pin\"],\n row[\"target_weight\"], row[\"experiment_id\"])\n plants_container.append(tmp_plant)\n\n except (pymysql.err.DatabaseError,\n pymysql.err.IntegrityError,\n pymysql.err.MySQLError) as exception:\n sys.stderr.write(exception)\n return 2\n\n finally:\n pass\n return plants_container",
"async def api_get_zone_time_block(v: Visitor, zone_id):\n zone_id = str(zone_id)\n\n url = f'https://app.parkmobile.io/api/proxy/parkmobileapi/zone/{zone_id}?'\n print('[DEBUG] api_get_zone_time_block:', url)\n\n data = await v.js_fetch('GET', url)\n zone = next(filter(lambda i: i.get('internalZoneCode') == zone_id, data['zones']))\n\n time_blocks = zone['parkInfo']['timeBlocks']\n tb_in_min = next(filter(lambda i: i.get('timeBlockUnit') == 'Minutes', time_blocks))\n\n time_block_id = tb_in_min['timeblockId']\n\n allow_min = tb_in_min['minimumValue']\n allow_max = tb_in_min['maximumValue']\n allow_interval = tb_in_min['incrementValue']\n minute_options: List[int] = list(range(allow_min, allow_max, allow_interval)) + [allow_max]\n\n return time_block_id, minute_options",
"def get_timespan(name):\n \n if name not in pytplot.data_quants.keys():\n print(\"That name is currently not in pytplot\") \n return\n\n return pytplot.data_quants[name].attrs['plot_options']['trange'][0], pytplot.data_quants[name].attrs['plot_options']['trange'][1]",
"def time_spent_lookup(self):\n time_search = input(\"Enter the duration to search for (minutes): \")\n return time_search",
"def _get_unit_records(self, start_time):\r\n\r\n if self.optMTTF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT DISTINCT MIN(fld_unit, fld_request_date), \\\r\n fld_incident_id, fld_request_date, \\\r\n fld_unit, fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBBD.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT fld_incident_id, fld_request_date, fld_unit, \\\r\n fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit, fld_request_date \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n GROUP BY t2.fld_unit, t1.fld_age_at_incident \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN rtk_incident AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n\r\n return(_results, _error_code)",
"def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df",
"def get_timer_details(id):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select * from mustard.timers where id=%s\", (id,))\n\t\treturn cur.fetchone()",
"def generate_plant_report(self):\n print('PLANTS IN ' + self.name)\n for species, count in self.plants.items():\n print(f'{species}: {count}')",
"def get_timetable(self, robot_id):\n return self.get({'_id': robot_id})",
"def get_floor_plan(port_id):\n url = 'https://api.archisketch.com/v1/public/projects/'\n response = requests.get(url + port_id + '/detail')\n response = response.json()['project']\n floor_plan = response['floorplans'][0]\n return floor_plan",
"def __getitem__(self, datetime_time):\n for stage in self.stages:\n start = stage['dateTime'].time()\n end = (stage['dateTime'] + timedelta(seconds=stage['seconds'])).time()\n\n # check if the times are on the same day\n if start <= datetime_time < end:\n return stage['level']\n # if start is > end, the stage loops from one day to another\n # technically, this could be wrong if someone maintained the same sleep stage for over 24 hours\n # e.g. go to sleep at 9pm until 10pm the next day would be missed.\n elif start >= datetime_time < end and start > end:\n return stage['level']\n\n return None",
"def get_time_info(self):\n\n raise NotImplementedError",
"def get_created_plants(self):\n powerplant_ouessant = PowerPlant.objects.get(name='Ouessant Tidal Power Project')\n powerplant_ilarionas = PowerPlant.objects.get(name='Ilarionas')\n powerplant_tonstad = PowerPlant.objects.get(name='Tonstad')\n return [powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad]",
"def _get_floorplan(self, unit_tag):\n unit = []\n for cell in unit_tag.find_all('td'):\n if cell.attrs:\n # scrape the apartment number \n if cell['data-tid'] == 'pdpfloorplan-displayText':\n floorplan_num = cell.get_text()\n unit.append(floorplan_num)\n # scrape the apartment price \n if cell['data-tid'] == 'pdpfloorplan-price':\n try:\n # remove any punctuation marks and $ sign\n fp_price = cell.get_text()\\\n .replace('$','')\\\n .replace(',','')\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n price = re.findall(pattern, fp_price)[0]\n unit.append(float(price))\n except:\n unit.append(np.nan)\n # scrape the number of bedrooms and bathrooms \n if cell['data-tid'] == 'pdpfloorplan-bedbaths':\n try:\n bedbath_tag = cell.find_all('span')\n bed_tag, bath_tag = bedbath_tag[0], bedbath_tag[1]\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n bed = re.findall(pattern, bed_tag.get_text())\n bath = re.findall(pattern, bath_tag.get_text())\n bed_fp, bath_fp = 0, 0\n if bed:\n bed_fp = bed[0]\n if bath:\n bath_fp = bath[0]\n unit.append(float(bed_fp))\n unit.append(float(bath_fp))\n except:\n unit.append(np.nan)\n unit.append(np.nan)\n # scrape the area of the apartment in square foot \n if cell['data-tid'] == 'pdpfloorplan-sqft':\n try:\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n sqft_fp = re.findall(pattern, cell.get_text())[0]\n unit.append(float(sqft_fp))\n except:\n unit.append(np.nan)\n return unit",
"def generate_zonal_demand(self, t):\n t_hour = (t / 3600)\n t_15_min = convert_seconds_to_15_min(t)\n\n # this needs to be double checked\n if self.WARMUP_PHASE and t >= self.ANALYSIS_TIME_SECONDS:\n # print(\"changing demand rate from warm up to analysis\")\n # self.set_analysis_time(self.ANALYSIS_TIME_HOUR)\n self.WARMUP_PHASE = False\n # update drivers infor/expectations\n\n # generate demand only every 15 minutes\n if t % 900 == 0:\n for z in self.zones:\n z.generate_requests_to_time(t) # t is seconds",
"def get_dep_times(self,route,dt):\n \n day = dt.weekday()\n month = dt.month\n weekend = day > 4\n \n\n variations = self.dep_times[route]\n output = []\n for index, variation in enumerate(variations):\n pattern = variation['pattern']\n times=[]\n busIDs=[]\n for bus_number, pair in enumerate(variation['leave_times']):\n if self.runs_today(pair['schedule'],day):\n ts = pair['lt'].split(':')\n total = int(ts[0])*3600 + int(ts[1]) * 60 + int(ts[2])\n times.append(total)\n busIDs.append(bus_number)\n \n matrix = pd.DataFrame({'actualtime_arr_from':times})\n matrix['dayofweek']=day\n matrix['month'] = month\n matrix['weekend'] = weekend\n matrix['variation'] = index\n matrix['busIDs'] = busIDs\n matrix['routeid'] = route \n if matrix.shape[0] > 0:\n output.append({'pattern':pattern,'matrix':matrix})\n return output",
"def plants (plant_name, plant_type):\n print (f\"\\n{plant_name.title()} is a {plant_type}. \\n\")",
"def lookup_time_spent():\n while True:\n search_query = input('Show entries in which time spent '\n '(in minutes) is: ')\n if validate_lookup_time_spent_format(search_query):\n break\n print('** Please enter positive integer **')\n return Entry.select().where(Entry.time_spent == search_query)",
"def fetch_production(\n zone_key: str = \"JP-KY\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n) -> Union[dict, list]:\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n data = {\n \"zoneKey\": zone_key,\n \"datetime\": None,\n \"production\": {\n \"biomass\": 0,\n \"coal\": 0,\n \"gas\": 0,\n \"hydro\": None,\n \"nuclear\": None,\n \"oil\": 0,\n \"solar\": None,\n \"wind\": None,\n \"geothermal\": None,\n \"unknown\": 0,\n },\n \"storage\": {},\n \"source\": \"www.kyuden.co.jp\",\n }\n # url for consumption and solar\n url = \"https://www.kyuden.co.jp/td_power_usages/pc.html\"\n r = get(url)\n r.encoding = \"utf-8\"\n html = r.text\n soup = BeautifulSoup(html, \"lxml\")\n # get hours, minutes\n ts = soup.find(\"p\", class_=\"puProgressNow__time\").get_text()\n hours = int(re.findall(r\"[\\d]+(?=時)\", ts)[0])\n minutes = int(re.findall(r\"(?<=時)[\\d]+(?=分)\", ts)[0])\n # get date\n ds = soup.find(\"div\", class_=\"puChangeGraph\")\n date = re.findall(r\"(?<=chart/chart)[\\d]+(?=.gif)\", str(ds))[0]\n # parse datetime\n dt = f\"{date[:4]}-{date[4:6]}-{date[6:]} {hours:02d}:{minutes:02d}\"\n dt = arrow.get(dt).replace(tzinfo=\"Asia/Tokyo\").datetime\n data[\"datetime\"] = dt\n # consumption\n cons = soup.find(\"p\", class_=\"puProgressNow__useAmount\").get_text()\n cons = re.findall(\n r\"(?<=使用量\\xa0)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?(?=万kW/)\",\n cons,\n )\n cons = cons[0].replace(\",\", \"\")\n # convert from 万kW to MW\n cons = float(cons) * 10\n # solar\n solar = soup.find(\"td\", class_=\"puProgressSun__num\").get_text()\n # convert from 万kW to MW\n solar = float(solar) * 10\n\n # add nuclear power plants\n # Sendai and Genkai\n url_s = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/sendai/rename.php?\",\n \"A=s_power.fdat&B=ncp_state.fdat&_=1520532401043\",\n ]\n )\n url_g = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/genkai/rename.php?\",\n \"A=g_power.fdat&B=ncp_state.fdat&_=1520532904073\",\n ]\n )\n sendai = get(url_s).text\n sendai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n sendai,\n )\n genkai = get(url_g).text\n genkai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n genkai,\n )\n nuclear = 0\n for sendai_i in sendai:\n nuclear += float(sendai_i)\n for genkai_i in genkai:\n nuclear += float(genkai_i)\n # convert from 万kW to MW\n nuclear = nuclear * 10\n\n # add the exchange JP-CG->JP-KY\n exch_list = occtonet.fetch_exchange(\"JP-KY\", \"JP-CG\")\n # find the nearest exchanges in time to consumption timestamp\n nearest_exchanges = sorted(exch_list, key=lambda exch: abs(exch[\"datetime\"] - dt))\n # take the nearest exchange\n exch = nearest_exchanges[0]\n # check that consumption and exchange timestamps are within a 15 minute window\n if abs(dt - exch[\"datetime\"]).seconds <= 900:\n\n generation = cons - exch[\"netFlow\"]\n data[\"production\"][\"solar\"] = solar\n data[\"production\"][\"nuclear\"] = nuclear\n data[\"production\"][\"unknown\"] = generation - nuclear - solar\n\n return data\n else:\n return []",
"def get_task(self, locations):\n if self.current_location == self.desk_location:\n self.task_location = locations[random.randint(0, len(locations)-1)]\n self.task_duration = random.randint(1, 10)\n else:\n self.task_location = self.desk_location\n self.task_duration = random.randint(50, 100)",
"def search_plant_info(name: str) -> Response:\n\n try:\n data = get_plant_info(name, 100)\n return jsonify(status_code=200, data=data)\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )",
"def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data",
"def get_standin_for(userid):",
"async def get_detailed_stations(response: Response,\n toi: datetime = None,\n source: StationSourceEnum = StationSourceEnum.WILDFIRE_ONE,\n __=Depends(audit),\n _=Depends(authentication_required)):\n try:\n logger.info('/stations/details/')\n response.headers[\"Cache-Control\"] = no_cache\n if toi is None:\n # NOTE: Don't be tempted to move this into the function definition. It's not possible\n # to mock a function if it's part of the function definition, and will cause\n # tests to fail.\n toi = get_utc_now()\n else:\n toi = get_hour_20(toi)\n weather_stations = await fetch_detailed_stations_as_geojson(toi, source)\n return DetailedWeatherStationsResponse(features=weather_stations)\n\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise"
] |
[
"0.6048593",
"0.5318801",
"0.5122391",
"0.50630397",
"0.49727464",
"0.4930676",
"0.4928082",
"0.48587734",
"0.48452678",
"0.48042193",
"0.47776973",
"0.47688463",
"0.46948302",
"0.46917686",
"0.4672459",
"0.46535444",
"0.46435475",
"0.46115947",
"0.45913598",
"0.45773146",
"0.4546723",
"0.45351455",
"0.4510971",
"0.45023674",
"0.4499432",
"0.44841078",
"0.44591334",
"0.44309542",
"0.44165477",
"0.44078302"
] |
0.74597615
|
0
|
Get inverter data for specified date or today.
|
def inverter_data(self, inverter_id, date):
if date is None:
date = datetime.date.today()
date_str = date.strftime('%Y-%m-%d')
response = self.session.get(self.get_url('newInverterAPI.do'), params={
'op': 'getInverterData',
'id': inverter_id,
'type': 1,
'date': date_str
})
data = json.loads(response.content.decode('utf-8'))
return data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_data_date(self, date):\n data = {}\n for stock in self.stocks:\n data[stock] = self.stock_data[stock].to_stock_dataframe_day(date)\n return data",
"def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()",
"def to_stock_data_day(self, date):\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n new_stock_data = StockData()\n\n for i in dataframes + dictionaries:\n setattr(new_stock_data, i, getattr(self, i)[date])\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n new_stock_data.dates = [date]\n new_stock_data.str_dates = [str(date)[:USEFUL_TIMESTAMP_CHARS]]\n\n return new_stock_data",
"def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date):\n raise NotImplementedError",
"def historical(self, date, base='USD'):\n try:\n resp = self.client.get(self.ENDPOINT_HISTORICAL %\n date.strftime(\"%Y-%m-%d\"),\n params={'base': base})\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n return resp.json(parse_int=decimal.Decimal,\n parse_float=decimal.Decimal)",
"def fetchEquityDataForSingleDay(on_date, useCache=False):\n return fetchEquityData(on_date, on_date, useCache)",
"def fetch_sundata(self, date: datetime) -> Sundata:\n pass",
"def current_daily_data(self):\n return self._current_daily_data",
"def get(self, as_of_date: str = None):\n if not as_of_date:\n as_of_date = (datetime.now() - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n\n ExchangeRates.GET_EXCHANGE_RATES = ExchangeRates.GET_EXCHANGE_RATES.format(as_of_date, '{0}')\n return self._query_get_all('ExchangeRate', ExchangeRates.GET_EXCHANGE_RATES)",
"def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]",
"def get_rates_for(currency: str, date: str):\n baseurl = f\"https://openexchangerates.org/api/historical/{date}.json\"\n params = {\"app_id\": OEG_APP_ID, \"symbols\": currency, \"base\": \"USD\"}\n return make_request(baseurl=baseurl, params=params)",
"def get_currency_exchange_daily(self, from_symbol, to_symbol, outputsize='compact'):\n _FUNCTION_KEY = 'FX_DAILY'\n return _FUNCTION_KEY, \"Time Series FX (Daily)\", 'Meta Data'",
"def get_inflation() -> pd.DataFrame:\n url = f\"https://www.alphavantage.co/query?function=INFLATION&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n r = requests.get(url, headers={\"User-Agent\": get_user_agent()})\n if r.status_code != 200:\n return pd.DataFrame()\n data = pd.DataFrame(r.json()[\"data\"])\n data[\"date\"] = pd.to_datetime(data[\"date\"])\n data[\"Inflation\"] = data[\"value\"].astype(float)\n data = data.drop(columns=[\"value\"])\n\n return data",
"def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df",
"def get_daily_historic_data(self, ticker, start_date, end_date):\n av_url = self._construct_alpha_vantage_symbol_call(ticker)\n\n try:\n av_data_js = requests.get(av_url)\n data = json.loads(av_data_js.text)['Time Series (Daily)']\n except Exception as e:\n print(\n \"Could not download AlphaVantage data for %s ticker \"\n \"(%s)...stopping.\" % (ticker, e)\n )\n return pd.DataFrame(columns=COLUMNS).set_index('Date')\n else:\n prices = []\n for date_str in sorted(data.keys()):\n date = dt.strptime(date_str, '%Y-%m-%d')\n if date < start_date or date > end_date:\n continue\n\n bar = data[date_str]\n prices.append(\n (\n date, \n float(bar['1. open']),\n float(bar['2. high']),\n float(bar['3. low']),\n float(bar['4. close']),\n int(bar['6. volume']),\n float(bar['5. adjusted close'])\n )\n )\n price_df = pd.DataFrame(prices, columns=COLUMNS).set_index('Date').sort_index()\n self._correct_back_adjusted_prices(price_df)\n return price_df",
"def to_stock_dataframe_day(self, date):\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n class_data = [i for i in dir(self) if not callable(getattr(self, i)) and\n not i.startswith(\"__\") and type(getattr(self, i)) is pd.DataFrame]\n df = pd.DataFrame()\n for i in class_data:\n df = join_features(df, getattr(self, i), fill_method=FillMethod.FUTURE_KROGH)\n return df.ix[date, :]",
"def inverter_detail(self, inverter_id):\n response = self.session.get(self.get_url('newInverterAPI.do'), params={\n 'op': 'getInverterDetailData',\n 'inverterId': inverter_id\n })\n\n data = json.loads(response.content.decode('utf-8'))\n return data",
"def get_historic_data(end_date = datetime.now(), \n start_date = datetime.now() + timedelta(-365),\n ticker=[],\n close_only=True):\n #checks if the parameters provided through \"ticker\" is not an empty list\n #if it is, the function won't go forward after this point. returns explanatory message.\n if ticker == []:\n return \"Empty list of tickers\"\n \n #if a string is provided as \"ticker\" parameter, then it splits the string by \n #spaces and store the outcome in a list.\n elif type(ticker) is str:\n ticker = ticker.split(\" \")\n \n iex_token = os.getenv(\"IEX_TOKEN\")#not necessary anymore.\n if type(iex_token) == str: print(\"IEX Key found successfully ...getting data\")\n else: return \"Error: IEX Key NOT found\"\n \n \n #Gets historical data with the parameters provided.\n #Gets only \"close\" and \"volume\" value for efficiency.\n prices = get_historical_data(ticker, start_date, end_date,\n output_format='pandas', \n token=iex_token, \n close_only=close_only\n )\n \n #If only one ticker is provided, then it adds another indexing level to the column\n #with the ticker. This is done for two reasons: 1) To visualize the ticker downloaded \n #as a confirmation that I am working with correct data. 2) To mimic the format of the\n #dataframe obtained when getting 2 or more tickers data (2-level column indexing).\n if len(ticker) == 1:\n new_columns = pd.MultiIndex.from_product([ [ticker[0]],prices.columns ] )\n prices.columns = new_columns\n \n return prices",
"def collect_data_date(self, date=None):\n if date is None:\n date = self.date\n # TODO make it so it doenst re-collect all data and just adds historical's data\n self.collect_all_stock_data()",
"def get_data_extended(self, inception_date, interval):\n instrument = self.instrumentLookup()\n from_date = dt.datetime.strptime(inception_date, \"%Y-%m-%d\")\n to_date = dt.date.today()\n data = pd.DataFrame(columns=[\"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n while True:\n if from_date.date() >= (dt.date.today() - dt.timedelta(100)):\n data = data.append(\n pd.DataFrame(\n self.kite.historical_data(\n instrument, from_date, dt.date.today(), interval\n )\n ),\n ignore_index=True,\n )\n break\n else:\n to_date = from_date + dt.timedelta(100)\n data = data.append(\n pd.DataFrame(\n self.kite.historical_data(\n instrument, from_date, to_date, interval\n )\n ),\n ignore_index=True,\n )\n from_date = to_date\n data.set_index(\"date\", inplace=True)\n self.data_df = data",
"def _get_converted_data(self):\n pass",
"def lookup_daily(self, **kwargs):\n return self.lookup(period=self.PERIOD_DAILY, **kwargs)",
"def get_data_from_exchange(self, now):\n currency_options = dict(\n currency_pair='USD',\n bid={12.00 : {'guy_1' : 100.00}},\n ask={14.00 : {'guy_2' : 200.00}},\n time=datetime.datetime.now()\n )\n currency_pair_state = CurrencyPairState(**currency_options)\n return [currency_pair_state]",
"def find_price_inverter(inverter_model):\n\n if inverter_model in INVERTERS:\n return INVERTERS[inverter_model]\n else:\n raise 'The inverter does not in database'",
"def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date, provider=None, *args, **kwargs):\n raise NotImplementedError",
"def get_daily(self, from_currency, to_currency, limit = DEFAULT_LIMIT, aggregate = DEFAULT_AGGREGATE):\n return self.__get(HISTORY_DAY, from_currency, to_currency, limit, aggregate)",
"def _check_day_data(self, datetime):\n if self.curr_day_data is None or self.compare_dates(self.curr_day_data.index[0], datetime) is False:\n date = dt.datetime(year=datetime.year, month=datetime.month, day=datetime.day)\n symbols = [product.symbol for product in self.products]\n self.curr_day_data = get_data_multi(symbols, date, second_bars=self.second_bars)\n self.clear_resting_orders()",
"def find_by_date():\n\n input_date = request.args.get('date')\n \n user_id = session['user']\n user_inv = (UserInv.query.filter_by(user_id=user_id)).all()\n\n inv_by_date = []\n\n for item in user_inv: \n if str(item.inv.date_of_investment) == input_date:\n inv_by_date.append({\"company\": item.inv.company_name, \n \"quantity\": item.inv.quantity, \n \"cost\": item.inv.cost})\n print inv_by_date\n\n return jsonify(inv_by_date)",
"def get_daily_data(report, yyyymmdd):\n filename = _get_data_filename(report, yyyymmdd)\n if not os.path.exists(filename):\n return None\n else:\n with open(filename) as f:\n return cPickle.load(f)",
"def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })"
] |
[
"0.59639776",
"0.5859418",
"0.57783407",
"0.56881666",
"0.5599386",
"0.5589904",
"0.55563724",
"0.55145603",
"0.55125386",
"0.54645395",
"0.53165585",
"0.53057396",
"0.5285592",
"0.5245948",
"0.5234474",
"0.5232715",
"0.519923",
"0.51930124",
"0.5186436",
"0.5179437",
"0.51698214",
"0.51550657",
"0.5141812",
"0.5136369",
"0.51320934",
"0.51250845",
"0.5108458",
"0.5105973",
"0.5041637",
"0.50377446"
] |
0.7362004
|
0
|
Use device_list; it's more descriptive since the list contains more than just inverters.
|
def inverter_list(self, plant_id):
warnings.warn("This function may be deprecated in the future because naming is not correct, use device_list instead", DeprecationWarning)
return self.device_list(plant_id)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_device_list(self, device_list):\n self.device_list = device_list\n\n self.device_combo.clear()\n\n if not device_list:\n return\n\n self.device_combo.addItem(\"\")\n\n active_entry = None\n\n for dev in device_list:\n\n action_string = \"{model:<18} - {contype:<7} - {serial}\".format(model=dev.model,\n contype=dev.device_type,\n serial=dev.serial)\n if dev.serial == self.serial:\n active_entry = action_string\n self.device_combo.addItem(action_string)\n\n if active_entry is not None:\n self.device_combo.setCurrentText(active_entry)",
"def __build_device_list(self):\r\n if self.__lib.TLI_BuildDeviceList() != 0:\r\n raise FailedException(\"build the device list.\")",
"def list_devices(self):\n return [x for x in self.devices.keys()]",
"def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))",
"def testBuildDeviceList(self):\n\n self.inv._devices = {\n 'first': self.Device(),\n 'second': self.Device(),\n 'third': self.Device()\n }\n self.inv._CmdFilter('targets', ['^f.*,second,^t.ird'])\n self.inv._CmdFilter('xtargets', [''])\n self.inv._device_list = None\n self.assertEqual(set(['first', 'second', 'third']),\n set(self.inv.device_list))\n\n self.inv._CmdFilter('targets', ['^f.*'])\n self.inv._device_list = None\n self.assertEqual(['first'], self.inv.device_list)",
"def test_get_pci_device_list(self):\n pass",
"def devicelist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.DEVICES_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.device()\n while (self.symbol.type == self.scanner.NAME):\n self.device()\n # Check right curly bracket ends device block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.CONNECT_ID):\n # Error Type: missing '}'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Bad name terminated devices incorrectly\n # Error type: Invalid name\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.DEVICE_NAME, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Left curly needed after 'DEVICE'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NO_CURLY_DEVICE, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: 'DEVICE' keyword required\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NEED_DEVICE_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])",
"def vendor_list():\n return ['nxos', 'eos', 'cumulus']",
"def serial_ports():\r\n return list(map(lambda listportinfo: listportinfo.device, list_ports.comports()))",
"def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None",
"def listDevices(self):\n count = 0\n for device in self:\n count += 1\n printLog(\"Device \" + str(count) + \": '%s %s (%s, %s, %s)'\" % (\n device.make, device.model, device.deviceId, device.androidVersion, device.operator))\n if device.idle:\n printLog(\"[Idle]\")\n else:\n printLog(\"[Busy]\")",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def getDevices(i):\n devices = Account['KTFLR'].devices('monpressprod')\n device = devices[i]\n return device",
"def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices",
"def __http_update_device_list(self):\n\n # Make sure we are (still) logged in\n self.__login_if_required()\n\n # Fetch all devices from Govee\n req = {\n 'key': '',\n 'transaction': self.__current_milli_time(),\n 'view': 0\n }\n res = self.__http_post(req, '/device/rest/devices/v1/list')\n\n # Response:\n \"\"\"\n {\n \"devices\": [\n {\n \"device\": \"AA:BB:CC:DD:EE:FF:11:22\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"CC:DD:EE:FF:11:22\\\",\\\"bleName\\\":\\\"ihoment_H6159_XXXX\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6159\\\",\\\"device\\\":\\\"AA:BB:CC:DD:EE:FF:11:22\\\",\\\"deviceName\\\":\\\"Kitchen light\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Kitchen light\",\n \"goodsType\": 0,\n \"sku\": \"H6159\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n },\n {\n \"device\": \"A2:B2:C3:D4:E5:F6:77:88\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"C3:D4:E5:F6:77:88\\\",\\\"bleName\\\":\\\"ihoment_H6163_YYYY\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6163\\\",\\\"device\\\":\\\"A2:B2:C3:D4:E5:F6:77:88\\\",\\\"deviceName\\\":\\\"Living room\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Living room\",\n \"goodsType\": 0,\n \"sku\": \"H6163\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n }\n ],\n \"message\": \"\",\n \"status\": 200\n }\n \"\"\"\n\n # Check response status\n if res['status'] != 200:\n raise GoveeException('Govee answered with device list status {}'.format(res['status'])) \n\n for raw_device in res['devices']:\n identifier = raw_device['device']\n sku = raw_device['sku']\n if not identifier or not sku:\n continue\n name = raw_device['deviceName']\n device_settings = json.loads(raw_device['deviceExt']['deviceSettings'])\n device_settings_keys = device_settings.keys()\n if not 'address' in device_settings_keys and not 'topic' in device_settings_keys:\n continue\n topic = device_settings['topic']\n\n if identifier in self.__devices.keys():\n device = self.__devices[identifier]\n device._name = name\n else:\n device_factory = self.__get_device_factory(sku)\n if not device_factory:\n continue\n last_device_data = json.loads(raw_device['deviceExt']['lastDeviceData'])\n if 'online' in last_device_data.keys():\n if last_device_data['online']:\n iot_connected = dev.IotConnectionStatus.ONLINE\n else:\n iot_connected = dev.IotConnectionStatus.OFFLINE\n elif not 'wifiName' in device_settings:\n iot_connected = dev.IotConnectionStatus.NO_IOT\n else:\n iot_connected = dev.IotConnectionStatus.UNKNOWN\n device = device_factory.build(self, identifier, topic, sku, name, iot_connected)\n if device:\n self.__devices[identifier] = device\n self.on_new_device(self, device, raw_device)",
"def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()",
"def device_adcs(self):\n return [\"SIS 3302\", \"SIS 3305\"]",
"def list_devices(cls):\n # get all matching devices\n return usb.core.find(\n find_all=True,\n custom_match=lambda dev: (\n dev.idVendor == cls.vendor_id and dev.idProduct in cls.product_ids\n ),\n )",
"def lv_devices(self):\n devs = set()\n return devs",
"def device_list(mmw):\n\n _device_list = list(mmw.device_and_parameter_info_dict.values())\n assert _device_list, \"Device list is empty\"\n\n for device in _device_list:\n device_address = device['info'].device_address\n device['object_dictionary'] = sod.ObjectDictionary(mmw, device_address)\n device['state_control'] = sst.StateControl(mmw, device_address)\n\n # Get the hardware description data from each node too.\n try: \n hardware_description_data = mmw.get_device_file(device_address, '.hardware_description')\n hardware_description = json.loads(hardware_description_data)\n device['hardware_description'] = hardware_description\n except Exception as e:\n logging.warning(\"Error retrieving .hardware_description: {}\".format(e))\n # If this fails, just ignore it and make the data empty.\n device['hardware_description'] = {}\n\n return _device_list",
"def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )",
"def __set_port_list(self):\n\n self._coms = [str(i.device) for i in sorted(self.ports)]",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def list_devices(cls, urlstr: str,\n vdict: Dict[str, int],\n pdict: Dict[int, Dict[str, int]],\n default_vendor: int) -> \\\n List[Tuple[UsbDeviceDescriptor, int]]:\n urlparts = urlsplit(urlstr)\n if not urlparts.path:\n raise UsbToolsError('URL string is missing device port')\n candidates, _ = cls.enumerate_candidates(urlparts, vdict, pdict,\n default_vendor)\n return candidates",
"def get_devices(self):\n devices = self.get(\"event/device\")",
"def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list",
"def devices_from_entities(hass, entry):\n device_client = hass.data[DOMAIN][entry.entry_id][DATA_DEVICE_REGISTER]\n devices = []\n for i in range(16):\n device_port = f\"{i:01x}\"\n device = SW16Switch(device_port, entry.entry_id, device_client)\n devices.append(device)\n return devices",
"def test_get_devices(self):\n pass",
"def test_get_devices(self):\n pass",
"def scan_chip_ble_devices(devCtrl):\n devices = []\n bleMgr = BleManager(devCtrl)\n bleMgr.scan(\"-t 10\")\n\n for device in bleMgr.peripheral_list:\n devIdInfo = bleMgr.get_peripheral_devIdInfo(device)\n if devIdInfo:\n devInfo = devIdInfo.__dict__\n devInfo[\"name\"] = device.Name\n devices.append(devInfo)\n\n return devices"
] |
[
"0.64911735",
"0.6443062",
"0.6252887",
"0.62276167",
"0.62269986",
"0.6201142",
"0.61817676",
"0.6126628",
"0.6034526",
"0.60178775",
"0.6007457",
"0.6002648",
"0.59752244",
"0.5935301",
"0.59268457",
"0.5907353",
"0.58914775",
"0.5880226",
"0.5864928",
"0.58647996",
"0.5846338",
"0.5836406",
"0.5834603",
"0.58238184",
"0.5809624",
"0.57959217",
"0.57736",
"0.5751156",
"0.5751156",
"0.57507235"
] |
0.69720715
|
0
|
Get basic plant information with device list.
|
def plant_info(self, plant_id):
response = self.session.get(self.get_url('newTwoPlantAPI.do'), params={
'op': 'getAllDeviceList',
'plantId': plant_id,
'pageNum': 1,
'pageSize': 1
})
data = json.loads(response.content.decode('utf-8'))
return data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def device_list(self, plant_id):\n return self.plant_info(plant_id)['deviceList']",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})",
"def devices_list_view(request):\n return read_json(request.registry.settings['devices_path'], [])",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def list_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n for device in result:\n print(device)",
"def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None",
"def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def get_devices(self):\n devices = self.get(\"event/device\")",
"def show_device_information(self):\n\n if self._json_format:\n print(json.dumps(self._devices, indent=4, separators=(',', ': ')))\n return\n\n if self._long_format:\n self.show_device_information_long()\n elif self._include_enclosures and self._number_enclosures:\n self.show_device_information_enclosures()\n else:\n self.show_device_information_only()",
"def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices",
"def list_devices(self):\n return [x for x in self.devices.keys()]",
"def load_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n return [(device['id'], device['name'], device['state']) for device in result]",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._uuid)},\n \"name\": self._device.device_data[self._uuid]['name'],\n \"manufacturer\": \"Nest Labs\",\n \"model\": self._device.device_data[self._uuid]['model'],\n }",
"def show_device_information_long(self):\n\n for device in self._devices:\n print(\"\")\n if device['Device Type'].startswith(\"enclosu\"):\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n else:\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('Linux Device Name'):\n print(\"{0:>32}: {1}\".format(\"Linux Device Name\", device['Linux Device Name']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('Drive Capacity'):\n print(\"{0:>32}: {1}\".format(\"Drive Capacity\", device['Drive Capacity']))\n if device.get('Block Length'):\n print(\"{0:>32}: {1}\".format(\"Block Length\", device['Block Length']))\n if device.get('Power On Hours'):\n print(\"{0:>32}: {1}\".format(\"Power On Hours\", device['Power On Hours']))\n if device.get('Current Temperature'):\n print(\"{0:>32}: {1}\".format(\"Current Temperature\", device['Current Temperature']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n if device.get('Enclosure Device'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Device\", device['Enclosure Device']))\n if device.get('Enclosure Slot'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Slot\", device['Enclosure Slot']))\n if device.get('Slot Description'):\n print(\"{0:>32}: {1}\".format(\"Slot Desciption\", device['Slot Description']))\n\n if len(self._devices):\n print(\"\")",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }",
"def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list",
"def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )",
"def get_devices():\n try:\n with open(DEVICES, 'r') as f:\n data = json.load(f)['devices']\n except (IOError, ValueError) as err:\n raise SwiftlmCheckFailure('Failure opening %s: %s' % (DEVICES, err))\n\n devices = []\n for d in data:\n l = d.get('label', LABEL_CHECK_DISABLED)\n devices.append(Device(\n device=d['name'],\n mount=MOUNT_PATH+d['swift_drive_name'],\n label=l\n ))\n\n return devices",
"def get(self):\n try:\n log.debug(\"Device info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\":\"SELECT DISTINCT(deviceId) FROM(SELECT deviceId,q1 FROM \\\"ttd_devices\\\" ) \" }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n result_d.append(element[1])\n result={}\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching device list')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while fetching the device data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching devie data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp",
"def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._actuator.id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._actuator.id))},\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }",
"def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Solar Panels',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'solar'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 'meter_adapter'),\n }",
"def test_gwservice_listdevices(self, setup_controller):\n resp = setup_controller.request(\"gw\", \"devices\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw list devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)",
"def get(self):\n devs = Device.query.all()\n lista = []\n for d in devs:\n lista.append(d.json())\n return lista",
"def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices",
"def getDeviceList(self):\n return defer.succeed(self.discovered)",
"def get_devices_information():\n global nipper_xml\n devices = {}\n\n for device in nipper_xml.findall('./information/devices/device'):\n if DEBUG:\n print \"\\t\" + note + \"Name: %s\" % device.get('name')\n print \"\\t\" + note + \"Type: %s\" % device.get('type')\n print \"\\t\" + note + \"OS: %s\" % device.get('os')\n print \"\\t\" + note + \"OS Version: %s\" % device.get('osversion')\n devices[device.attrib.get('name')] = {'name': device.get('name'),\n 'type': device.get('type'),\n 'os': device.get('os'),\n 'osversion': device.get('osversion')}\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def inverter_list(self, plant_id):\n warnings.warn(\"This function may be deprecated in the future because naming is not correct, use device_list instead\", DeprecationWarning)\n return self.device_list(plant_id)"
] |
[
"0.75556433",
"0.70134014",
"0.68364483",
"0.68065023",
"0.67908365",
"0.6746467",
"0.6720276",
"0.6682221",
"0.66305697",
"0.6543957",
"0.6543498",
"0.6494734",
"0.6484901",
"0.6465679",
"0.6437325",
"0.6409516",
"0.6389772",
"0.63704956",
"0.6364575",
"0.6355703",
"0.63553387",
"0.6334484",
"0.6330472",
"0.6322804",
"0.63160336",
"0.63102293",
"0.6306656",
"0.6286769",
"0.6251243",
"0.6230533"
] |
0.765959
|
0
|
Return the device class of the sensor.
|
def device_class(self):
return SENSOR_TYPES[self.sensor][3].get("device_class")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def device_class(self):\n return self.sensor_type[\"class\"]",
"def device_class(self):\n return self._sensor_type",
"def device_class(self):\r\n return self._sensor_cfg[3]",
"def device_class(self):\r\n return self._sensor_cfg[3]",
"def device_class(self) -> str | None:\n return self._get_sensor_type()[2]",
"def device_class(self):\n return SENSOR_TYPES[self._type][1]",
"def device_class(self):\n return SENSOR_TYPES[self._type][1]",
"def device_class(self):\n return BINARY_SENSORS[self.info_type][\"device_class\"]",
"def device_class(self):\n if self.sensor_class in DEVICE_CLASSES:\n return self.sensor_class\n return None",
"def device_class(self):\n return SENSOR_TYPES[self._type][3] if self._type in SENSOR_TYPES else None",
"def device_class(self):\n return DEVICE_CLASSES.get(self.sensor_data[\"model\"])",
"def device_class(self):\n if self._type in SENSOR_TYPES:\n return self._type\n return None",
"def device_class(self):\n return SENSOR_DEFAULT_DEVICE_CLASS",
"def device_class(self):\n try:\n return DEVICE_CLASS_MAP[self._sensor]\n except KeyError:\n # Sensor must be unknown to us, add as generic\n return None",
"def device_class(self):\n if self._sensor_type == \"temperature\":\n return DEVICE_CLASS_TEMPERATURE\n if self._sensor_type == \"humidity\":\n return DEVICE_CLASS_HUMIDITY\n if self._sensor_type == \"battery_level\":\n return DEVICE_CLASS_BATTERY\n return None",
"def device_class(self):\n return BINARY_SENSOR_DEVICE_CLASS",
"def device_class(self) -> SensorDeviceClass | None:\n if self.dev.name.endswith(\"_temp\"):\n return SensorDeviceClass.TEMPERATURE\n return None",
"def device_class(self):\n return self._device_type",
"def device_class(self):\n return self.type",
"def device_class(self):\n return self.type",
"def device_class(self):\n return self.type",
"def device_class(self):\n # type: () -> string_types\n return self._device_class",
"def device_class(self):\r\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self) -> SensorDeviceClass | None:\n if hasattr(self, \"_attr_device_class\"):\n return self._attr_device_class\n if hasattr(self, \"entity_description\"):\n return self.entity_description.device_class\n return None",
"def device_class(self):\n if self.values.primary.command_class == CommandClass.BATTERY:\n return DEVICE_CLASS_BATTERY\n if self.values.primary.command_class == CommandClass.METER:\n return DEVICE_CLASS_POWER\n if \"Temperature\" in self.values.primary.label:\n return DEVICE_CLASS_TEMPERATURE\n if \"Illuminance\" in self.values.primary.label:\n return DEVICE_CLASS_ILLUMINANCE\n if \"Humidity\" in self.values.primary.label:\n return DEVICE_CLASS_HUMIDITY\n if \"Power\" in self.values.primary.label:\n return DEVICE_CLASS_POWER\n if \"Energy\" in self.values.primary.label:\n return DEVICE_CLASS_POWER\n if \"Electric\" in self.values.primary.label:\n return DEVICE_CLASS_POWER\n if \"Pressure\" in self.values.primary.label:\n return DEVICE_CLASS_PRESSURE\n return None"
] |
[
"0.93628424",
"0.9240069",
"0.9161452",
"0.9161452",
"0.9156968",
"0.90412056",
"0.90412056",
"0.8956785",
"0.89529496",
"0.8950599",
"0.8919914",
"0.871994",
"0.8717259",
"0.87069035",
"0.8660478",
"0.8652474",
"0.83961546",
"0.83828783",
"0.8380708",
"0.8380708",
"0.8380708",
"0.83359516",
"0.83229655",
"0.82700956",
"0.82700956",
"0.82700956",
"0.82700956",
"0.82700956",
"0.82086635",
"0.8152038"
] |
0.92430294
|
1
|
Initializes an instance from the number of times to try and the lower and upper bounds to sleep (in seconds).
|
def __init__ (self, times, lower, upper):
self.times = times
self.lower = lower
self.upper = upper
self.counter = 0
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, timeout, tries):\r\n self._timeout = timeout\r\n self._tries = tries",
"def __init__(self, tries, exceptions=None, delay=1):\r\n self.tries = tries\r\n if exceptions is None:\r\n exceptions = retry.default_exceptions\r\n self.exceptions = exceptions\r\n self.delay = delay",
"def __init__(self, tries , exceptions=None, delay=0.01):\n self.tries = tries\n if exceptions is None:\n exceptions = Retry.default_exceptions\n self.exceptions = exceptions\n self.delay = delay",
"def __init__(self, retry_count):\n self.retry_count = retry_count",
"def __init__(self, max_iters, tries):\r\n\r\n if max_iters < 3:\r\n print 'K must be 3 or more.'\r\n self.max_iters = max_iters\r\n self.tries = tries",
"def __init__(self, *, interval_seconds, safety_seconds=0, allowed_attempts=1,\n action=\"metered action\", enabled=True, log=None, wait_hook=None):\n if not (isinstance(allowed_attempts, int) and allowed_attempts >= 1):\n raise TypeError(\"The allowed_attempts must be a positive integer: %s\" % allowed_attempts)\n # This makes it easy to turn off the feature\n self.enabled = enabled\n self.interval_seconds = interval_seconds\n self.safety_seconds = safety_seconds\n self.allowed_attempts = allowed_attempts\n self.action = action\n self.timestamps = [self.EARLIEST_TIMESTAMP] * allowed_attempts\n self.log = log or logging\n self.wait_hook = wait_hook",
"def __init__(self, max_iter=15, num_random_probes=10):\n self.max_iter = max_iter\n self.num_random_probes = num_random_probes",
"def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)",
"def __init__(self):\n sleep(10)",
"def __init__(self, application, tries, retryable=None, highwater=2<<20,\n log_after_try_count=1, delay=0, delay_factor=2):\n self.application = application\n self.tries = tries\n\n if retryable is None:\n retryable = (TransientError, ConflictError, RetryException,)\n\n if not isinstance(retryable, (list, tuple)):\n retryable = [retryable]\n\n self.retryable = tuple(retryable)\n self.highwater = highwater\n self.delay = delay\n self.delay_factor = delay_factor\n self.log_after_try_count = log_after_try_count",
"def determine_sleep_times(self):\n\n determined_sleep_time = \\\n random.randrange(self.dns_conf.min_backoff_range,\n self.dns_conf.max_backoff_range)\n\n backoff = [(2 ** i) * determined_sleep_time for i in\n range(0, self.dns_conf.retries)]\n\n return backoff",
"def __init__(self):\n\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz",
"def __init__(self):\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep','GoToPlay'])\n\n self.rate = rospy.Rate(1) \n self.counter = 0",
"def __init__(self, start=0, step=1):\n self.count, self.step = start, step\n self.lock = threading.Lock()",
"def __init__(self):\n # A lock/condition to protected _time. It is notified whenever _time is changed.\n self._time_condition = threading.Condition()\n # The current time in seconds past epoch.\n self._time = 0.0\n # A lock/condition to protect _waiting_threads. It is notified whenever _waiting_threads is changed.\n self._waiting_condition = threading.Condition()\n # The number of threads that are blocking in `simulate_waiting`.\n self._waiting_threads = 0",
"def retry(times):\n return repeat_with_success_at_least(times, 1)",
"def __init__(self, min=0, sec=0):\n self.min = min\n self.sec = sec",
"def __init__(self,min_instances=30, drift_level=3.0):\n\n from math import sqrt\n self.min_instances = min_instances\n self.drift_level = float(drift_level)\n self.i = None\n self.pi = None\n self.si = None\n self.pi_min = None\n self.si_min = None\n self.sqrt=sqrt\n self.reset()",
"def __init__(self, timer=120, rate=1, percent=0):\n self.timer = timer\n self.rate = rate\n self.percent = percent",
"def __init__(self, delay=0):\n self.delay = delay",
"def __init__(self, t):\n\t\tself.delay = math.ceil(t / config.time_resolution)",
"def __init__(self, time_corruption=0):\n self._time_corruption = time_corruption\n self._lap = time.perf_counter()",
"def __init__(self,interval):\n _interval = interval",
"def __init__(self,socket,timeout=10, checkintv='fibonacci'):\r\n # Store the socket, timeout and check interval\r\n self.socket = socket\r\n self.timeout = timeout\r\n self.checkintv = checkintv",
"def init(sleep_time=0):\n d = DataCollector()\n r1 = robot(\"PSM1\") # left (but my right)\n r2 = robot(\"PSM2\") # right (but my left)\n time.sleep(sleep_time)\n return (r1,r2,d)",
"def __init__(self, seconds):\n super(RobotiqCommandTimeout, self).__init__()\n self.start_time = rospy.get_rostime()\n self.duration = rospy.Duration(seconds)",
"def __init__(\n self, *, loop=None, initial_number_of_robots=2, time_speeder=1, initial_money=0\n ):\n self.robots = []\n self.foo = []\n self.bar = []\n self.foobar = []\n self.money = initial_money\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n self.loop = loop\n self.running = True\n self.initial_number_of_robots = initial_number_of_robots\n self.time_speeder = time_speeder",
"def setcheckinterval(n): # real signature unknown; restored from __doc__\n pass",
"def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)",
"def sleep(min_seconds=1, max_seconds=10):\n time.sleep(randint(min_seconds, max_seconds))"
] |
[
"0.6777629",
"0.6550155",
"0.6473606",
"0.608336",
"0.6008538",
"0.5947795",
"0.5889047",
"0.58258945",
"0.582502",
"0.57822096",
"0.57511",
"0.5726218",
"0.5697493",
"0.5653644",
"0.5648945",
"0.56483763",
"0.5644084",
"0.5633479",
"0.5630757",
"0.56092227",
"0.5588159",
"0.55565196",
"0.55394316",
"0.5538962",
"0.55319655",
"0.5519911",
"0.54978204",
"0.54958194",
"0.5473475",
"0.54655814"
] |
0.690081
|
0
|
Get the ids of the registered engines. This method always blocks.
|
def remote_get_ids(self):
return self.smultiengine.get_ids()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getIDs(self):\n return self.multiengine.getIDs()",
"def engineList(self, targets):\n if isinstance(targets, int):\n if targets not in self.engines.keys():\n log.msg(\"Engine with id %i is not registered\" % targets)\n raise error.InvalidEngineID(\"Engine with id %i is not registered\" % targets)\n else: \n return [self.engines[targets]]\n elif isinstance(targets, (list, tuple)):\n for id in targets:\n if id not in self.engines.keys():\n log.msg(\"Engine with id %r is not registered\" % id)\n raise error.InvalidEngineID(\"Engine with id %r is not registered\" % id) \n return map(self.engines.get, targets)\n elif targets == 'all':\n eList = self.engines.values()\n if len(eList) == 0:\n msg = \"\"\"There are no engines registered.\n Check the logs in ~/.ipython/log if you think there should have been.\"\"\"\n raise error.NoEnginesRegistered(msg)\n else:\n return eList\n else:\n raise error.InvalidEngineID(\"targets argument is not an int, list of ints or 'all'\")",
"def all_plugin_ids(self):\n\n id_list = []\n for next_plugin in self.__registered_plugins:\n id_list.append(next_plugin.plugin_id)\n return id_list",
"def get_ids(self):\n return self._graphs.keys()",
"def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids",
"def get_engines_details(self):\n if not \"engines\" in self.data:\n raise ValueError(\"This recipe doesn't have engines\")\n return self.data[\"engines\"]",
"def get_active_browser_ids(self):\n\n # This relies on some private data structures, but presently\n # there is no other way. There's been a discussion in the\n # robot slack channels about adding a new keyword that does\n # what this keyword does. When that happens, we can remove\n # this keyword.\n driver_ids = []\n try:\n driver_cache = self.selenium._drivers\n except NoOpenBrowser:\n return []\n\n for index, driver in enumerate(driver_cache._connections):\n if driver not in driver_cache._closed:\n # SeleniumLibrary driver ids start at one rather than zero\n driver_ids.append(index + 1)\n return driver_ids",
"def known_nodes(self) -> List[Client]:\n return list(self.in_memory_client_registry.values())",
"def getIDs():",
"def get_registered_clients(self):\n return self.hub.get_registered_clients(self.get_private_key())",
"def get_host_ids(self):\n host_ids = []\n \n for node_id in self.nodes:\n if (isinstance(self.nodes[node_id], HostNode)):\n host_ids.append(node_id)\n \n return host_ids",
"def known_uids(self) -> Sequence[bytes]:\n assert self._state is not None, 'Unseal the vault first'\n return list(self._state.keys())",
"def ExternalSystemIdentifiers(self, default=[{}]):\n tmp = self.data.get('external_system_identifiers', default)\n return [HEP.IDObject(i) for i in tmp]",
"def get_ids(self):\n return self._ids",
"def admin_ids(self):\n # type: () -> List[int]\n return self._admin_ids",
"def get_room_ids(self):\n if not self.setup():\n return []\n for room in self.homestatus.rooms:\n self.room_ids.append(room)\n return self.room_ids",
"def ids(self):\n return self._ids",
"def all_env_ids(self) -> np.ndarray:",
"def get_app_ids(self):\n return self.apps",
"def get_refresh_ids(self):\n ids = []\n for bucket in self.router.lonely_buckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids",
"def vertex_ids(self):\n return self.get_ids()",
"def ids(self):\n return list(self._id_generator())",
"def ids(self):\n return list(self._id_generator())",
"def get_es_ids(self):\n search = self.search.source(['uri']).sort(['uri'])\n es_ids = [item.meta.id for item in search.scan()]\n return es_ids",
"def getRefreshIDs(self):\n ids = []\n for bucket in self.router.getLonelyBuckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids",
"def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids",
"def ExternalSystemIdentifiers(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('external_system_identifiers', default)\n return [HEP.IDObject(i) for i in tmp]",
"def waiting_clients(self):\n return self.storage.iterkeys()",
"def select_host_ids():\n return IMPL.select_host_ids()",
"def get_all_master_ids(self):\r\n return self._handler.get_all_master_ids()"
] |
[
"0.69110274",
"0.63427883",
"0.6191196",
"0.6022198",
"0.5954033",
"0.59250677",
"0.58777815",
"0.58292615",
"0.5824796",
"0.5804927",
"0.57669634",
"0.57343227",
"0.5724957",
"0.5702801",
"0.57008684",
"0.57008624",
"0.56766325",
"0.56736207",
"0.56694365",
"0.5666263",
"0.5649912",
"0.5647323",
"0.5647323",
"0.5641384",
"0.55759454",
"0.55694383",
"0.55668366",
"0.5558361",
"0.5558205",
"0.5536428"
] |
0.6516591
|
1
|
Turn a list of deferred_ids into a final result or failure.
|
def process_did_list(did_list):
new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
final_d = gatherBoth(new_d_list,
fireOnOneErrback=0,
consumeErrors=1,
logErrors=0)
final_d.addCallback(error.collect_exceptions, 'gather')
final_d.addCallback(lambda lop: [i[0] for i in lop])
final_d.addCallback(mapObject.joinPartitions)
return final_d
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gather_results( # type: ignore[misc]\n deferredList: Tuple[\"defer.Deferred[T1]\", ...],\n consumeErrors: bool = False,\n) -> \"defer.Deferred[Tuple[T1, ...]]\":\n # The `type: ignore[misc]` above suppresses\n # \"Overloaded function implementation cannot produce return type of signature 1/2/3\"\n deferred = defer.gatherResults(deferredList, consumeErrors=consumeErrors)\n return deferred.addCallback(tuple)",
"def process_did_list(did_list):\n new_d_list = [self.get_pending_deferred(did, True) for did in did_list]\n final_d = gatherBoth(new_d_list,\n fireOnOneErrback=0,\n consumeErrors=1,\n logErrors=0)\n final_d.addCallback(error.collect_exceptions, 'scatter')\n final_d.addCallback(lambda lop: [i[0] for i in lop])\n return final_d",
"def do_it(did_list):\n d_to_return.callback(deferred_id)\n return process_did_list(did_list)",
"def do_it(did_list):\n d_to_return.callback(deferred_id)\n return process_did_list(did_list)",
"def test_task_retry_and_succeed_56_tasks(self):\n all_done = []\n number = 56\n for x in range(number):\n mock_task = MockFailOnceTask()\n all_done.append(mock_task.done)\n self.measurementManager.schedule(mock_task)\n\n d = defer.DeferredList(all_done)\n\n @d.addCallback\n def done(res):\n self.assertEqual(self.measurementManager.failures, number)\n #self.assertEqual(len(self.measurementManager.failures), number)\n for task_result, task_instance in self.measurementManager.successes:\n self.assertEqual(task_result, 42)\n self.assertIsInstance(task_instance, MockFailOnceTask)\n\n return d",
"def check_deferred_responses(self):\n for future in self.futures:\n results = future.result()\n \n self.futures = []",
"def _resolve_deferred_response(self, response, remaining, deferred_id=None):\n if remaining == 0:\n raise DeferredError(\"Failed to resolve deferred response.\")\n\n if response.status_code == 202 and self.block_on_deferred_response:\n deferred_id = deferred_id or response.text\n sleep(self.deferred_poll_interval)\n return self._resolve_deferred_response(\n self.get('get_deferred_results', {'deferred_id': deferred_id}),\n remaining-1,\n deferred_id\n )\n\n return response",
"def test_multipleConcurrentFailure(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n query = dns.Query('foo.example.com', dns.A)\n firstResult = resolver.query(query)\n secondResult = resolver.query(query)\n\n class ExpectedException(Exception):\n pass\n\n queries.pop()[-1].errback(failure.Failure(ExpectedException()))\n\n return defer.gatherResults([\n self.assertFailure(firstResult, ExpectedException),\n self.assertFailure(secondResult, ExpectedException)])",
"def deferredToResult():\n @receiver()\n def received(item):\n if isinstance(item, Deferred):\n pause = selfAsFount.pauseFlow()\n results = []\n def done(result):\n results[:] = [result]\n pause.unpause()\n item.addBoth(done)\n yield skip\n [result] = results\n if isinstance(result, Failure):\n result.raiseException()\n else:\n yield result\n\n drain = series(received)\n selfAsFount = drain.flowingFrom(None)\n return drain",
"def evalDeferred(*args, evaluateNext: bool=True, list: bool=True, lowPriority: bool=True,\n lowestPriority: bool=True, **kwargs)->List[AnyStr]:\n pass",
"def run(self, args_: Tuple, result_ids: List[Union[str, int]]):\n # TODO: can we reuse result_ids?\n return self.__call__(*args_)",
"def three_to_one(three_res_list):\n one_res_list = []\n\n for res in three_res_list:\n try:\n one = utilities.three_to_one[res]\n one_res_list.append(one)\n except KeyError:\n return False\n return \"\".join(one_res_list)",
"def test_results_lookup_error(self, affiliate_items):\n item_pks = affiliate_items.values_list('pk', flat=True)\n\n def error_first(item):\n if item.name == \"0\":\n raise LookupError()\n\n update_function = mock.Mock(side_effect=error_first)\n batch_job = BatchJob(affiliate_items, update_function)\n\n success_count = 0\n for result in batch_job.run():\n success_count += int(not result.is_error)\n\n assert success_count == 3\n\n items = AffiliateItem.objects.filter(pk__in=item_pks)\n assert items.count() == 3",
"def instantiateAddCallbacksAfterResult(n):\n d = defer.Deferred()\n def f(result):\n return result\n d.callback(1)\n for i in xrange(n):\n d.addCallback(f)\n d.addErrback(f)\n d.addBoth(f)\n d.addCallbacks(f)",
"def __call__(self, id_, seq, qual):\r\n result = self.F(id_, seq, qual)\r\n if result:\r\n self.FailedIds.append(id_)\r\n return result",
"def execute_deferred(fn):\n\n pass",
"def test_later_failure_result(self):\n d = Deferred()\n dr = EventualResult(d, None)\n result_list = []\n done = append_in_thread(result_list, dr.wait, 100)\n time.sleep(0.1)\n d.errback(RuntimeError())\n done.wait(100)\n self.assertEqual(\n (result_list[0], result_list[1].__class__), (False, RuntimeError))",
"def test_filter_by_ids(self, original_list, ids_to_filter, expected_result):\n result = helpers.filter_by_ids(original_list, ids_to_filter)\n\n self.assertEqual(result, expected_result)",
"def test_success_result_of_failure(self):\n d = Deferred()\n d.errback(Exception())\n err = self.assertRaises(FailTest, success_result_of, d)\n self.assertTrue(err.args[0].startswith(\n \"Expected success from deferred %r, got failure:\" % (d,)))",
"def update_results(failures, errors, case_):\n for check in case_.checks:\n if check.result == FAILURE:\n failures.append(check)\n elif check.result == ERROR:\n errors.append(check)",
"def get_list_transform(self, sd_responses):\n permit_list = False\n responses_missing = []\n sd_fields = {\n 'activity' : 'dd8a5g7g',\n 'app_id' : 'uqqrsogr',\n 'biz_name' : 't00kheyd',\n 'dba_name' : '60w4ep9y',\n 'addr' : 'kbqz4189',\n 'parcel' : 'kvrgbqrl'\n }\n if isinstance(sd_responses, list):\n permit_list = []\n for resp in sd_responses:\n if (resp.get('responses', False)\n and resp['responses'].get(sd_fields['activity'], False)\n and (resp['responses'].get(sd_fields['biz_name'], False)\n or resp['responses'].get(sd_fields['dba_name'], False))\n and (resp.get('status', '') in self.status_map.keys())\n ):\n resp_status = self.status_map[resp.get('status')].lower()\n resp_referred = self.get_referred_departments(resp.get('labels'))\n item = {\n 'application_id':'',\n 'business_name':'',\n 'dba_name':'',\n 'address':'',\n 'parcel':'',\n 'status':resp_status,\n 'referred':\", \".join(resp_referred)\n }\n data = resp['responses']\n item['application_id'] = str(data.get(sd_fields['app_id']) or '')\n if not data.get(sd_fields['app_id']):\n item['application_id'] = 'P-' + str(resp['id'])\n item['business_name'] = str(data.get(sd_fields['biz_name']) or '')\n item['dba_name'] = str(data.get(sd_fields['dba_name']) or item['business_name'])\n item['parcel'] = data.get(sd_fields['parcel'], '')\n if data.get(sd_fields['addr']) and data.get(sd_fields['addr']).get('street'):\n addr = data.get(sd_fields['addr'])\n item['address'] = str(addr.get('street') or '')\n item['address'] += ', '+str(addr.get('city') or '')\n item['address'] += ', '+str(addr.get('state') or '')\n item['address'] += ' '+str(addr.get('zipcode') or '')\n item['address'] = item['address'].strip(' ,')\n if data[sd_fields['activity']] and data[sd_fields['activity']]['checked']:\n for applied_permit_type in data[sd_fields['activity']]['checked']:\n item[applied_permit_type.lower()] = resp_status\n\n permit_list.append(item)\n else:\n responses_missing.append(\n {'id':resp['id'], 'sequential_id':resp['sequential_id']}\n )\n\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra('get_list_transform.permit_list_len', len(permit_list))\n if responses_missing:\n scope.set_extra('get_list_transform.responses_missing', responses_missing)\n return permit_list",
"def download_results(client, response_ready, id_list, **kwargs) -> list:\n # set optional arguments\n server = kwargs.get(\"server\", \"/v3/serp/google/organic/task_get/advanced/\")\n if response_ready['status_code'] == 20000:\n results = []\n # this loop ensure that results are collected when they are ready\n count = 0\n while id_list and (count < 1000) :\n if count >= 1:\n print(f\"...this might take a while(x {count})... \")\n print(f\"...still {len(id_list)} items to go! \")\n count += 1\n for id in id_list:\n temp_res = client.get(server + id)\n if temp_res['tasks'][0]['result']:\n results.append(temp_res['tasks'][0]['result'][0])\n id_list.remove(id)\n break\n time.sleep(0.2)\n if (count == 999) and id_list:\n raise ConnectionError(\"could not load all results!!!\")\n return results\n else:\n print(\"error. Code: %d Message: %s\" % (response_ready[\"status_code\"], response_ready[\"status_message\"]))",
"def instantiateAddCallbacksBeforeResult(n):\n d = defer.Deferred()\n def f(result):\n return result\n for i in xrange(n):\n d.addCallback(f)\n d.addErrback(f)\n d.addBoth(f)\n d.addCallbacks(f)\n d.callback(1)",
"def test_insert_batch_result_with_a_single_update(self):\n incomplete = generate_mock_result(status='IN_PROGRESS', success=False, run_id=1)\n self.db.insert_result_batch(results=[incomplete, generate_mock_result(run_id=2)])\n self.assertEqual(2, len(self.db.get_results_for_project('TEST')))\n self.assertEqual(1, len(self.db.get_failed_results_for_project('TEST')))\n incomplete.update({'status': 'SUCCESS', 'success': True})\n self.db.insert_result_batch(results=[incomplete, generate_mock_result(run_id=3)])\n self.assertEqual(3, len(self.db.get_results_for_project('TEST')))\n self.assertEqual(0, len(self.db.get_failed_results_for_project('TEST')))",
"def sort_done_requests(done_requests):\n\n correct_data, incorrect_data, error_data = [], [], []\n\n for future in done_requests:\n future_body = future.result()\n\n # if requests was successful\n if future_body['status_code'] is 200:\n correct_data.append(future_body)\n\n # if requests was failed with a connection error\n elif future_body['status_code'] is 0:\n error_data.append(future_body)\n\n # if requests failed with errors like 404, 403 and etc..\n else:\n incorrect_data.append(future_body)\n\n return correct_data, incorrect_data, error_data",
"def instantiateAddCallbacksNoResult(n):\n d = defer.Deferred()\n def f(result):\n return result\n for i in xrange(n):\n d.addCallback(f)\n d.addErrback(f)\n d.addBoth(f)\n d.addCallbacks(f, f)",
"def TestQueryIDs(l):\r\n good = 0\r\n empty = 0\r\n bad = 0\r\n for i in l:\r\n try:\r\n jsonstring = pyclassyfire.client.get_results(int(i), 'json')\r\n Class = json.loads(jsonstring)\r\n if len(Class['entities']) == 0:\r\n empty += 1\r\n elif len(Class['entities']) > 1:\r\n print(\"WHAT?!\")\r\n sys.exit()\r\n else:\r\n print(i)\r\n good += 1\r\n except Exception as e:\r\n print(e)\r\n bad += 1\r\n\r\n print(\"good\",good)\r\n print(\"bad\",bad)\r\n print(\"empty\",empty)\r\n return None",
"def parse_ids(\n ids: List[str],\n) -> List[Union[WorkflowId, CromwellWorkflowLabel, ExperimentId]]:\n return [parse_id(i) for i in ids]",
"def test_insert_batch_result_and_retrieve(self):\n batch = [self.successfulresult, self.failedresult]\n self.db.insert_result_batch(results=batch)\n successentry = self.db.get_result_by_primary_key(pk=self.successfulresult.get('id'))\n self.assertDictContainsSubset(self.successfulresult, successentry.__dict__)\n failureentry = self.db.get_result_by_primary_key(pk=self.failedresult.get('id'))\n self.assertDictContainsSubset(self.failedresult, failureentry.__dict__)",
"def gen_resources_for_ids(\n resource: Callable, res_ids: List[str], **list_params\n) -> Generator[List, None, None]:\n print(\"Generating resources for ids.\")\n total = len(res_ids)\n res_counter = 0\n\n if \"maxResults\" not in list_params.keys():\n list_params[\"maxResults\"] = DEFAULT_MAX_RESULTS\n max_results = DEFAULT_MAX_RESULTS\n else:\n max_results = list_params[\"maxResults\"]\n\n _res_ids = res_ids.copy()\n\n while len(_res_ids) > 0:\n request_ids = []\n for _ in range(max_results):\n request_ids.append(_res_ids.pop(0))\n\n if len(_res_ids) == 0:\n break\n\n print(\n f\"\\tRequesting {res_counter}-{res_counter + len(request_ids)} of {total}.\"\n )\n\n list_params[\"id\"] = \",\".join(request_ids)\n\n request = resource().list(**list_params)\n response = request.execute()\n yield response[\"items\"]\n\n res_counter += max_results\n\n print(\"\\tFinished requesting resources.\")\n return None"
] |
[
"0.6013431",
"0.5807183",
"0.57902557",
"0.57902557",
"0.5497678",
"0.54742616",
"0.53362757",
"0.5127025",
"0.51104206",
"0.5088483",
"0.5002355",
"0.49978006",
"0.49900883",
"0.48699865",
"0.48649788",
"0.4819112",
"0.47713944",
"0.4770748",
"0.47648305",
"0.47619805",
"0.47252744",
"0.4699087",
"0.46977547",
"0.4678931",
"0.4668937",
"0.46666425",
"0.46601635",
"0.46420318",
"0.4632095",
"0.46040297"
] |
0.5984314
|
1
|
A parallelized version of Python's builtin map. This has a slightly different syntax than the builtin `map`. This is needed because we need to have keyword arguments and thus can't use args to capture all the sequences. Instead, they must be passed in a list or tuple. raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...) Most users will want to use parallel functions or the `mapper` and `map` methods for an API that follows that of the builtin `map`.
|
def raw_map(self, func, sequences, dist='b', targets='all', block=True):
if not isinstance(sequences, (list, tuple)):
raise TypeError('sequences must be a list or tuple')
max_len = max(len(s) for s in sequences)
for s in sequences:
if len(s)!=max_len:
raise ValueError('all sequences must have equal length')
if isinstance(func, FunctionType):
d = self.push_function(dict(_ipython_map_func=func), targets=targets, block=False)
d.addCallback(lambda did: self.get_pending_deferred(did, True))
sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, *zip(*_ipython_map_seq))'
elif isinstance(func, str):
d = defer.succeed(None)
sourceToRun = \
'_ipython_map_seq_result = map(%s, *zip(*_ipython_map_seq))' % func
else:
raise TypeError("func must be a function or str")
d.addCallback(lambda _: self.scatter('_ipython_map_seq', zip(*sequences), dist, targets=targets))
d.addCallback(lambda _: self.execute(sourceToRun, targets=targets, block=False))
d.addCallback(lambda did: self.get_pending_deferred(did, True))
d.addCallback(lambda _: self.gather('_ipython_map_seq_result', dist, targets=targets, block=block))
return d
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def map(iteratee, *seqs):\n return _map(fnc.iteratee(iteratee), *seqs)",
"def map(self, func, *sequences):\n return self.mapper().map(func, *sequences)",
"def map(func, iterable, chunksize=None, ncpu=0, limit=True, progress=False):\n if (ncpu == 0):\n if (not progress):\n return _map(func, iterable)\n else:\n r = []\n if isinstance(progress, str):\n txt = progress\n else:\n txt = func.__name__\n for k in _PBar(desc=txt).iterover(iterable):\n r.append(func(k))\n return r\n elif progress:\n _n = _mp.cpu_count()\n if (ncpu <= 0):\n # use all available cpus\n p = _mp.Pool(_n)\n elif (ncpu > _n) & (limit is True):\n p = _mp.Pool(_n)\n else:\n p = _mp.Pool(ncpu)\n\n if not hasattr(iterable, '__len__'):\n iterable = list(iterable)\n ntasks = len(iterable)\n\n if isinstance(progress, str):\n txt = progress\n else:\n txt = func.__name__\n\n with _PBar(ntasks, desc=txt) as pb:\n # get the pool working asynchronously\n if islambda(func):\n amap = p.map_async(PicklableLambda(func), iterable, chunksize)\n else:\n amap = p.map_async(func, iterable, chunksize)\n left = 1\n while left > 0:\n _time.sleep(0.1)\n left = amap._number_left\n pb.update(ntasks - left)\n return amap.get()\n else:\n return map_async(func, iterable, chunksize, ncpu=ncpu, limit=limit).get()",
"def thread_map(target, iterable, thread_count=None, *args, **kwargs):\n try:\n jobsize = len(iterable)\n except TypeError:\n iterable = list(iterable)\n jobsize = len(iterable)\n def array_targ(function, it, retvals, arglist, kwarglist, start, size):\n for i in range(start, start + size):\n retvals[i] = function(*(arglist + (it[i],)), **kwarglist)\n retvals = [None] * jobsize\n thread_job(jobsize, thread_count, array_targ,\n target, iterable, retvals, args, kwargs)\n return retvals",
"def multiprocess_map(func, iterable, *worker_args, n_cores=None, mode=\"map\", **pool_kwargs):\n results = []\n\n with mp.Manager() as manager:\n shared_args_proxy = None\n if worker_args is not None:\n shared_args_proxy = manager.list(worker_args)\n\n with mp.Pool(processes=n_cores, initializer=init_worker,\n initargs=shared_args_proxy, **pool_kwargs) as pool:\n if mode == \"map\":\n results = pool.map(func, iterable)\n elif mode == \"starmap\":\n results = pool.starmap(func, iterable)\n elif mode == \"imap\":\n for result in pool.imap(func, iterable):\n results.append(result)\n\n return results",
"def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))",
"def map_async(func, iterable, chunksize=None, callback=None, ncpu=0, limit=True, **kwargs):\n _n = _mp.cpu_count()\n if (ncpu <= 0):\n # use all available cpus\n p = _mp.Pool(_n)\n elif (ncpu > _n) & (limit is True):\n p = _mp.Pool(_n)\n else:\n p = _mp.Pool(ncpu)\n\n if islambda(func):\n return p.map_async(PicklableLambda(func), iterable, chunksize, callback)\n else:\n return p.map_async(func, iterable, chunksize, callback)",
"def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result",
"async def map(afunc: Callable, gen: AnyIterable, batch_size: int = 0) -> AsyncGenerator:\n if isinstance(gen, AsyncGenerator):\n if asyncio.iscoroutinefunction(afunc):\n async for i in _async_map(afunc, gen, batch_size):\n yield i\n else:\n async for i in _sync_map(afunc, gen):\n yield i\n else:\n if asyncio.iscoroutinefunction(afunc):\n async for i in _async_map(afunc, iterate(gen), batch_size):\n yield i\n else:\n for i in _builtin_map(afunc, gen):\n yield i",
"def pfmap(func, workers=8):\n return fmap(func)",
"def map(iterable, function):\n for x in iterable:\n yield function(x)",
"def fastMap(mapper, data):\n i = 0\n ans = []\n while i < len(data):\n with Pool(MAX_POOL_SIZE) as pool:\n ans.extend(pool.map(mapper, data[i:i+MAX_POOL_SIZE]))\n i += MAX_POOL_SIZE\n\n return ans",
"def tmap(f, seq_args, num_workers=20, worker_queue=None, wait=True, stop_on_error=True):\n\n if worker_queue:\n wq = worker_queue\n else:\n # see if we have a global queue to work with.\n if _wq:\n wq = _wq\n else:\n if num_workers == 0:\n return map(f, seq_args)\n\n wq = WorkerQueue(num_workers)\n\n # we short cut it here if the number of workers is 0.\n # normal map should be faster in this case.\n if len(wq.pool) == 0:\n return map(f, seq_args)\n\n # print(\"queue size:%s\" % wq.queue.qsize())\n\n # TODO: divide the data (seq_args) into even chunks and\n # then pass each thread a map(f, equal_part(seq_args))\n # That way there should be less locking, and overhead.\n\n results = []\n for sa in seq_args:\n results.append(FuncResult(f))\n wq.do(results[-1], sa)\n\n # wq.stop()\n\n if wait:\n # print(\"wait\")\n wq.wait()\n # print(\"after wait\")\n # print(\"queue size:%s\" % wq.queue.qsize())\n if wq.queue.qsize():\n raise RuntimeError(\"buggy threadmap\")\n # if we created a worker queue, we need to stop it.\n if not worker_queue and not _wq:\n # print(\"stopping\")\n wq.stop()\n if wq.queue.qsize():\n um = wq.queue.get()\n if not um is STOP:\n raise RuntimeError(\"buggy threadmap\")\n\n # see if there were any errors. If so raise the first one. This matches map behaviour.\n # TODO: the traceback doesn't show up nicely.\n # NOTE: TODO: we might want to return the results anyway? This should be an option.\n if stop_on_error:\n error_ones = list(filter(lambda x: x.exception, results))\n if error_ones:\n raise error_ones[0].exception\n\n return map(lambda x: x.result, results)\n return [wq, results]",
"def thread_map(f, args_list, n_threads=None):\n if n_threads is None:\n n_threads = int(multiprocessing.cpu_count() / 2)\n pool = multiprocessing.pool.ThreadPool(processes=n_threads)\n return pool.map(f, args_list)",
"def map(self, fn, *iterables, **kwargs):\n fn = self._prepare_fn(fn)\n return self._self.map(fn, *iterables, **kwargs)",
"def map(function, iterable):\n\n return [function(x) for x in iterable]",
"def map(self, target, *iterable: iter):\n for args in zip(*iterable):\n self.submit(target=target, args=args)",
"def gmap(\n func: Callable,\n *iterables: Iterable,\n mapper: Callable[[Callable, tuple[Iterable]], Iterable] = map,\n evaluator: Callable[[Iterable], Any] = tuple\n):\n return evaluator(mapper(func, *iterables))",
"def tmap(function, *sequences, **tqdm_kwargs):\n for i in tzip(*sequences, **tqdm_kwargs):\n yield function(*i)",
"def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()",
"def task_mapper(task_function, task_iterable, parallel_procs=None):\n\n num_procs = get_num_processors(parallel_procs)\n\n if num_procs == 0:\n LOG.debug('Using serial task processor...')\n return serial_pc(task_function, task_iterable)\n else:\n LOG.debug('Using %d-parallel task processors...', num_procs)\n return parallel_pc(task_function, task_iterable, num_procs)",
"def eager_map(func, iterable):\n for _ in map(func, iterable):\n continue",
"def map(f, *a, **kw):\n def writeobj(pipe, obj):\n try:\n s = marshal.dumps(obj)\n s = struct.pack('i', len(s)) + s\n except:\n try: s = cPickle.dumps(obj)\n except:\n print obj\n s = cPickle.dumps(obj)\n s = struct.pack('i', -len(s)) + s\n os.write(pipe, s)\n\n def readobj(pipe):\n n = struct.unpack('i', os.read(pipe, 4))[0]\n s = ''\n an = abs(n)\n while len(s) < an:\n s += os.read(pipe, min(65536, an-len(s)))\n if n > 0:\n return marshal.loads(s)\n else:\n return cPickle.loads(s)\n\n n = kw.get('n', nproc)\n if n == 1:\n return builtin_map(f, *a)\n\n if len(a) == 1:\n L = a[0]\n else:\n L = zip(*a)\n try:\n len(L)\n except TypeError:\n L = list(L)\n n = min(n, len(L))\n\n ans = [None] * len(L)\n pipes = [os.pipe() for i in range(n-1)]\n\n for i in range(n):\n if i < n-1 and not os.fork():\n # Child, and not last processor\n try:\n try:\n if len(a) == 1:\n obj = builtin_map(f, L[i*len(L)//n:(i+1)*len(L)//n])\n else:\n obj = [f(*x) for x in L[i*len(L)//n:(i+1)*len(L)//n]]\n except Exception, obj:\n pass\n writeobj(pipes[i][1], obj)\n except:\n traceback.print_exc()\n finally:\n os._exit(0)\n elif i == n-1:\n # Parent fork, and last processor\n try:\n if len(a) == 1:\n ans[i*len(L)//n:] = builtin_map(f, L[i*len(L)//n:])\n else:\n ans[i*len(L)//n:] = [f(*x) for x in L[i*len(L)//n:]]\n for k in range(n-1):\n obj = readobj(pipes[k][0])\n if isinstance(obj, Exception):\n raise obj\n ans[k*len(L)//n:(k+1)*len(L)//n] = obj\n finally:\n for j in range(n-1):\n os.close(pipes[j][0])\n os.close(pipes[j][1])\n os.wait()\n return ans",
"def map_and_batch(map_func,\n batch_size,\n num_parallel_batches=None,\n drop_remainder=False,\n num_parallel_calls=None):\n\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(\n \"`map_and_batch` allows only one of `num_parallel_batches` and \"\n \"`num_parallel_calls` to be set, but \"\n f\"`num_parallel_batches` was set to {num_parallel_batches} \"\n f\"and `num_parallel_calls` as set to {num_parallel_calls}.\")\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size,\n num_parallel_calls, drop_remainder)\n\n return _apply_fn"
] |
[
"0.71589917",
"0.69364524",
"0.69016457",
"0.6803368",
"0.67787397",
"0.6751321",
"0.67485857",
"0.6698962",
"0.66840696",
"0.6644073",
"0.6631258",
"0.66112894",
"0.6594355",
"0.657014",
"0.6553259",
"0.65476775",
"0.6470776",
"0.6453482",
"0.6449276",
"0.6433381",
"0.63461506",
"0.63461506",
"0.63461506",
"0.63461506",
"0.63461506",
"0.6278401",
"0.6270631",
"0.62580657",
"0.62413025",
"0.6218374"
] |
0.71248853
|
1
|
A parallel version of Python's builtin `map` function. This method applies a function to sequences of arguments. It follows the same syntax as the builtin `map`. This method creates a mapper object by calling `self.mapper` with no arguments and then uses that mapper to do the mapping. See the documentation of `mapper` for more details.
|
def map(self, func, *sequences):
return self.mapper().map(func, *sequences)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)",
"def map(self, fn, *iterables, **kwargs):\n fn = self._prepare_fn(fn)\n return self._self.map(fn, *iterables, **kwargs)",
"def map(iteratee, *seqs):\n return _map(fnc.iteratee(iteratee), *seqs)",
"def map(self, mapper):\n def _map(iterator):\n return mapper(next(iterator))\n return self.__class__(self, _map)",
"def map(self, map_function, *map_arguments) -> None:\n\n elements = []\n self.__get_sorted_elements(self.__root, elements)\n\n for element in elements:\n map_function(element, *map_arguments)",
"def task_mapper(task_function, task_iterable, parallel_procs=None):\n\n num_procs = get_num_processors(parallel_procs)\n\n if num_procs == 0:\n LOG.debug('Using serial task processor...')\n return serial_pc(task_function, task_iterable)\n else:\n LOG.debug('Using %d-parallel task processors...', num_procs)\n return parallel_pc(task_function, task_iterable, num_procs)",
"def raw_map(self, func, sequences, dist='b', targets='all', block=True):\n if not isinstance(sequences, (list, tuple)):\n raise TypeError('sequences must be a list or tuple')\n max_len = max(len(s) for s in sequences)\n for s in sequences:\n if len(s)!=max_len:\n raise ValueError('all sequences must have equal length')\n if isinstance(func, FunctionType):\n d = self.push_function(dict(_ipython_map_func=func), targets=targets, block=False)\n d.addCallback(lambda did: self.get_pending_deferred(did, True))\n sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, *zip(*_ipython_map_seq))'\n elif isinstance(func, str):\n d = defer.succeed(None)\n sourceToRun = \\\n '_ipython_map_seq_result = map(%s, *zip(*_ipython_map_seq))' % func\n else:\n raise TypeError(\"func must be a function or str\")\n \n d.addCallback(lambda _: self.scatter('_ipython_map_seq', zip(*sequences), dist, targets=targets))\n d.addCallback(lambda _: self.execute(sourceToRun, targets=targets, block=False))\n d.addCallback(lambda did: self.get_pending_deferred(did, True))\n d.addCallback(lambda _: self.gather('_ipython_map_seq_result', dist, targets=targets, block=block))\n return d",
"def map(iterable, function):\n for x in iterable:\n yield function(x)",
"def map(self, target, *iterable: iter):\n for args in zip(*iterable):\n self.submit(target=target, args=args)",
"def map(func, iterable, chunksize=None, ncpu=0, limit=True, progress=False):\n if (ncpu == 0):\n if (not progress):\n return _map(func, iterable)\n else:\n r = []\n if isinstance(progress, str):\n txt = progress\n else:\n txt = func.__name__\n for k in _PBar(desc=txt).iterover(iterable):\n r.append(func(k))\n return r\n elif progress:\n _n = _mp.cpu_count()\n if (ncpu <= 0):\n # use all available cpus\n p = _mp.Pool(_n)\n elif (ncpu > _n) & (limit is True):\n p = _mp.Pool(_n)\n else:\n p = _mp.Pool(ncpu)\n\n if not hasattr(iterable, '__len__'):\n iterable = list(iterable)\n ntasks = len(iterable)\n\n if isinstance(progress, str):\n txt = progress\n else:\n txt = func.__name__\n\n with _PBar(ntasks, desc=txt) as pb:\n # get the pool working asynchronously\n if islambda(func):\n amap = p.map_async(PicklableLambda(func), iterable, chunksize)\n else:\n amap = p.map_async(func, iterable, chunksize)\n left = 1\n while left > 0:\n _time.sleep(0.1)\n left = amap._number_left\n pb.update(ntasks - left)\n return amap.get()\n else:\n return map_async(func, iterable, chunksize, ncpu=ncpu, limit=limit).get()",
"def fastMap(mapper, data):\n i = 0\n ans = []\n while i < len(data):\n with Pool(MAX_POOL_SIZE) as pool:\n ans.extend(pool.map(mapper, data[i:i+MAX_POOL_SIZE]))\n i += MAX_POOL_SIZE\n\n return ans",
"def pfmap(func, workers=8):\n return fmap(func)",
"def map(self, func):\n return List(map(func, self))",
"def map(self, function):\n return FunctionalWrapper(map(function, self.data))",
"def map(function, iterable):\n\n return [function(x) for x in iterable]",
"def map(self, function):\n pass",
"def map_reduce(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.map_reduce(m.collection, *args, **kwargs)",
"def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))",
"def map(self, func):\n return _(map(func, self._))",
"def multiprocess_map(func, iterable, *worker_args, n_cores=None, mode=\"map\", **pool_kwargs):\n results = []\n\n with mp.Manager() as manager:\n shared_args_proxy = None\n if worker_args is not None:\n shared_args_proxy = manager.list(worker_args)\n\n with mp.Pool(processes=n_cores, initializer=init_worker,\n initargs=shared_args_proxy, **pool_kwargs) as pool:\n if mode == \"map\":\n results = pool.map(func, iterable)\n elif mode == \"starmap\":\n results = pool.starmap(func, iterable)\n elif mode == \"imap\":\n for result in pool.imap(func, iterable):\n results.append(result)\n\n return results",
"def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)",
"def inline_map_reduce(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.inline_map_reduce(m.collection, *args, **kwargs)",
"def map_async(func, iterable, chunksize=None, callback=None, ncpu=0, limit=True, **kwargs):\n _n = _mp.cpu_count()\n if (ncpu <= 0):\n # use all available cpus\n p = _mp.Pool(_n)\n elif (ncpu > _n) & (limit is True):\n p = _mp.Pool(_n)\n else:\n p = _mp.Pool(ncpu)\n\n if islambda(func):\n return p.map_async(PicklableLambda(func), iterable, chunksize, callback)\n else:\n return p.map_async(func, iterable, chunksize, callback)",
"def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result",
"def mapper() -> Callable[[str], Pin]:",
"def gmap(\n func: Callable,\n *iterables: Iterable,\n mapper: Callable[[Callable, tuple[Iterable]], Iterable] = map,\n evaluator: Callable[[Iterable], Any] = tuple\n):\n return evaluator(mapper(func, *iterables))",
"def map(self, func):\n if self.is_right(): return self.right.map(func)\n if self.is_left(): return self.left.map(func)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)",
"def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)"
] |
[
"0.72321373",
"0.7169596",
"0.7142879",
"0.71094584",
"0.6858198",
"0.6813721",
"0.68108535",
"0.6759652",
"0.67054784",
"0.66790706",
"0.6635265",
"0.6604754",
"0.66029197",
"0.6593076",
"0.65843767",
"0.65499353",
"0.65375745",
"0.650316",
"0.64501595",
"0.6427934",
"0.6421241",
"0.6401372",
"0.63974625",
"0.6394804",
"0.6391658",
"0.6373507",
"0.6372824",
"0.635515",
"0.635515",
"0.635515"
] |
0.7605146
|
0
|
Demonstrates using the pager.
|
def pager():
lines = []
for x in range(200):
lines.append('%s. Hello World!' % click.style(str(x), fg='green'))
click.echo_via_pager('\n'.join(lines))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pager(self, start_page=1):\n return Pager(start_page)",
"def print_paging_menu():\n print(\"\\n~ Enter 'n' to view next page of tickets\")\n print(\"~ Enter 'p' to view previous page of tickets\")\n print(\"~ Enter 'q' to quit viewing list of tickets\")",
"def show_more(text,filename=None,writer=None,pagelength=30,prefix=''): # pragma: no cover\n\n pager(text)",
"def page(self, s):\n s = str(s)\n if _bool_globals_check(s):\n txt = pydoc.getdoc(s.__class__)\n logging.warning('First')\n self.blocking_pager(txt)\n return\n\n # After a function contributed by Olivier Aubert, slightly modified.\n\n # Process options/args\n opts, args = self.parse_options(s, 'r')\n raw = 'r' in opts\n if args == '':\n raise UsageError(\"Can't find documentation of None\")\n\n oname = args and args or '_'\n info = self.shell._ofind(oname)\n if info['found']:\n txt = (raw and str or pformat)(info['obj'])\n if 'o' in opts:\n logging.warning('Second')\n self.blocking_pager(txt, cmd='bat --page never ')\n return\n logging.warning('Third')\n self.blocking_pager(txt)\n else:\n logging.warning('Object `%s` not found' % oname)",
"def add_paging(self):\n self.paged = True",
"def test_pagination(self):\n self.check_pagination()",
"def paginated(self) -> global___Snippet.Paginated:",
"def main():\n page = get_page_arg()\n\n items = load_data(page)\n total_pages = get_total_pages()\n context = {\"current_page\": page,\n \"total_pages\": total_pages,\n \"items\": items}\n\n return render_template('index.html', **context)",
"def blocking_pager(self, text, cmd=None):\n return pydoc.tempfilepager(text, self.use_pager)",
"def pagerank(self):\n\n raise NotImplementedError",
"def __init__(self, use_pager=True, cmd=None, **kwargs):\n super().__init__(self.shell, **kwargs)\n self.use_pager = use_pager\n # self.call(*args)\n self.cmd = \"less -JRKMLige \" if cmd is None else cmd",
"def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)",
"def test_pagination(self):\r\n page = 1\r\n per_page = 5\r\n total_count = 10\r\n p = pybossa.util.Pagination(page, per_page, total_count)\r\n assert p.page == page, p.page\r\n assert p.per_page == per_page, p.per_page\r\n assert p.total_count == total_count, p.total_count\r\n\r\n err_msg = \"It should return two pages\"\r\n assert p.pages == 2, err_msg\r\n p.total_count = 7\r\n assert p.pages == 2, err_msg\r\n p.total_count = 10\r\n\r\n err_msg = \"It should return False\"\r\n assert p.has_prev is False, err_msg\r\n err_msg = \"It should return True\"\r\n assert p.has_next is True, err_msg\r\n p.page = 2\r\n assert p.has_prev is True, err_msg\r\n err_msg = \"It should return False\"\r\n assert p.has_next is False, err_msg\r\n\r\n for i in p.iter_pages():\r\n err_msg = \"It should return the page: %s\" % page\r\n assert i == page, err_msg\r\n page += 1",
"def pager():\n File = open(raw_input( \"Enter any file name for reading \") )\n if File is not None:\n prompt = raw_input(\"Do you wanna read the contents now... y / n \")\n counter = 1\n delimiter = 1\n m = re.search(r'y|Yes|yes|y', prompt)\n if m:\n for line in File:\n if (counter / delimiter) == 10:\n delimiter += 1\n prompted = raw_input(\"Do you wish to continue\")\n x = re.search(r'y|Yes|YES|yes', prompted)\n if x:\n print line,\n print line,\n counter += 1",
"def djello_paginator_number(cl, i):\n if i == DOT:\n return '… '\n elif i == cl.page_num:\n return format_html('<li class=\"page-item active\"><a class=\"page-link\" href=\"\">{}</a></li>', i + 1)\n else:\n return format_html(\n '<li class=\"page-item\"><a class=\"page-link\" href=\"{}\">{}</a></li>',\n cl.get_query_string({PAGE_VAR: i}),\n i + 1)",
"def make_page(results, page, debug = False):\n\n if debug:\n print(\"initiating make_page()\")\n\n start = 5 * (page % 10)\n shows = []\n\n if debug:\n print(f\"start: {start}\")\n\n for i in range(start, start + 5):\n if debug:\n print(i)\n\n try:\n args = results[i]\n except:\n break\n\n s = {\"title\": args[0],\"link\": args[1],\"image\": get_image(args[1])}\n\n shows.append(s)\n if debug:\n for show in shows:\n print(show)\n return shows",
"def Main(base_dir, page_set_filenames):\n runner = PageTestRunner()\n sys.exit(runner.Run(base_dir, page_set_filenames))",
"def test_tags_browse_pagination_page_links(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n pagenumbers = po.get_link_page_numbers()\n\n while len(pagenumbers) > 0:\n page = int(pagenumbers[0])\n starturl = po.current_url()\n\n # click the link to go to the next page\n po.goto_page_number(page)\n endurl = po.current_url()\n\n # make sure the page changed\n assert starturl != endurl, \\\n \"clicking the page link for page %s\" % (page) \\\n + \" did not change pages: starturl = %s,\" % (starturl) \\\n + \" endurl = %s\" % (endurl)\n\n\n # update the page numbers\n # generally only a few page numbers surrounding the\n # current page are shown. as we progress through the\n # pages, more page numbers become available.\n if len(pagenumbers) > 1:\n new_pagenumbers = po.get_link_page_numbers()\n assert len(new_pagenumbers) != 0, \\\n 'retrieving new page numbers failed while evaluating' \\\n + ' page #%s (%s)' % (page,endurl)\n pagenumbers = [int(i) \\\n for i in new_pagenumbers if int(i) > page]\n else:\n pagenumbers = []\n\n\n #FIXME: check the current page number matches page",
"def index(request):\n\n queryset_list = Todo.objects.all() #.order_by(\"-timestamp\")\n page = request.GET.get('page', 1)\n\n paginator = Paginator(queryset_list, 2)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"taskli\": queryset, \n }\n return render(request, \"lists/task_list.html\", context)",
"def pagination_spacer():\n global PAGE_NUMBER\n PAGE_NUMBER += 1\n pagination = \"-{} (Page # {})\".format(SPACER, str(PAGE_NUMBER))\n pagination += \"\\n\"\n pagination += \" {} type 'help' for commands\".format(SPACER,)\n\n return pagination",
"def entries_index(request):\n blog_entries = Entry.objects.filter(status=2).order_by('-pub_date')\n paginator = Paginator(blog_entries, 4)#4 posts/page\n try:\n page = int(request.GET.get('page','1'))\n except ValueError:\n page = 1\n try:\n entries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n entries = paginator.page(paginator.num_pages)\n return render_to_response('blog/blog.html', {'entries':entries}, RequestContext(request))",
"def paging_options(self, index, tasks):\n\n options = {\"P\": \"Previous Entry\", \"N\": \"Next Entry\",\n \"E\": \"Edit Entry\", \"D\": \"Delete Entry\", \"M\": \"Main Menu\"}\n\n if index == 0:\n del options[\"P\"]\n\n if index == len(tasks) - 1:\n del options[\"N\"]\n\n for k, v in options.items():\n print(\" \" + k + \". \" + v)",
"def test_tags_view_pagination_page_links(self,tag_with_items):\n\n self.tag_name = tag_with_items\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n po.search_for_content([self.tag_name])\n\n po = self.catalog.load_pageobject('TagsViewPage')\n\n pagenumbers = po.get_link_page_numbers()\n\n for page in pagenumbers:\n starturl = po.current_url()\n po.goto_page_number(page)\n endurl = po.current_url()\n assert starturl != endurl, \\\n \"clicking the page link for page %s\" % (page) \\\n + \" did not change pages:\" \\\n + \" starturl = %s, endurl = %s\" % (starturl,endurl)\n\n #FIXME: check the current page number matches page",
"def __init__(\n self,\n caller,\n inp,\n always_page=False,\n session=None,\n justify=False,\n justify_kwargs=None,\n exit_on_lastpage=False,\n exit_cmd=None,\n page_formatter=str,\n **kwargs,\n ):\n self._caller = caller\n self._always_page = always_page\n\n if not session:\n # if not supplied, use the first session to\n # determine screen size\n sessions = caller.sessions.get()\n if not sessions:\n return\n session = sessions[0]\n self._session = session\n\n self._justify = justify\n self._justify_kwargs = justify_kwargs\n self.exit_on_lastpage = exit_on_lastpage\n self.exit_cmd = exit_cmd\n self._exit_msg = _(\"|xExited pager.|n\")\n self._kwargs = kwargs\n\n self._data = None\n\n self._pages = []\n self._npos = 0\n\n self._npages = 1\n self._paginator = self.paginator_index\n self._page_formatter = str\n\n # set up individual pages for different sessions\n height = max(4, session.protocol_flags.get(\"SCREENHEIGHT\", {0: _SCREEN_HEIGHT})[0] - 4)\n self.width = session.protocol_flags.get(\"SCREENWIDTH\", {0: _SCREEN_WIDTH})[0]\n # always limit number of chars to 10 000 per page\n self.height = min(10000 // max(1, self.width), height)\n\n # does initial parsing of input\n self.init_pages(inp)\n\n # kick things into gear\n self.start()",
"def test_tags_view_pagination_current_page(self,tag_with_items):\n\n self.tag_name = tag_with_items\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n po.search_for_content([self.tag_name])\n\n po = self.catalog.load_pageobject('TagsViewPage')\n\n current_page_number = po.get_current_page_number()\n assert current_page_number == '1', \\\n \"after loading the page %s and examining the page links,\" \\\n % (po.current_url()) \\\n + \" the current page number is '%s', expected '1'\" \\\n % (current_page_number)",
"def respond(self, pager: Pager, func: Callable) -> None:\n self._pages[pager] = func.__name__",
"def pre_exit(self) -> \"None\":\n # Display pager\n if config.page:\n from pydoc import pager\n\n self.out_file.seek(0)\n data = self.out_file.read()\n pager(data)\n\n if self.out_file is not sys.stdout:\n self.out_file.close()\n\n # Close the application\n self.exit()",
"def paginate_view(request, query_set, page=None, num_items=None):\n if page is None:\n page = request.GET.get('page', default=1)\n if num_items is None:\n num_items = request.GET.get('num_items', default=10)\n paginator = Paginator(query_set, num_items)\n try:\n data_set = paginator.page(page)\n except EmptyPage:\n data_set = paginator.page(paginator.num_pages)\n return data_set, num_items, page",
"def per_page():\n return 100",
"def page(request, pagenum):\n context = Paginator().filter(Book.objects.all(), pagenum)\n return render(request, 'books/bookListPage.html', context)"
] |
[
"0.6193642",
"0.6142706",
"0.59355986",
"0.5923058",
"0.58354825",
"0.58264136",
"0.5783766",
"0.56706333",
"0.5645084",
"0.55829614",
"0.5568693",
"0.5560625",
"0.5547341",
"0.5534162",
"0.55162716",
"0.5515672",
"0.5388088",
"0.5358382",
"0.5328274",
"0.5272116",
"0.5266672",
"0.5262896",
"0.5207969",
"0.51998734",
"0.5196878",
"0.51958454",
"0.5186081",
"0.5181734",
"0.51741713",
"0.5166814"
] |
0.761011
|
0
|
Read data (document / collection) from Firestore recursively and save to local file system
|
def read(cred, output, depth, type, exclude, path):
    click.echo('Reading from Firestore, credential file: %s' % (cred))
cred = os.path.abspath(cred)
output = os.path.abspath(output)
click.echo('Document path: %s' % path)
click.echo('Output path: %s' % output)
    if depth < 0:
depth = 1000000
if path and path.startswith('/'):
path = path[1:]
if os.listdir(output):
click.echo('\nError: folder "%s" is not empty' % output)
return
global total_doc_count
global start_time
total_doc_count = 0
start_time = time.time()
fs = firestore.client(initialize_app(credentials.Certificate(cred)))
# data = fs.collection('schools').get()
# for item in data:
# print(json.dumps(item.to_dict()))
# break
if not path or path == '/': # get all root collections
collections = fs.collections() # root collections
# collections = fs.document('schools/dev').collections() # root collections
for col in collections:
read_collection(output, col.id, col, 1, depth, exclude)
    elif type == 'document':
doc = fs.document(path).get()
read_document(output, path, doc, 1, depth, exclude)
else: # 'collection'
col = fs.collection(path)
read_collection(output, path, col, 1, depth, exclude)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write(cred, folder, depth, path):\n click.echo('Writing to Firetore, credential file: %s' % (cred))\n cred = os.path.abspath(cred)\n data_folder = os.path.abspath(folder)\n click.echo('Document path: %s' % path)\n click.echo('Data folder path: %s' % data_folder)\n\n if depth < 0:\n depth = 1000000\n\n if path and path.startswith('/'):\n path = path[1:]\n\n allfiles = os.listdir(data_folder)\n if not allfiles:\n click.echo('\\nError: data folder \"%s\" is empty' % data_folder)\n return\n\n global total_doc_count\n global start_time\n total_doc_count = 0\n start_time = time.time()\n fs = firestore.client(initialize_app(credentials.Certificate(cred)))\n\n write_recursively(fs, data_folder, path, 1, depth)",
"def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")",
"def index_document(self, document):\n # Recursively collect records\n records = []\n if document.get_type() is document.TYPE_DIR:\n dirname = document.get_filename()\n subdirs, files = document.get_contents()\n for subdir in subdirs:\n document.set_filename(os.path.join(dirname, subdir))\n self.index_document(document)\n for filename in files:\n document.set_filename(os.path.join(dirname, filename))\n record = self.create_record(document)\n if record is not None:\n records.append(record)\n\n if len(records) == 0:\n return\n\n # Store records\n writer = self.get_index().writer()\n for record in records:\n writer.add_document(**record)\n writer.commit()",
"def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()",
"def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)",
"def collecte_docs(self, chercheur, overwrite=False): # self,\n init = overwrite # If True, data persistence is lost when references are updated\n docs = hal.find_publications(chercheur[\"halId_s\"], \"authIdHal_s\")\n\n progress_recorder = ProgressRecorder(self)\n progress_recorder.set_progress(0, len(docs), description=\"récupération des données HAL\")\n # Insert documents collection\n for num, doc in enumerate(docs):\n doc[\"country_colaboration\"] = location_docs.generate_countrys_fields(doc)\n doc = doi_enrichissement.docs_enrichissement_doi(doc)\n if \"fr_abstract_s\" in doc.keys():\n if isinstance(doc[\"fr_abstract_s\"], list):\n doc[\"fr_abstract_s\"] = \"/n\".join(doc[\"fr_abstract_s\"])\n if len(doc[\"fr_abstract_s\"]) > 100:\n doc[\"fr_entites\"] = keyword_enrichissement.return_entities(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n doc[\"fr_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n if \"en_abstract_s\" in doc.keys():\n if isinstance(doc[\"en_abstract_s\"], list):\n doc[\"en_abstract_s\"] = \"/n\".join(doc[\"en_abstract_s\"])\n if len(doc[\"en_abstract_s\"]) > 100:\n doc[\"en_entites\"] = keyword_enrichissement.return_entities(\n doc[\"en_abstract_s\"], \"en\"\n )\n doc[\"en_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"en_abstract_s\"], \"en\"\n )\n\n doc[\"_id\"] = doc[\"docid\"]\n doc[\"validated\"] = True\n\n doc[\"harvested_from\"] = \"researcher\"\n\n doc[\"harvested_from_ids\"] = []\n doc[\"harvested_from_label\"] = []\n\n #\n #\n # print(doc[\"authorship\"], doc ['authLastName_s'])\n\n if len(doc[\"authIdHal_s\"]) != len(doc[\"authLastName_s\"]):\n # print (\"elastichal.py : test d'autorat no good\")\n # test sur le nom complet...\n nom = [\n truc\n for truc in doc[\"authLastName_s\"]\n if chercheur[\"lastName\"].lower() in truc.lower()\n ] # pour les récemment mariés qui auraient un nom composé...\n # Après si 'lun des co-auteur porte le même nom...\n if len(nom) > 0:\n nom = nom[0].title()\n try:\n if doc[\"authLastName_s\"].index(nom) == 0: # premier\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authLastName_s\"].index(nom) == len(doc[\"authLastName_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n except ValueError:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n elif chercheur[\"halId_s\"] in doc[\"authIdHal_s\"]:\n if doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == 0:\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == len(doc[\"authIdHal_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n else:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n\n doc[\"harvested_from_ids\"].append(chercheur[\"halId_s\"])\n\n # historique d'appartenance du docId\n # pour attribuer les bons docs aux chercheurs\n # harvet_history.append({'docid': doc['docid'], 'from': row['halId_s']})\n #\n # for h in harvet_history:\n # if h['docid'] == doc['docid']:\n # if h['from'] not in doc[\"harvested_from_ids\"]:\n # doc[\"harvested_from_ids\"].append(h['from'])\n\n doc[\"records\"] = []\n\n doc[\"MDS\"] = utils.calculate_mds(doc)\n\n try:\n should_be_open = utils.should_be_open(doc)\n if should_be_open == 1:\n 
doc[\"should_be_open\"] = True\n if should_be_open == -1:\n doc[\"should_be_open\"] = False\n\n if should_be_open == 1 or should_be_open == 2:\n doc[\"isOaExtra\"] = True\n elif should_be_open == -1:\n doc[\"isOaExtra\"] = False\n except IndexError:\n print(\"publicationDate_tdate error ?\")\n doc[\"Created\"] = datetime.datetime.now().isoformat()\n\n if not init: # récupération de l'existant pour ne pas écraser\n field = \"_id\"\n doc_param = esActions.scope_p(field, doc[\"_id\"])\n\n if not es.indices.exists(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\"\n ): # -researchers\" + row[\"ldapId\"] + \"-documents\n print(\"exception \", chercheur[\"labHalId\"], chercheur[\"ldapId\"])\n\n res = es.search(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n body=doc_param,\n ) # -researchers\" + row[\"ldapId\"] + \"-documents\n\n if len(res[\"hits\"][\"hits\"]) > 0:\n doc[\"validated\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"validated\"]\n if \"authorship\" in res[\"hits\"][\"hits\"][0][\"_source\"]:\n doc[\"authorship\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"authorship\"]\n\n if (\n res[\"hits\"][\"hits\"][0][\"_source\"][\"modifiedDate_tdate\"]\n != doc[\"modifiedDate_tdate\"]\n ):\n doc[\"records\"].append(\n {\n \"beforeModifiedDate_tdate\": doc[\"modifiedDate_tdate\"],\n \"MDS\": res[\"hits\"][\"hits\"][0][\"_source\"][\"MDS\"],\n }\n )\n\n else:\n doc[\"validated\"] = True\n progress_recorder.set_progress(num, len(docs), description=\"(récolte)\")\n progress_recorder.set_progress(num, len(docs), description=\"(indexation)\")\n helpers.bulk(\n es,\n docs,\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n refresh=\"wait_for\",\n )\n\n return chercheur # au cas où",
"def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)",
"def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()",
"def createDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # this is create method, no update allowed\n if \"_rev\" in document: del document[\"_rev\"]\n if \"_deleted\" in document: del document[\"_deleted\"]\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()",
"def flush(self):\n\n # save ddocs\n all_ddocs = self.all_docs(startkey=u\"_design\", endkey=u\"_design/\\u9999\", include_docs=True)\n ddocs = []\n for ddoc in all_ddocs:\n doc = ddoc['doc']\n old_atts = doc.get('_attachments', {})\n atts = {}\n for name, info in old_atts.items():\n att = {}\n att['content_type'] = info['content_type']\n att['data'] = self.fetch_attachment(ddoc['doc'], name)\n atts[name] = att\n\n # create a fresh doc\n doc.pop('_rev')\n doc['_attachments'] = resource.encode_attachments(atts)\n\n ddocs.append(doc)\n\n # delete db\n self.server.delete_db(self.dbname)\n\n # we let a chance to the system to sync\n times = 0\n while times < 10:\n if self.dbname in self.server:\n break\n time.sleep(0.2)\n times += 1\n\n # recreate db + ddocs\n self.server.create_db(self.dbname)\n self.bulk_save(ddocs)",
"def write_to_db( self ) :\n # first lets update the json file internally through: modify the path to figures\n # The json file has two keys: info and processes\n # we loop over all processes and we change the value of the key figure\n for proc in self.data[\"processes\"].keys():\n # for keys in self.data[\"processes\"][proc].keys():\n # each process has one figure\n try:\n # if keys == \"figure\":\n old_value = self.data[\"processes\"][proc][\"figure\"]\n new_value = self.datapath + \"/\" + old_value\n self.data[\"processes\"][proc][\"figure\"] = new_value\n except Exception as err:\n print( 'The key %s does not exist in the json file' % 'figure' )\n print( err )\n\n # Check the existence of the current json file inside the data base\n # the name of the json file starts with run_number as: run_number.json\n try:\n if self.collection.find_one({\"info.run\": {\"$eq\": self.run_number}}):\n # if the document with the given run number exists, delete it and re-write\n print( \"File %s already in database\" % self.data[\"info\"][\"filename\"] )\n self.collection.delete_one( {\"info.run\": {\"$eq\": self.run_number}} )\n self.collection.insert_one( self.data )\n\n else:\n print('File %s is going to be dumbed' % self.data[\"info\"][\"filename\"])\n self.collection.insert_one( self.data )\n\n except pymongo.errors.ServerSelectionTimeoutError as err:\n print('the data base server is down')\n print(err)\n sys.exit('check the database server if it is up and running ?')\n\n return 0",
"def update_collection(self, collection):\n node = self.node\n flow = node if node.is_flow else node.flow\n\n # Build the key used to store the entry in the document.\n key = node.name\n if node.is_task:\n key = \"w\" + str(node.pos[0]) + \"_t\" + str(node.pos[1])\n elif node.is_work:\n key = \"w\" + str(node.pos)\n\n db = collection.database\n\n # Save files with GridFs first in order to get the ID.\n if self.gridfs_files:\n import gridfs\n fs = gridfs.GridFS(db)\n for ext, gridfile in self.gridfs_files.items():\n logger.info(\"gridfs: about to put file:\", str(gridfile))\n # Here we set gridfile.fs_id that will be stored in the mondodb document\n try:\n with open(gridfile.path, \"r\" + gridfile.mode) as f:\n gridfile.fs_id = fs.put(f, filename=gridfile.path)\n except IOError as exc:\n logger.critical(str(exc))\n\n if flow.mongo_id is None:\n # Flow does not have a mongo_id, allocate doc for the flow and save its id.\n flow.mongo_id = collection.insert({})\n print(\"Creating flow.mongo_id\", flow.mongo_id, type(flow.mongo_id))\n\n # Get the document from flow.mongo_id and update it.\n doc = collection.find_one({\"_id\": flow.mongo_id})\n if key in doc:\n raise ValueError(\"%s is already in doc!\" % key)\n doc[key] = self.as_dict()\n\n collection.save(doc)\n #collection.update({'_id':mongo_id}, {\"$set\": doc}, upsert=False)",
"def backupDocuments(currentTime,baseDir):\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n pathToFolder = baseDir +'Results/'\n FEATURES_DIR = pathToFolder + '/Features/' + currentTime\n docToFeatureVector = parseFeatures(FEATURES_DIR)\n documents = db.documents.find({})\n for document in documents:\n document['text']= document.pop('current_document')\n document['id']= document.pop('_id')\n document['features'] = docToFeatureVector[document[\"query_id\"]+\"-\"+document[\"username\"]]\n del document['posted_document']\n document['iteration'] = currentTime\n db.archive.save(document)",
"def save_local_files(name_file):\n global collection\n name_file = name_file.split('.')[0]\n document = collection.find({'id': name_file})\n if document.count() > 0 and document[0].get('ida_comments', ''):\n print('Comments already extracted for document [%s], skipping.' %\n document[0]['id'])\n return\n if document.count() == 0:\n document = {\n 'id': name_file,\n 'ida_comments': []}\n else:\n document = document[0]\n print('Saving comments for document [%s].' % document['id'])\n asm = open_asm2(document['id'])\n asm = [to_utf(line) for line in asm]\n comments = filter_comments(asm)\n document['ida_comments'] = comments\n collection.save(document)",
"def test_tree_collection_read_write_file(self):\n def eval_klass(coll):\n coll.writeToFile('sample.trees')\n read = LoadTrees('sample.trees')\n self.assertTrue(type(read) == type(coll))\n \n eval_klass(LogLikelihoodScoredTreeCollection(self.scored_trees))\n \n # convert lnL into p\n eval_klass(WeightedTreeCollection([(exp(s), t) \n for s,t in self.scored_trees]))\n remove_files(['sample.trees'], error_on_missing=False)",
"def _write_to_datastore(self, index, doc_type, document, login, path):\n if self.config['Github']['datastore'] == 'filesystem':\n filename = self._generate_filename(doc_type, login)\n self._save_file(json.dumps(document), path, filename)\n elif self.config['Github']['datastore'] == 'elasticsearch':\n self._save_elasticsearch(document, index, doc_type)\n elif self.config['Github']['datastore'] == 'both':\n filename = self._generate_filename(doc_type, login)\n self._save_file(json.dumps(document), path, filename)\n self._save_elasticsearch(document, index, doc_type)\n else:\n error_msg = \"Unable to save result data for {}. Check \" \\\n \" configuration file setting: {}\" \\\n .format(doc_type, self.config['Github']['datastore'])\n self.logger.error(error_msg)",
"def convert(self):\n print('Converting: {}'.format(self.collection.pod_path))\n\n # Pull out the meta information from all the docs.\n sorted_docs = sorted(self.collection.list_docs_unread(), key=lambda doc: doc.pod_path)\n for doc in sorted_docs:\n self.routes_data.extract_doc(doc)\n\n self.routes_data.write_routes(self.pod, self.collection)",
"def save_documents(event, transcript_data):\n documents = [\n ('transcript_url', \"transcript\"),\n ('opening_statement_chair', \"chair opening statement\"),\n ('opening_statement_rm', \"ranking member opening statement\")\n ]\n\n for (field, note) in documents:\n url = transcript_data[field]\n save_document(url, note, event)",
"def update_document(self):\n pass",
"def exportToDB(self, submissions):\n for p in range(len(submissions)):\n for x in range(len(submissions[p])):\n doc_ref = self.fs_db.collection(u'reddit').document(str(submissions[p][4]))\n doc_ref.set({\n u'content': str(submissions[p][0]),\n u'upvote_ratio': str(submissions[p][1]),\n u'score': submissions[p][2],\n u'title': submissions[p][3],\n u'id': submissions[p][4],\n u'total_awards_received': submissions[p][5],\n u'created_utc': submissions[p][6]\n })",
"def extract_doc(self, doc):\n if doc.pod_path.endswith('.yaml'):\n raw_data = yaml.load(\n doc.pod.read_file(doc.pod_path), Loader=yaml_utils.PlainTextYamlLoader)\n else:\n raw_data = doc.format.front_matter.raw_data\n\n if not raw_data:\n print('No raw data found for document: {}'.format(doc.pod_path))\n return\n\n data = collections.OrderedDict()\n\n tagged_keys = tuple(['{}@'.format(key)\n for key in COLLECTION_META_KEYS])\n\n for key, value in raw_data.items():\n if key in COLLECTION_META_KEYS or key.startswith(tagged_keys):\n normal_key = key.lstrip('$')\n if 'path' in key:\n # Use `__NONE` as sentinel to support equating a `value` of None.\n collection_path = self.blueprint.get(\n key, self.blueprint.get(normal_key, '__NONE'))\n # Skip the paths that are the same as the collection.\n if collection_path == value:\n continue\n data[normal_key] = value\n\n if data:\n self.paths[doc.collection_sub_path[1:]] = data",
"def build_collection(data_path, include_online_only):\n mtgjson.fetch_mtgjson(data_path)\n print('Reading mtgjson data.')\n mtgjsondata = mtgjson.read_mtgjson(data_path)\n return collection.Collection(\n mtgjsondata, include_online_only=include_online_only)",
"def upload_latest(self, project_id, document):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' + str(document.get_id()) + '/' \n data = parser.to_json(document)\n if document.get_upload_doc():\n file_list = []\n for value in document.get_upload_doc():\n attachment = {\n 'uploaddoc': {\n 'filename': basename(value), \n 'content':open(value).read()\n } \n }\n file_list.append(attachment)\n else:\n file_list = []\n response = zoho_http_client.post(url, self.details, data, None, file_list)\n return parser.get_documents(response)[0]",
"def loader(index, col):\n writer = index.writer()\n feed_data = get_data(col)\n for doc in feed_data:\n idx = doc[\"_id\"]\n data = doc\n # data = json.dumps(doc)\n # print(data)\n body = dict_values_to_text(doc)\n writer.add_document(idx=idx,data=data,body=body)\n writer.commit()\n print(f\"{index} loaded successfully\")",
"def iter_documents(self):\n raise NotImplementedError",
"def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]",
"def import_data(directory_name, product_file, customer_file, rentals_file):\n\n mongo = MongoDBConnection()\n\n customersuccesscount, customerfailurecount = 0, 0\n productsuccesscount, productfailurecount = 0, 0\n rentalsuccesscount, rentalfailurecount = 0, 0\n\n # Process product file and add to mongoDB\n try:\n with open(Path(directory_name, product_file), 'r') as prodfile:\n\n next(prodfile) # skip header line\n\n with mongo:\n db = mongo.connection.HPNorton\n productcollection = db[\"products\"]\n\n for line in prodfile:\n linelist = [x.strip() for x in line.split(',')]\n\n try:\n result = productcollection.insert_one(\n {\n 'product_id' : linelist[0],\n 'description' : linelist[1],\n 'product_type' : linelist[2],\n 'quantity_available' : linelist[3]\n })\n if result.acknowledged:\n productsuccesscount += 1\n else:\n productfailurecount += 1\n\n except errors.DuplicateKeyError:\n productfailurecount += 1\n continue\n\n DBLOG.info(f'Added product DB entry: {linelist[0]}')\n\n except FileNotFoundError as fileerror:\n SYSTEMLOG.error(f'File not found at {directory_name + product_file}, exception {type(fileerror).__name__}')\n\n # Process customer file and add to mongoDB\n try:\n with open(Path(directory_name, customer_file), 'r') as custfile:\n\n next(custfile) # skip header line\n with mongo:\n db = mongo.connection.HPNorton\n customercollection = db[\"customers\"]\n for line in custfile:\n linelist = [x.strip() for x in line.split(',')]\n\n result = customercollection.insert_one(\n {\n 'customer_id' : linelist[0],\n 'name' : linelist[1],\n 'address' : linelist[2],\n 'zip_code' : linelist[3],\n 'phone_number' : linelist[4],\n 'email' : linelist[5],\n 'rentals' : []\n })\n\n if result.acknowledged:\n customersuccesscount += 1\n else:\n customerfailurecount += 1\n\n DBLOG.info(f'Added customer DB entry: {linelist[0]}')\n\n except FileNotFoundError as fileerror:\n SYSTEMLOG.error(f'File not found at {directory_name + product_file}, exception {type(fileerror).__name__}')\n\n # Process rental file and add to mongoDB in customer collection\n try:\n with open(Path(directory_name, rentals_file), 'r') as rentfile:\n\n next(rentfile) # skip header line\n\n with mongo:\n db = mongo.connection.HPNorton\n customercollection = db[\"customers\"]\n for line in rentfile:\n linelist = [x.strip() for x in line.split(',')]\n\n result = customercollection.update_one(\n {\n 'customer_id' : linelist[1]\n },\n {\n '$addToSet' :\n {\n 'rentals' : linelist[0]\n }\n })\n\n if result.modified_count:\n rentalsuccesscount += 1\n else:\n rentalfailurecount += 1\n\n DBLOG.info(f'Added rental DB entry to customer: {linelist[1]}')\n\n except FileNotFoundError as fileerror:\n SYSTEMLOG.error(f'File not found at {directory_name + product_file}, exception {type(fileerror).__name__}')\n\n return (productsuccesscount, customersuccesscount, rentalsuccesscount), (productfailurecount, customerfailurecount, rentalfailurecount)",
"def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return",
"def download(cls):\n cls._check_folder()\n os.chdir(cls.VIEWS_PATH)\n # iterate documents\n for doc in cls._documents:\n design_doc = doc().view()\n if design_doc is None:\n continue\n bucket_name = design_doc.bucket.name\n # iterate viewtypes (i.e. spatial and views)\n for view_type, views in design_doc.ddoc.iteritems():\n save_dir = '%s/%s/%s' % (bucket_name, design_doc.name, view_type)\n try:\n # remove and recreate the dir\n shutil.rmtree(save_dir, ignore_errors=True)\n os.makedirs(save_dir)\n except OSError:\n pass\n for name, view in views.iteritems():\n if isinstance(view, unicode) and view_type=='spatial':\n spatial_file = '%s/%s.spatial.js' % (save_dir, name)\n with open(spatial_file, 'w') as f:\n f.write(view)\n print 'Downloaded: %s' % spatial_file\n if isinstance(view, dict) and 'map' in view:\n map_file = '%s/%s.map.js' % (save_dir, name)\n with open(map_file, 'w') as f:\n f.write(view['map'])\n print 'Downloaded: %s' % map_file\n if isinstance(view, dict) and 'reduce' in view:\n reduce_file = '%s/%s.reduce.js' % (save_dir, name)\n with open(reduce_file, 'w') as f:\n f.write(view['reduce'])\n print 'Downloaded: %s' % reduce_file\n pass",
"def import_data(directory_name, product_file, customer_file, rentals_file):\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n product_error_count = 0\n try:\n product_collection = database[PRODUCT_COLLECTION]\n product_count = 0\n with open(directory_name + '/' + product_file, 'r') as csv_file:\n product_reader = csv.DictReader(csv_file) # return an ordered dictionary\n for row in product_reader:\n product_collection.insert_one(row)\n product_count += 1\n except (FileNotFoundError, TypeError):\n product_error_count += 1\n\n customer_error_count = 0\n try:\n customer_collection = database[CUSTOMER_COLLECTION]\n customer_count = 0\n with open(directory_name + '/' + customer_file, 'r') as csv_file:\n customer_reader = csv.DictReader(csv_file)\n for row in customer_reader:\n customer_collection.insert_one(row)\n customer_count += 1\n except (FileNotFoundError, TypeError):\n customer_error_count += 1\n\n rentals_error_count = 0\n try:\n rentals_collection = database[RENTALS_COLLECTION]\n rentals_count = 0\n with open(directory_name + '/' + rentals_file, 'r') as csv_file:\n rentals_reader = csv.DictReader(csv_file)\n for row in rentals_reader:\n rentals_collection.insert_one(row)\n rentals_count += 1\n except (FileNotFoundError, TypeError):\n rentals_error_count += 1\n\n return product_count + customer_count + rentals_count"
] |
[
"0.6884818",
"0.6076309",
"0.5875299",
"0.58504695",
"0.5354211",
"0.5283906",
"0.5241763",
"0.5212156",
"0.5192171",
"0.5189327",
"0.5181385",
"0.5175433",
"0.5147129",
"0.5092648",
"0.5061182",
"0.5056619",
"0.504086",
"0.50271386",
"0.5024991",
"0.5018954",
"0.5016102",
"0.5016033",
"0.49880254",
"0.49828166",
"0.49694097",
"0.49547124",
"0.49474508",
"0.49402878",
"0.49272892",
"0.4927242"
] |
0.6566067
|
1
|
Read data (document / collection) from local folder and write to Firestore recursively
|
def write(cred, folder, depth, path):
    click.echo('Writing to Firestore, credential file: %s' % (cred))
cred = os.path.abspath(cred)
data_folder = os.path.abspath(folder)
click.echo('Document path: %s' % path)
click.echo('Data folder path: %s' % data_folder)
if depth < 0:
depth = 1000000
if path and path.startswith('/'):
path = path[1:]
allfiles = os.listdir(data_folder)
if not allfiles:
click.echo('\nError: data folder "%s" is empty' % data_folder)
return
global total_doc_count
global start_time
total_doc_count = 0
start_time = time.time()
fs = firestore.client(initialize_app(credentials.Certificate(cred)))
write_recursively(fs, data_folder, path, 1, depth)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read(cred, output, depth, type, exclude, path):\n click.echo('Reading from firetore, credential file: %s' % (cred))\n cred = os.path.abspath(cred)\n output = os.path.abspath(output)\n click.echo('Document path: %s' % path)\n click.echo('Output path: %s' % output)\n\n if depth<0:\n depth = 1000000\n\n if path and path.startswith('/'):\n path = path[1:]\n\n if os.listdir(output):\n click.echo('\\nError: folder \"%s\" is not empty' % output)\n return\n\n global total_doc_count\n global start_time\n total_doc_count = 0\n start_time = time.time()\n fs = firestore.client(initialize_app(credentials.Certificate(cred)))\n # data = fs.collection('schools').get()\n # for item in data:\n # print(json.dumps(item.to_dict()))\n # break\n if not path or path == '/': # get all root collections\n collections = fs.collections() # root collections\n # collections = fs.document('schools/dev').collections() # root collections\n for col in collections:\n read_collection(output, col.id, col, 1, depth, exclude)\n elif type=='document':\n doc = fs.document(path).get()\n read_document(output, path, doc, 1, depth, exclude)\n else: # 'collection'\n col = fs.collection(path)\n read_collection(output, path, col, 1, depth, exclude)",
"def index_document(self, document):\n # Recursively collect records\n records = []\n if document.get_type() is document.TYPE_DIR:\n dirname = document.get_filename()\n subdirs, files = document.get_contents()\n for subdir in subdirs:\n document.set_filename(os.path.join(dirname, subdir))\n self.index_document(document)\n for filename in files:\n document.set_filename(os.path.join(dirname, filename))\n record = self.create_record(document)\n if record is not None:\n records.append(record)\n\n if len(records) == 0:\n return\n\n # Store records\n writer = self.get_index().writer()\n for record in records:\n writer.add_document(**record)\n writer.commit()",
"def save_local_files(name_file):\n global collection\n name_file = name_file.split('.')[0]\n document = collection.find({'id': name_file})\n if document.count() > 0 and document[0].get('ida_comments', ''):\n print('Comments already extracted for document [%s], skipping.' %\n document[0]['id'])\n return\n if document.count() == 0:\n document = {\n 'id': name_file,\n 'ida_comments': []}\n else:\n document = document[0]\n print('Saving comments for document [%s].' % document['id'])\n asm = open_asm2(document['id'])\n asm = [to_utf(line) for line in asm]\n comments = filter_comments(asm)\n document['ida_comments'] = comments\n collection.save(document)",
"def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")",
"def backupDocuments(currentTime,baseDir):\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n pathToFolder = baseDir +'Results/'\n FEATURES_DIR = pathToFolder + '/Features/' + currentTime\n docToFeatureVector = parseFeatures(FEATURES_DIR)\n documents = db.documents.find({})\n for document in documents:\n document['text']= document.pop('current_document')\n document['id']= document.pop('_id')\n document['features'] = docToFeatureVector[document[\"query_id\"]+\"-\"+document[\"username\"]]\n del document['posted_document']\n document['iteration'] = currentTime\n db.archive.save(document)",
"def generate_data(folder_path):\n # Open the file to write\n with open(\"data.sql\", \"w\") as f:\n # Loop through all the files in the folder_path\n files = sorted(os.listdir(folder_path), key=lambda x: int(x.split('_')[0]))\n for file in files:\n # Open the file to read\n with open(folder_path + \"/\" + file, \"r\") as f1:\n # Write the content of the file to the file to write\n f.write(f1.read())\n # Close the file\n f1.close()\n # Close the file\n f.close()",
"def test_migrate_folder_to_document(self):\n folder = self.portal['folder-1']\n folder.invokeFactory('Document',\n 'my-page-test',\n title=\"My page test\",\n text='spam spam')\n output = migrateContents(self.portal, \"Folder\", \"Document\")\n self.assertEqual(output.get('counter', 0), 2)\n self.assertNotEqual(output.get('error', []), [])\n self.assertEqual(output['error'][0]['msg'], 'Failed migration for object /plone/folder-1 (Folder -> Document)')\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 12)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 1)\n self.assertEqual(self.portal['folder-2'].portal_type, \"Document\")\n self.assertEqual(self.portal['folder-1'].portal_type, \"Folder\")",
"def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)",
"def test_migrate_document_to_folder(self):\n output = migrateContents(self.portal, \"Document\", \"Folder\")\n self.assertEqual(output.get('counter', 0), 10)\n self.assertEqual(output.get('error', []), [])\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 0)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 12)\n folder_titles = ['Folder 1', 'Folder 2', 'My page 0', 'My page 1', 'My page 2', 'My page 3', 'My page 4', 'My page 5', 'My page 6', 'My page 7', 'My page 8', 'My page 9']\n self.assertEqual([x.Title for x in self.portal.portal_catalog(portal_type=\"Folder\", sort_on=\"sortable_title\")], folder_titles)",
"def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)",
"def pushDocsFromDir(docDir):\n\tfor i in os.listdir(docDir):\n\t\tif not(i.endswith(\".DS_Store\")):\n\t\t\tif not(docDir.endswith(\"/\")):\n\t\t\t\tfilename = (docDir+\"/\"+i)\n\t\t\telse:\n\t\t\t\tfilename = docDir + i\n\t\t\tpushDocumentToPhone(filename)\n\n\tprint \"Finished pushing files.\"",
"def readDocuments(docs, prefix):\n\n fmap = open(\"mapping.txt\", \"w\")\n\n\n i = -1\n for folder in pressrelease_folders_txt:\n i += 1\n fullpath = path.join(prefix, folder)\n totFilesInFolder = len(fnmatch.filter(os.listdir(fullpath),\n '*.txt'))\n countFiles = 0\n for f in listdir(path.join(prefix, folder)):\n fmap.write(\"{0}\\t {1:5d}\\n\".format(f, countFiles))\n countFiles += 1\n fullname = fullpath + f\n # text = open(fullname).readlines()\n ff = open(fullname)\n docs.append(ff.read())\n\n print(\"{0:5d}/{1:5d} :: Reading file {2:10s} \".format(countFiles,\n totFilesInFolder, f))\n\n # if countFiles > 4:\n # return\n\n\n fmap.close()",
"def write_to_db( self ) :\n # first lets update the json file internally through: modify the path to figures\n # The json file has two keys: info and processes\n # we loop over all processes and we change the value of the key figure\n for proc in self.data[\"processes\"].keys():\n # for keys in self.data[\"processes\"][proc].keys():\n # each process has one figure\n try:\n # if keys == \"figure\":\n old_value = self.data[\"processes\"][proc][\"figure\"]\n new_value = self.datapath + \"/\" + old_value\n self.data[\"processes\"][proc][\"figure\"] = new_value\n except Exception as err:\n print( 'The key %s does not exist in the json file' % 'figure' )\n print( err )\n\n # Check the existence of the current json file inside the data base\n # the name of the json file starts with run_number as: run_number.json\n try:\n if self.collection.find_one({\"info.run\": {\"$eq\": self.run_number}}):\n # if the document with the given run number exists, delete it and re-write\n print( \"File %s already in database\" % self.data[\"info\"][\"filename\"] )\n self.collection.delete_one( {\"info.run\": {\"$eq\": self.run_number}} )\n self.collection.insert_one( self.data )\n\n else:\n print('File %s is going to be dumbed' % self.data[\"info\"][\"filename\"])\n self.collection.insert_one( self.data )\n\n except pymongo.errors.ServerSelectionTimeoutError as err:\n print('the data base server is down')\n print(err)\n sys.exit('check the database server if it is up and running ?')\n\n return 0",
"def test_migrate_empty_folder_to_document(self):\n output = migrateContents(self.portal, \"Folder\", \"Document\")\n self.assertEqual(output.get('counter', 0), 2)\n self.assertEqual(output.get('error', []), [])\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 12)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 0)",
"def createTrecTextForCurrentDocuments(baseDir):\n pathToFolder = baseDir + 'Collections/'\n if not os.path.exists(pathToFolder):\n os.makedirs(pathToFolder)\n currentTime = str(datetime.datetime.now()).replace(\":\", \"-\").replace(\" \", \"-\").replace(\".\", \"-\")\n pathToTrecText = pathToFolder+\"TrecText/\"\n if not os.path.exists(pathToTrecText):\n os.makedirs(pathToTrecText)\n filename = pathToTrecText + currentTime\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n documents = db.documents.find({}).sort('query_id',1)\n queryToDocnos= {}\n current_users = retrieve_users()\n f = open(filename, 'w')\n for document in documents:\n if document['username'] in current_users:\n print(document['query_id'], document['username'])\n f.write('<DOC>\\n')\n docno = str(document['query_id']).zfill(3) + '-' + str(document['username'])\n f.write('<DOCNO>' + docno + '</DOCNO>\\n')\n docnos = queryToDocnos.get(str(document['query_id']).zfill(3), [])\n docnos.append(docno)\n queryToDocnos[str(document['query_id']).zfill(3)] = docnos\n f.write('<TEXT>\\n')\n f.write(unicodedata.normalize('NFKD', document['current_document']).encode('cp1252', \"ignore\").decode('utf-8', 'replace').replace(u'\\uFFFD', ' ').rstrip())\n f.write('\\n</TEXT>\\n')\n f.write('</DOC>\\n')\n f.close()\n pathToWorkingSet = pathToFolder+ 'WorkingSets/'\n if not os.path.exists(pathToWorkingSet):\n os.makedirs(pathToWorkingSet)\n workingSetFilename = pathToWorkingSet + currentTime\n f = open(workingSetFilename, 'w')\n for query, docnos in queryToDocnos.items():\n i = 1\n for docid in docnos:\n f.write(query.zfill(3) + ' Q0 ' + docid + ' ' + str(i) + ' -' + str(i) + ' indri\\n')\n i +=1\n f.close()\n return filename, workingSetFilename, currentTime",
"def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)",
"def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()",
"def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)",
"def linear(files):\n return list(map(insert_to_mongo, files))",
"def CreateCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text",
"def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()",
"def import_data(directory_name, product_file, customer_file, rentals_file):\n\n mongo = MongoDBConnection()\n\n customersuccesscount, customerfailurecount = 0, 0\n productsuccesscount, productfailurecount = 0, 0\n rentalsuccesscount, rentalfailurecount = 0, 0\n\n # Process product file and add to mongoDB\n try:\n with open(Path(directory_name, product_file), 'r') as prodfile:\n\n next(prodfile) # skip header line\n\n with mongo:\n db = mongo.connection.HPNorton\n productcollection = db[\"products\"]\n\n for line in prodfile:\n linelist = [x.strip() for x in line.split(',')]\n\n try:\n result = productcollection.insert_one(\n {\n 'product_id' : linelist[0],\n 'description' : linelist[1],\n 'product_type' : linelist[2],\n 'quantity_available' : linelist[3]\n })\n if result.acknowledged:\n productsuccesscount += 1\n else:\n productfailurecount += 1\n\n except errors.DuplicateKeyError:\n productfailurecount += 1\n continue\n\n DBLOG.info(f'Added product DB entry: {linelist[0]}')\n\n except FileNotFoundError as fileerror:\n SYSTEMLOG.error(f'File not found at {directory_name + product_file}, exception {type(fileerror).__name__}')\n\n # Process customer file and add to mongoDB\n try:\n with open(Path(directory_name, customer_file), 'r') as custfile:\n\n next(custfile) # skip header line\n with mongo:\n db = mongo.connection.HPNorton\n customercollection = db[\"customers\"]\n for line in custfile:\n linelist = [x.strip() for x in line.split(',')]\n\n result = customercollection.insert_one(\n {\n 'customer_id' : linelist[0],\n 'name' : linelist[1],\n 'address' : linelist[2],\n 'zip_code' : linelist[3],\n 'phone_number' : linelist[4],\n 'email' : linelist[5],\n 'rentals' : []\n })\n\n if result.acknowledged:\n customersuccesscount += 1\n else:\n customerfailurecount += 1\n\n DBLOG.info(f'Added customer DB entry: {linelist[0]}')\n\n except FileNotFoundError as fileerror:\n SYSTEMLOG.error(f'File not found at {directory_name + product_file}, exception {type(fileerror).__name__}')\n\n # Process rental file and add to mongoDB in customer collection\n try:\n with open(Path(directory_name, rentals_file), 'r') as rentfile:\n\n next(rentfile) # skip header line\n\n with mongo:\n db = mongo.connection.HPNorton\n customercollection = db[\"customers\"]\n for line in rentfile:\n linelist = [x.strip() for x in line.split(',')]\n\n result = customercollection.update_one(\n {\n 'customer_id' : linelist[1]\n },\n {\n '$addToSet' :\n {\n 'rentals' : linelist[0]\n }\n })\n\n if result.modified_count:\n rentalsuccesscount += 1\n else:\n rentalfailurecount += 1\n\n DBLOG.info(f'Added rental DB entry to customer: {linelist[1]}')\n\n except FileNotFoundError as fileerror:\n SYSTEMLOG.error(f'File not found at {directory_name + product_file}, exception {type(fileerror).__name__}')\n\n return (productsuccesscount, customersuccesscount, rentalsuccesscount), (productfailurecount, customerfailurecount, rentalfailurecount)",
"def write_storage(file_name: str, news_all: dict):\n logger.info('read_storage start') #Logs a message\n try:\n with open(file_name, 'w') as f:\n json.dump(news_all, f, indent=4)\n except:\n return False\n return True",
"def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()",
"def loader(index, col):\n writer = index.writer()\n feed_data = get_data(col)\n for doc in feed_data:\n idx = doc[\"_id\"]\n data = doc\n # data = json.dumps(doc)\n # print(data)\n body = dict_values_to_text(doc)\n writer.add_document(idx=idx,data=data,body=body)\n writer.commit()\n print(f\"{index} loaded successfully\")",
"def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)",
"def update_collection(self, collection):\n node = self.node\n flow = node if node.is_flow else node.flow\n\n # Build the key used to store the entry in the document.\n key = node.name\n if node.is_task:\n key = \"w\" + str(node.pos[0]) + \"_t\" + str(node.pos[1])\n elif node.is_work:\n key = \"w\" + str(node.pos)\n\n db = collection.database\n\n # Save files with GridFs first in order to get the ID.\n if self.gridfs_files:\n import gridfs\n fs = gridfs.GridFS(db)\n for ext, gridfile in self.gridfs_files.items():\n logger.info(\"gridfs: about to put file:\", str(gridfile))\n # Here we set gridfile.fs_id that will be stored in the mondodb document\n try:\n with open(gridfile.path, \"r\" + gridfile.mode) as f:\n gridfile.fs_id = fs.put(f, filename=gridfile.path)\n except IOError as exc:\n logger.critical(str(exc))\n\n if flow.mongo_id is None:\n # Flow does not have a mongo_id, allocate doc for the flow and save its id.\n flow.mongo_id = collection.insert({})\n print(\"Creating flow.mongo_id\", flow.mongo_id, type(flow.mongo_id))\n\n # Get the document from flow.mongo_id and update it.\n doc = collection.find_one({\"_id\": flow.mongo_id})\n if key in doc:\n raise ValueError(\"%s is already in doc!\" % key)\n doc[key] = self.as_dict()\n\n collection.save(doc)\n #collection.update({'_id':mongo_id}, {\"$set\": doc}, upsert=False)",
"def copydocs(store, path, rc):\n for doc in rc.documents:\n dst = os.path.join(path, os.path.split(doc)[1])\n if not rc.force and os.path.isfile(dst):\n raise RuntimeError(dst + \" already exists!\")\n shutil.copy2(doc, dst)",
"def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")"
] |
[
"0.60830563",
"0.5607789",
"0.55427426",
"0.5538369",
"0.55160624",
"0.5408159",
"0.5377633",
"0.5359673",
"0.5350318",
"0.5331554",
"0.53252536",
"0.5292329",
"0.5278777",
"0.52338994",
"0.51846474",
"0.5164863",
"0.51599556",
"0.51181865",
"0.50957966",
"0.5087054",
"0.5023715",
"0.5022252",
"0.5014293",
"0.50142145",
"0.5011732",
"0.50000507",
"0.49968073",
"0.49862105",
"0.49544212",
"0.49409378"
] |
0.71699053
|
0
|
Create config file with OXE connection parameters
|
def oxe_configure(host, login, password, proxies):
config = ConfigParser()
full_path = join(gettempdir(), 'pyoxeconf.ini')
if exists(full_path):
config.read(full_path)
if config.has_section('default') is False:
config.add_section('default')
if config.has_section(str(host)) is False:
config.add_section(str(host))
config.set(str(host), 'host', str(host))
config.set(str(host), 'login', str(login))
config.set(str(host), 'password', str(password))
if proxies is not None:
if config.has_section('proxies') is False:
config.add_section('proxies')
config.set('proxies', 'proxies', proxies)
with open(full_path, 'w+') as file:
try:
config.write(file)
chmod(full_path, 0o600)
except Error as e:
            print('Error creating config file {}: {}'.format(full_path, e))
exit(-1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_config(self) -> None:\n if self.is_config_exist() is False:\n file = open(file=self.connection_string, mode=\"w+\")\n file.close()",
"def create_config(self) -> None:\n pass",
"def create_config(self) -> None:\n pass",
"def create_configfile():\n config = ConfigParser.ConfigParser()\n config.add_section('Common')\n config.set('Common', 'renewal days', 20)\n config.set('Common', 'delayed installation days', 5)\n config.set('Common', 'include chain', True)\n config.set('Common', 'account key', './config/key.pem')\n config.add_section('Load Balancer')\n config.set('Load Balancer', 'cluster', True)\n config.set('Load Balancer', 'Host 1', 'lb1.example.com')\n config.set('Load Balancer', 'Host 2', 'lb2.example.com')\n config.set('Load Balancer', 'username', 'admin')\n config.set('Load Balancer', 'password', 'password01')\n config.set('Load Balancer', 'datagroup', 'acme_responses_dg')\n config.set('Load Balancer', 'datagroup partition', 'Common')\n config.add_section('Certificate Authority')\n config.set('Certificate Authority', 'Directory URL',\n 'https://acme-v01.api.letsencrypt.org/directory')\n config.set('Certificate Authority', 'use proxy', False)\n config.set('Certificate Authority', 'proxy',\n 'http://proxy.example.com:8080')\n\n # As the config file contains password, we should be careful with permissions\n with os.fdopen(os.open(CONFIG_FILE, os.O_WRONLY | os.O_CREAT, 0o660), 'w') as config_file:\n config.write(config_file)",
"def get_configuration_file(run_dir, dbname, evalue_exponent):\n host, port = _get_root_credentials()[:2]\n config = \"\"\"# OrthoMCL configuration file for generated database\ndbVendor=mysql\ndbConnectString=dbi:mysql:{dbname}:{host}:{port}\ndbLogin=orthomcl\ndbPassword=pass\nsimilarSequencesTable=SimilarSequences\northologTable=Ortholog\ninParalogTable=InParalog\ncoOrthologTable=CoOrtholog\ninterTaxonMatchView=InterTaxonMatch\npercentMatchCutoff=50\nevalueExponentCutoff={evalue_exponent}\noracleIndexTblSpc=NONE\"\"\".format(dbname=dbname, host=host, port=port, evalue_exponent=evalue_exponent)\n\n # Write to file & return file\n config_file = os.path.join(run_dir, '{0}.cfg'.format(dbname))\n with open(config_file, mode='w') as write_handle:\n write_handle.write(config)\n return config_file",
"def generate_config(args):\n default_config = resource_string('webrpg', 'scripts/templates/default_config.txt').decode('utf-8')\n if args.sqla_connection_string:\n default_config = default_config.replace('%(sqlalchemy_url)s', args.sqla_connection_string)\n else:\n default_config = default_config.replace('%(sqlalchemy_url)s', get_user_parameter('SQL Alchemy Connection String', 'sqlite:///%(here)s/pyire_test.db'))\n\n with open(args.filename, 'w') as out_f:\n out_f.write(default_config)",
"def _createConfigFile(self):\n configFile = self._configFile()\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass",
"def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())",
"def config():\n file_path = None # path to the input file\n db_path = None # path to the output db\n atomic_properties = (\n \"Properties=species:S:1:pos:R:3\"\n ) # atomic properties of the input file\n molecular_properties = [\"energy\"] # molecular properties of the input file\n overwrite = False",
"def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()",
"def createConfig():\n\twith open(configPath, 'w', encoding='utf-8') as file:\n\t\tjson.dump(default_config, file, indent=3)",
"def write_config():\n\n e = Element(\"Configuration\")\n r = SubElement(e, \"RepositoryList\")\n r = SubElement(r, \"Repository\", name = \"default\")\n SubElement(r, \"Module\").text = args.driver\n SubElement(r, \"TokenLabel\").text = args.token_label\n SubElement(r, \"PIN\").text = args.pin\n ElementTree(e).write(args.write_config)\n args.write_config.flush()",
"def create_config():\n config = configparser.ConfigParser()\n section = 'Settings'\n config.add_section(section)\n config.set(section, 'font', 'Courier')\n config.set(section, 'font_size', '10')\n config.set(section, 'font_style', 'normal')\n # Interpolation\n config.set(section, 'font_info', \"You are using %(font)s at %(font_size)s pt\")\n\n with open(path, 'w') as config_file:\n config.write(config_file)",
"def createPPSConfig(ppsConfigFilePath, keyDict):\n out = csv.OutFileBuffer(ppsConfigFilePath)\n out.writeText(\n \"\"\"#######################################################################\n#configuration file (PPS+ GENERATED !!!)\n#please make sure that there is no space before or after \":\"\n#lines starting with character \"#\" are treated as comments\n#please provide complete paths instead of only file or directory names\n#######################################################################\n#directory where processed NCBI data is stored, provide empty directory to create new\n#REUSABLE\\n\"\"\")\n out.writeText('NCBI_PROCESSED_DIR:%s\\n' % keyDict.get('NCBI_PROCESSED_DIR', ''))\n out.writeText(\n \"\"\"#Directory containing NCBI taxonomy in SQlite3 format with file name \"ncbitax_sqlite.db\"\n#provide empty directory to create new database\n#REUSABLE\\n\"\"\")\n out.writeText('NCBI_TAX_DIR:%s\\n' % keyDict.get('NCBI_TAX_DIR', ''))\n out.writeText('#project directory, the directory must be empty\\n')\n out.writeText('PROJECT_DIR:%s\\n' % keyDict.get('PROJECT_DIR', ''))\n out.writeText(\n \"\"\"#############################\n#!!!FOLLOWING ARE OPTIONAL!!!\n#############################\n###### Output space options #####\n#a file containing a tree in newick format (see restrictions in INSTALL.txt)\n#OR a file with ncbi taxon ids (one id per line) to create a tree from\\n\"\"\")\n out.writeText('TREE_FILE:%s\\n' % keyDict.get('TREE_FILE', ''))\n out.writeText(\n \"\"\"#Taxonomic ranks (comma separated, no space) starting at the lowest rank. \\\nPlease make sure that \"root\" is there at the end.\nTAXONOMY_RANKS:species,genus,family,order,class,phylum,superkingdom,root\n#number of minimum genomes a clade must contain to be included in generic model\n#effective only if tree file is not provided\nN_MIN_GENOMES_GENERIC:3\n#action on loss 0:disabled, 1:invert\nLOSS_ACTION:0\n###### Input space options #####\n#a directory with sample specific fasta files (file names must start with appropriate organism/species \\\nncbi taxonomic id)\n#leave empty if you don't have any\\n\"\"\")\n out.writeText('SAMPLE_SPECIFIC_DIR:%s\\n' % keyDict.get('SAMPLE_SPECIFIC_DIR', ''))\n out.writeText(\n \"\"\"#kmer feature space for multiple kmers use kmer_min-kmer_max\nKMER:4-6\n#Fragment lengths for different models (comma separated, no space)\nFRAGMENT_LEN:1000,3000,5000,10000,15000,50000\n#kmer feature\n#use reverse complement for computing kmer features?\nREV_COMPLEMENT:1\n#remove reverse complement features?\nRM_REV_COMPLEMENT:1\n#0:disabled, 1:sequence length, 2:sequence_length-k+1, 3:embedded monomer frequency\nKMER_NORMALIZATION:1\n#Number of examples per training file\nNUMBER_EXAMPLES:10000\n#step size for sample specific data; either a single number (for all fragment lengths) or an array separated with \",\"\nSAMPLE_SPECIFIC_STEP:1000,300,500,1000,1500,5000\n###### Training options #####\n#C values for SVM, if single value is given then models will be build with that value.\n#If comma separated (no space) values are given then cross-validation will be performed.\n#If a single value is provided, all models will be built with it. Our experience shows that in general\n#values less than 1 (e.g. 0.01 and 0.1) do not provide good models.\nC_GRID:1000\n#clean-up the data (sampled_fasta and train_data directories) created after training? 
TRUE/FALSE\nCLEAN_UP_TRAIN:FALSE\n#kernel type 0:linear, 1:polynomial, 2:rbf (on-linear kernels are computationally expensive)\nKERNEL:0\n##polynomial kernel degree\nKERNEL_POLYNOMIAL_DEGREE:2\n##rbf kernel gamma\nKERNEL_RBF_GAMMA:1\n##polynomial kernel s\nKERNEL_POLYNOMIAL_S:1\n###### Predictions options #####\n#number of classifiers to use, keep this odd to avoid ties\nN_CLASSIFIERS:3\n#Create Pie charts for every taxonomic rank TRUE/FALSE (in prediction)\n#slice colors are determined automatically so no color consistency is guaranteed\nPIE_CHARTS:FALSE\n###### Misc options #####\n#should the models be built in parallel (please make sure that you have enough number of\nprocessors and main memory)\\n\"\"\")\n out.writeText('PARALLEL_MODELS:%s\\n' % keyDict.get('PARALLEL_MODELS', 'FALSE'))\n out.writeText(\n \"\"\"#allowed file extensions\nEXTENSIONS:\n#genomes to exclude: file containing one ncbi tax_id per line\\n\"\"\")\n out.writeText('GENOMES_EXCLUDE:%s\\n' % keyDict.get('GENOMES_EXCLUDE', ''))\n out.writeText(\n \"\"\"#if the training data is already there then just build models (TRUE/FALSE)\nONLY_MODELS:FALSE\\n\"\"\")\n out.close()",
"def create_config(self, memory: str, corpora: str, cur_dir: str, postprocessing: str = \"\") -> Tuple[str, str]:\n _, filepath = mkstemp(dir=cur_dir, suffix=\".xml\")\n memory_file = p.join(cur_dir, \"memory\"+memory)\n with open(filepath, \"w\") as file:\n file.write(TEMPLATE.format(\n memory=memory_file,\n corpora=corpora,\n postprocessing=postprocessing\n ))\n return filepath, memory_file",
"def __create_config_file__(fileparser):\n fileparser['server'] = {\n 'server': Configuration.server + \" # Server IP\",\n 'port': str(Configuration.port) +\n \" # Values allowed: \" + str(Configuration.port_min) +\n \"..\" + str(Configuration.port_max),\n 'certfile': Configuration.certfile +\n \" # Use an absolute path\",\n 'timeout': str(Configuration.timeout) +\n \" # Timeout of the connection request\"\n }\n fileparser['client'] = {\n 'curve1': Configuration.curve1 +\n \" # Values allowed: secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher1': Configuration.cipher1 +\n \" # Values allowed: aes-128-cbc, aes-256-cbc, etc.\",\n 'curve2': Configuration.curve2 +\n \" # Values allowed: None, secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher2': Configuration.cipher2 +\n \" # Values allowed: None, aes-128-cbc, aes-256-cbc, etc.\",\n 'curve3': Configuration.curve3 +\n \" # Values allowed: None, secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher3': Configuration.cipher3 +\n \" # Values allowed: None, aes-128-cbc, aes-256-cbc, etc.\"\n }\n fileparser['ui'] = {\n 'lock': str(Configuration.lock) +\n \" # Lock screen - Values allowed: 0 or a positive integer\",\n 'colour': str(Configuration.colour) +\n \" # If available use colours (1) or not (0)\",\n 'colourB': Configuration.colourB +\n \" # Colour for editable widgets (button, input box...)\",\n 'colourD': Configuration.colourD +\n \" # Colour for decoration (label, frame...)\",\n 'colourT': Configuration.colourT +\n \" # Colour for titles\",\n 'colourM': Configuration.colourM +\n \" # Colour for messages\"\n }\n with open(Configuration.configfile, 'w') as configfile:\n fileparser.write(configfile)\n os.chmod(Configuration.configfile,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IREAD | stat.S_IWRITE)",
"def create_settings_file():\n with open('./cfg/settings.cfg'.replace(\"/\", os.path.sep), 'w') as cfg:\n cfg.write('[report]\\nlogo = ./cfg/logo.png\\ncompany =\\nrecord =\\nunit =\\nexaminer =\\nnotes =\\n\\n[auth]\\ngmail = [email protected]\\npassw = yourpassword\\ndevid = 1234567887654321\\ncelnumbr = BackupPhoneNunmber\\n\\n[app]\\npkg = com.whatsapp\\nsig = 38a0f7d505fe18fec64fbf343ecaaaf310dbd799\\n\\n[client]\\npkg = com.google.android.gms\\nsig = 38918a453d07199354f8b19af05ec6562ced5788\\nver = 9877000'.replace(\"/\", os.path.sep))",
"def make_config(outfile, sample_rate, numof_channels, mode, server = 'localhost', shape = 'None',\n security_mode = False, saving_mode = False, data_file = 'Nofile', format = 'binary',\n resolution = 0.1, returning_speed = 8, channels = 0,\n color_bg = 'white', color_trigger = 'black',\n size_window = (1000, 800)):\n\n\n config = ConfigParser.RawConfigParser()\n\n config.add_section('technics')\n config.add_section('visualization')\n config.add_section('security')\n config.add_section('data')\n\n config.set('technics', 'sample_rate', sample_rate)\n config.set('technics', 'numof_channels', numof_channels)\n config.set('technics', 'server', server)\n config.set('technics', 'resolution', resolution)\n config.set('technics', 'speed', returning_speed)\n if channels == 0:\n channels = range(numof_channels+1)[1:numof_channels+1]\n config.set('technics', 'channels', channels)\n\n config.set('visualization', 'mode', mode)\n config.set('visualization', 'color_bg', color_bg)\n config.set('visualization', 'color_trigger', color_trigger)\n config.set('visualization', 'size_window', size_window)\n\n config.set('security', 'security_mode', security_mode)\n\n config.set('data', 'saving_mode', saving_mode)\n config.set('data', 'file', data_file)\n config.set('data', 'format', format)\n\n config_file = open(outfile, 'w+')\n\n config.write(config_file)",
"def create_configuration(self, context):\n context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg')\n with open(path, 'w', encoding='utf-8') as f:\n f.write('home = %s\\n' % context.python_dir)\n if self.system_site_packages:\n incl = 'true'\n else:\n incl = 'false'\n f.write('include-system-site-packages = %s\\n' % incl)\n f.write('version = %d.%d.%d\\n' % sys.version_info[:3])\n if self.prompt is not None:\n f.write(f'prompt = {self.prompt!r}\\n')",
"def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()",
"def create_configuration(EngineType=None, EngineVersion=None, Name=None, Tags=None):\n pass",
"def new_auto_connection(dsn: dict, name: str = \"DSN\"):\n check_types([(\"dsn\", dsn, [dict],)])\n path = os.path.dirname(verticapy.__file__) + \"/connections.verticapy\"\n confparser = ConfigParser()\n confparser.optionxform = str\n try:\n confparser.read(path)\n except:\n pass\n if confparser.has_section(name):\n confparser.remove_section(name)\n confparser.add_section(name)\n for elem in dsn:\n confparser.set(name, elem, str(dsn[elem]))\n f = open(path, \"w+\")\n confparser.write(f)\n f.close()\n change_auto_connection(name)",
"def create_config(self, context, mgmtport):\n pass",
"def create_config_file():\n working_dir = get_working_dir()\n with open('config.txt', 'w') as config_file:\n config_file.write(working_dir + os.sep + 'LevelProgress.xlsx')",
"def make_config(config):\n config.set(\"dxr\", \"source_folder\", os.path.expanduser(\"~/dxr\"))",
"def create_tx_config_file(self, client_name):\n\n string = render_to_string('create_tenant_setup/txconfig.tpl',\n {'client_name': client_name})\n\n config_path = ''.join([getattr(settings, 'MULTI_TENANT_DIR', None),\n '/', client_name, '/.tx/config'])\n\n with open(config_path, \"w\") as config_file:\n config_file.write(string)",
"def _create_initial_configure_file():\n if not _are_components_configured():\n touch(INITIAL_CONFIGURE_FILE)",
"def generateDefaultConfig(self):\n\n\t\t# Open config.ini in write mode\n\t\tf = open(self.fileName, \"w\")\n\n\t\t# Set keys to config object\n\t\tself.config.add_section(\"db\")\n\t\tself.config.set(\"db\", \"host\", \"localhost\")\n\t\tself.config.set(\"db\", \"username\", \"root\")\n\t\tself.config.set(\"db\", \"password\", \"\")\n\t\tself.config.set(\"db\", \"database\", \"ripple\")\n\t\tself.config.set(\"db\", \"pingtime\", \"600\")\n\n\t\tself.config.add_section(\"server\")\n\t\tself.config.set(\"server\", \"server\", \"tornado\")\n\t\tself.config.set(\"server\", \"host\", \"0.0.0.0\")\n\t\tself.config.set(\"server\", \"port\", \"5001\")\n\t\tself.config.set(\"server\", \"localizeusers\", \"1\")\n\t\tself.config.set(\"server\", \"outputpackets\", \"0\")\n\t\tself.config.set(\"server\", \"outputrequesttime\", \"0\")\n\t\tself.config.set(\"server\", \"timeoutlooptime\", \"100\")\n\t\tself.config.set(\"server\", \"timeouttime\", \"100\")\n\n\t\tself.config.add_section(\"flask\")\n\t\tself.config.set(\"flask\", \"threaded\", \"1\")\n\t\tself.config.set(\"flask\", \"debug\", \"0\")\n\t\tself.config.set(\"flask\", \"logger\", \"0\")\n\n\t\tself.config.add_section(\"ci\")\n\t\tself.config.set(\"ci\", \"key\", \"changeme\")\n\n\t\t# Write ini to file and close\n\t\tself.config.write(f)\n\t\tf.close()",
"def create_config(context, target_repoids, debug, test, tasks, on_aws=False):\n context.makedirs(os.path.dirname(DNF_PLUGIN_DATA_PATH), exists_ok=True)\n with context.open(DNF_PLUGIN_DATA_PATH, 'w+') as f:\n config_data = build_plugin_data(\n target_repoids=target_repoids, debug=debug, test=test, tasks=tasks, on_aws=on_aws\n )\n json.dump(config_data, f, sort_keys=True, indent=2)",
"def testconfig(self):\n\n configuration = Parser.getNodeTag(self, self.xmlDoc, \"configuration\")\n metadatadb = Parser.getNodeTag(self, configuration, \"metadatadb\") \n self.user = Parser.getNodeVal(self, metadatadb, \"user\")\n self.host = Parser.getNodeVal(self, metadatadb, \"host\")\n self.port = Parser.getNodeVal(self, metadatadb, \"port\")\n self.database = Parser.getNodeVal(self, metadatadb, \"database\")\n self.metaDBSchema = Parser.getNodeVal(self, metadatadb, \"schema\")\n \n try:\n self.passwd = Parser.getNodeVal(self, self.metadatadb, \"passwd\")\n self.metaDB = self.user + \"/\" + self.passwd + \"@\" + self.host + \":\" + self.port + \"/\" \\\n + self.database + \":\" + self.metaDBSchema\n except Exception:\n self.metaDB = self.user + \"@\" + self.host + \":\" + self.port + \"/\" + self.database + \":\" \\\n + self.metaDBSchema"
] |
[
"0.71747446",
"0.67635787",
"0.67635787",
"0.6734129",
"0.6716206",
"0.66022563",
"0.65181285",
"0.6445137",
"0.6434882",
"0.6411332",
"0.6348044",
"0.6332934",
"0.61990017",
"0.6172318",
"0.6123248",
"0.61203516",
"0.6118633",
"0.6114111",
"0.6108447",
"0.60713667",
"0.6044218",
"0.59604084",
"0.59293115",
"0.5913779",
"0.58827275",
"0.587918",
"0.58715075",
"0.58710235",
"0.5864783",
"0.5852911"
] |
0.6787651
|
1
|
Builder for requests headers depending on request method
|
def oxe_set_headers(token, method=None):
# basic method GET
headers = {
'Authorization': 'Bearer ' + token,
'accept': 'application/json'
}
# addition for POST & PUT
if method in ('POST', 'PUT'):
headers.update({'Content-Type': 'application/json'})
# addition for DELETE
elif method == 'DELETE':
headers.update({'Content-Type': 'text/plain'})
return headers
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self.headers:\n headers.update(self.headers)\n headers.update(kwargs)\n return headers",
"def _build_headers(self):\n headers = {\n 'Authorization': 'Bearer {api_key}'.format(api_key=self._api_key),\n 'SplitSDKVersion': SDK_VERSION,\n 'Accept-Encoding': 'gzip'\n }\n\n if self._split_sdk_machine_name is not None:\n headers['SplitSDKMachineName'] = self._split_sdk_machine_name() \\\n if callable(self._split_sdk_machine_name) else self._split_sdk_machine_name\n\n if self._split_sdk_machine_ip is not None:\n headers['SplitSDKMachineIP'] = self._split_sdk_machine_ip() \\\n if callable(self._split_sdk_machine_ip) else self._split_sdk_machine_ip\n\n return headers",
"def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers",
"def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers",
"def get_headers(self,\n method,\n url,\n params) -> Dict[str, Any]:\n payload = self.generate_payload(method, url, params)\n headers = {\n \"Authorization\": f\"HS256 {payload}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n return headers",
"def _make_headers() -> CaseInsensitiveDict:\n headers = CaseInsensitiveDict()\n headers['Authorization'] = f'Token {os.environ[\"TOKEN\"]}'\n headers['Content-type'] = 'application/json'\n return headers",
"def build_headers(self):\n\n # User-agent is always sent\n headers = {'user-agent': self.useragent}\n for hdr in self.config.client_standard_headers:\n val = getattr(self.config, 'client_' + hdr.lower().replace('-','_'))\n headers[hdr] = val\n\n return headers",
"def _headers(self) -> dict[str, str]:\n headers = super()._headers()\n headers[\"Authorization\"] = f\"Bearer {self.__token}\"\n return headers",
"async def modify_headers(\n self, site: URL, request: web.Request\n ) -> multidict.MultiDict:\n result = await super().modify_headers(site, request)\n method = request.method\n # result.update({SKIP_AUTO_HEADERS: [\"User-Agent\"]})\n if (\n str(site.path) == \"/oauth2/v3/authorize/mfa/verify\"\n and method == \"POST\"\n and not await request.post()\n ):\n # allow post json to autogenerate headers.\n # https://github.com/timdorr/tesla-api/discussions/316.\n return {}\n return result",
"def _build_headers(self, params: Dict) -> None:\n api_key = self._get_query_api_key(params) or self.user_api_key\n if api_key is None:\n raise RedashApiKeyNotProvidedException('No API key provided')\n self.headers = {\"Authorization\": \"Key {}\".format(api_key)}",
"def _build_http_header(self) -> Dict[str, str]:\n return {}",
"def generate_generic_headers(self):\n return {\n 'accept': 'application/json',\n 'Content-Type': 'application/json'\n }",
"def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }",
"def __headers(content_type='application/json'):\n headers = {\n 'accept': content_type,\n 'content-type': content_type,\n }\n return headers",
"def _api_headers(self, previous_headers=None):\n if self._headers is None:\n auth_headers = {}\n token = self._token\n if token:\n auth_headers[\"Authorization\"] = f\"token {token}\"\n self._headers = auth_headers\n\n if previous_headers is not None:\n headers = self._headers.copy()\n for condition, key in (\n (\"If-Modified-Since\", \"Last-Modified\"),\n (\"If-None-Match\", \"ETag\"),\n ):\n try:\n headers[condition] = previous_headers[key]\n except KeyError:\n continue\n return headers\n\n return self._headers",
"def setup_request_headers(self, uri: str = None) -> dict:\n\n uri = uri if uri is not None else self.host\n headers = {\n \"Accept\": \"application/atom+json,application/json\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en_US\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": self.user_agent,\n \"Referer\": uri + \"/suite/tempo/\",\n \"X-Appian-Cached-Datatypes\": self.datatype_cache.get(),\n \"Cookie\": \"JSESSIONID={}; __appianCsrfToken={}; __appianMultipartCsrfToken={}\".format(\n self.client.cookies.get(\"JSESSIONID\", \"\"),\n self.client.cookies.get(\"__appianCsrfToken\", \"\"),\n self.client.cookies.get(\"__appianMultipartCsrfToken\", \"\"),\n ),\n \"DNT\": \"1\",\n \"X-APPIAN-CSRF-TOKEN\": self.client.cookies.get(\"__appianCsrfToken\", \"\"),\n \"X-APPIAN-MP-CSRF-TOKEN\": self.client.cookies.get(\"__appianMultipartCsrfToken\", \"\"),\n \"X-Appian-Ui-State\": \"stateful\",\n \"X-Appian-Features\": self.client.feature_flag,\n \"X-Appian-Features-Extended\": self.client.feature_flag_extended,\n \"x-libraries-suppress-www-authenticate\": \"true\",\n # this should probably go...\n \"X-Atom-Content-Type\": \"application/html\"\n }\n return headers",
"def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers",
"def __http_build_headers(self, with_authentication):\n\n dynamic_headers = {\n 'timestamp': str(self.__current_milli_time())\n }\n if with_authentication and self.__login_token:\n dynamic_headers['Authorization'] = 'Bearer ' + self.__login_token\n \n dynamic_headers.update(self.__http_default_headers)\n return dynamic_headers",
"def _headers(self):\n\n api_key_bytes = '{0}:'.format(self.api_key).encode()\n authorization = b64encode(api_key_bytes).decode()\n\n headers = {\n 'Authorization': 'Basic {0}'.format(authorization),\n 'User-Agent': USER_AGENT,\n }\n\n if self.config['api_version']:\n headers['X-Button-API-Version'] = self.config['api_version']\n\n return headers",
"def make_headers(self):\n return {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US;\\\n rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}",
"def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers",
"def _create_headers(self, path, parameters=None, encoding=\"ascii\"):\n if parameters is None:\n parameters = dict()\n payload = {\n 'request': path,\n 'nonce': self._nonce()\n }\n payload.update(parameters)\n creds = self._api_credentials\n b64, signature = self._encode_and_sign(payload, encoding)\n headers = {\n # I think these two headers are set by default.\n #'Content-Type': 'text/plain',\n #'Content-Length': 0,\n 'X-GEMINI-PAYLOAD': b64.decode(encoding),\n 'X-GEMINI-APIKEY': creds.api_key,\n 'X-GEMINI-SIGNATURE': signature\n }\n return headers",
"def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers",
"def http_headers(self) -> dict:\n headers = {\"Accept\": \"application/vnd.github.v3+json\"}\n if \"user_agent\" in self.config:\n headers[\"User-Agent\"] = self.config.get(\"user_agent\")\n if \"auth_token\" in self.config:\n headers[\"Authorization\"] = f\"token {self.config['auth_token']}\"\n return headers",
"def _build_common_headers(apikey: str):\n return {\n \"Authorization\": f\"token {apikey}\",\n \"User-Agent\": \"sharing-api-fetcher\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept\": \"application/json\",\n }",
"def __get_headers(self, passed_headers: Dict) -> Dict:\n\n # User-Agent for HTTP request\n library_details = [\n f\"requests {requests.__version__}\",\n f\"python {platform.python_version()}\",\n f\"connector {self.__class__.__name__}\",\n ]\n library_details = \"; \".join(library_details)\n user_agent = f\"Infermedica-API-Python {__version__} ({library_details})\"\n\n headers = {\n \"Accept\": \"application/json\",\n \"User-Agent\": user_agent,\n \"App-Id\": self.app_id,\n \"App-Key\": self.app_key,\n }\n headers.update(self.default_headers)\n headers.update(passed_headers) # Make sure passed headers take precedence\n return headers",
"def get_headers(self):\n # Creating headers.\n headers = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, sdch, br',\n 'accept-language': 'en-GB,en;q=0.8,en-US;q=0.6,ml;q=0.4',\n 'cache-control': 'max-age=0',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}\n return headers",
"def headers():\n return {\n 'user-agent': 'integration-tester',\n 'content-type': 'application/json',\n }",
"def _build_headers(self):\n headers = {}\n headers.update(self.data_sources)\n headers.update(self.seasons)\n headers.update(self.region)\n headers.update(self.subregions)\n return headers",
"def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}"
] |
[
"0.67952037",
"0.67826074",
"0.67234015",
"0.67234015",
"0.6566839",
"0.6520469",
"0.6506166",
"0.64933366",
"0.64751816",
"0.6439939",
"0.6395954",
"0.6390138",
"0.6371061",
"0.635928",
"0.63532174",
"0.63498604",
"0.63317686",
"0.633148",
"0.6325686",
"0.6300463",
"0.6298683",
"0.62932664",
"0.62543786",
"0.6251199",
"0.6235175",
"0.6201911",
"0.6200426",
"0.6186066",
"0.61841035",
"0.6166875"
] |
0.69006896
|
0
|
Assert that actual_list and expected_list are almost equal. ie. assertAlmostEqual(actual_elem, expected_elem) in each list.
|
def assertListAlmostEqual(self, actual_list, expected_list):
self.assertTrue(len(actual_list) == len(expected_list))
for i in xrange(len(actual_list)):
self.assertAlmostEqual(actual_list[i], expected_list[i])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def assertListAlmostEqual(self, list1, list2):\n for i, j in zip(list1, list2):\n rounded_i, rounded_j = self.round_values(i, j)\n self.assertAlmostEqual(rounded_i, rounded_j)",
"def assertTuplesAlmostEqual(self, actual, expected):\n try:\n for a, e in exactZip(actual, expected):\n assertTupleAlmostEqual(self, a, e)\n except AssertionError as e:\n raise AssertionError(\"Lines {0} were expected to be {1}; {2}\".format(actual, expected, e))",
"def assertTupleAlmostEqual(self, actual, expected):\n try:\n for a, e in exactZip(actual, expected):\n self.assertAlmostEqual(a, e)\n except AssertionError as e:\n raise AssertionError(\"Tuple {0} was expected to be {1}; {2}\".format(actual, expected, e))",
"def _list_assert(actual_list, expected_list):\n for actual, expected in itertools.izip_longest(actual_list, expected_list):\n _value_assert(None, actual, expected)",
"def assertDeepAlmostEqual(self, expected, actual, *args, **kwargs):\n kwargs.pop(\"__trace\", \"ROOT\")\n if (\n hasattr(expected, \"__geo_interface__\")\n and hasattr(actual, \"__geo_interface__\")\n and expected.__geo_interface__[\"type\"] == actual.__geo_interface__[\"type\"]\n and expected.__geo_interface__[\"type\"]\n not in [\"Feature\", \"FeatureCollection\"]\n ):\n shape_expected = shape(expected)\n shape_actual = shape(actual)\n assert shape_expected.equals(shape_actual)\n elif isinstance(expected, (int, float, complex)):\n self.assertAlmostEqual(expected, actual, *args, **kwargs)\n elif isinstance(expected, (list, tuple)):\n self.assertEqual(len(expected), len(actual))\n for index in range(len(expected)):\n v1, v2 = expected[index], actual[index]\n self.assertDeepAlmostEqual(v1, v2, __trace=repr(index), *args, **kwargs)\n elif isinstance(expected, dict):\n self.assertEqual(set(expected), set(actual))\n for key in expected:\n self.assertDeepAlmostEqual(\n expected[key], actual[key], __trace=repr(key), *args, **kwargs\n )\n else:\n self.assertEqual(expected, actual)",
"def _compare_list(self, name, actual, expect):\n self.op_test.assertListEqual(\n actual.recursive_sequence_lengths(),\n expect[1],\n \"Output (\" + name + \") has different lod at \" + str(place),\n )",
"def assert_same(result, expect):\n assert sorted(result) == sorted(expect)",
"def assertListOfFilesEqual(self, expected: list, actual: list):\n msg = 'Failed asserting list of files where equal expected'\n self.assertEqual(len(expected), len(actual), msg=msg)\n for i, file in enumerate(expected):\n self.assertEqual(file.filename, actual[i].filename, msg=msg)\n self.assertIsInstance(cls=file.__class__, obj=actual[i], msg=msg)",
"def _compare_list(self, name, actual, expect):\n with fluid.dygraph.base.guard(place=place):\n self.op_test.assertListEqual(\n actual.value()\n .get_tensor()\n .recursive_sequence_lengths(),\n expect[1],\n \"Operator (\"\n + self.op_type\n + \") Output (\"\n + name\n + \") has different lod at \"\n + str(place)\n + \" in dygraph mode\",\n )",
"def assertDeepAlmostEqual(test_case, expected, actual, *args, **kwargs):\n \n is_root = not '__trace' in kwargs\n trace = kwargs.pop('__trace', 'ROOT')\n try:\n if isinstance(expected, (int, float, complex)):\n test_case.assertAlmostEqual(expected, actual, *args, **kwargs)\n elif isinstance(expected, (list, tuple)):\n test_case.assertEqual(len(expected), len(actual))\n for index in range(len(expected)):\n v1, v2 = expected[index], actual[index]\n assertDeepAlmostEqual(test_case, v1, v2,\n __trace=repr(index), *args, **kwargs)\n elif isinstance(expected, dict):\n test_case.assertEqual(set(expected), set(actual))\n for key in expected:\n assertDeepAlmostEqual(test_case, expected[key], actual[key],\n __trace=repr(key), *args, **kwargs)\n else:\n test_case.assertEqual(expected, actual)\n except AssertionError as exc:\n exc.__dict__.setdefault('traces', []).append(trace)\n if is_root:\n trace = ' -> '.join(reversed(exc.traces))\n exc = AssertionError(\"%s\\nTRACE: %s\" % (exc.message, trace))\n raise exc",
"def _compare_expect_and_actual_outputs(\n self, place, fetch_list, expect_outs, actual_outs, inplace_atol=None\n ):\n # compare expect_outs and actual_outs\n for i, name in enumerate(fetch_list):\n # Note(zhiqiu): inplace_atol should be only set when op doesn't ensure\n # computational consistency.\n # When inplace_atol is not None, the inplace check uses numpy.allclose\n # to check inplace result instead of numpy.array_equal.\n expect_out = np.array(expect_outs[i])\n actual_out = np.array(actual_outs[i])\n assert (\n actual_out.shape == expect_out.shape\n ), \"Operator ({}) : Output ({}) shape mismatch, expect shape is {}, but actual shape is {}\".format(\n self.op_type, name, expect_out.shape, actual_out.shape\n )\n if inplace_atol is not None:\n np.testing.assert_allclose(\n expect_out,\n actual_out,\n rtol=1e-05,\n atol=inplace_atol,\n err_msg='Operator ('\n + self.op_type\n + ') Output ('\n + name\n + ') has diff at '\n + str(place)\n + ' when using and not using inplace'\n + '\\nExpect '\n + str(expect_out)\n + '\\n'\n + 'But Got'\n + str(actual_out)\n + ' in class '\n + self.__class__.__name__,\n )\n else:\n np.testing.assert_array_equal(\n expect_out,\n actual_out,\n err_msg='Output ('\n + name\n + ') has diff at '\n + str(place)\n + ' when using and not using inplace'\n + '\\nExpect '\n + str(expect_out)\n + '\\n'\n + 'But Got'\n + str(actual_out)\n + ' in class '\n + self.__class__.__name__\n + '\\n',\n )",
"def assertListEqual(self, list1, list2, msg=None):\r\n self.assertSequenceEqual(list1, list2, msg, seq_type=list)",
"def test_sort_all_equal():\n equal_data = [1, 1, 1, 1, 1]\n sorted_list = bubble_sort(equal_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large",
"def assert_float_lists_equal(expected, received,message=None):\n error = True\n if not type(expected) in [list,tuple]:\n if message is None:\n message = ('assert_float_lists_equal: first argument %s is not a sequence' % repr(expected))\n elif not type(received) in [list,tuple]:\n if message is None:\n message = ('assert_float_lists_equal: second argument %s is not a sequence' % repr(received))\n elif not _check_nested_floats(expected):\n if message is None:\n message = ( 'assert_float_lists_equal: first argument %s has non-numeric values' % repr(expected))\n elif not _check_nested_floats(received):\n if message is None:\n message = ( 'assert_float_lists_equal: second argument %s has non-numeric values' % repr(received))\n elif len(expected) != len(received):\n if message is None:\n message = ( 'assert_float_lists_equal: sequences %s and %s have different sizes' % \n (repr(expected),repr(received)))\n else:\n error = False\n \n if error:\n quit_with_error(message)\n \n test = True\n try:\n if not allclose(expected,received):\n error = True\n if message is None:\n message = 'assert_float_lists_equal: expected %s but instead got %s' % (repr(expected),repr(received))\n except Exception as e:\n error = True\n if message is None:\n message = 'assert_float_lists_equal: sequences %s and %s are not comparable' % (repr(expected),repr(received))\n \n if error:\n quit_with_error(message)",
"def checkTupleAlmostEqualIn(tup, tupList, place):\n for T in tupList:\n length = len(tup)\n if length != len(T):\n continue\n for i in range(length):\n if type(tup[i]) is float:\n if round(tup[i], place) != round(T[i], place):\n break\n else:\n if tup[i] != T[i]:\n break\n if i == length - 1:\n return True\n return False",
"def assert_almost_equal(actual, desired, decimal=7):\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_almost_equal(actual, desired, decimal)",
"def assert_float_lists_not_equal(expected, received,message=None):\n error = True\n if not type(expected) in [list,tuple]:\n if message is None:\n message = ('assert_float_lists_not_equal: first argument %s is not a sequence' % repr(expected))\n elif not type(received) in [list,tuple]:\n if message is None:\n message = ('assert_float_lists_not_equal: second argument %s is not a sequence' % repr(received))\n elif not _check_nested_floats(expected):\n if message is None:\n message = ( 'assert_float_lists_not_equal: first argument %s has non-numeric values' % repr(expected))\n elif not _check_nested_floats(received):\n if message is None:\n message = ( 'assert_float_lists_not_equal: second argument %s has non-numeric values' % repr(received))\n elif len(expected) != len(received):\n return\n else:\n error = False\n \n if error:\n quit_with_error(message)\n \n test = True\n try:\n if allclose(expected,received):\n error = True\n if message is None:\n message = 'assert_float_lists_not_equal: expected something different from %s' % repr(expected)\n except ValueError:\n pass\n except Exception as e:\n error = True\n if message is None:\n message = 'assert_float_lists_not_equal: sequences %s and %s are not comparable' % (repr(expected),repr(received))\n \n if error:\n quit_with_error(message)",
"def testEquals():\n assert isEqual([1, 2, 3], [1, 2, 3])",
"def assert_array_almost_equal(actual, desired, decimal=7):\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_array_almost_equal(actual, desired, decimal)",
"def assert_lists_equal (a, b):\n assert len(a) == len(b)\n for index, expected in enumerate(a):\n actual = b[index]\n if isinstance(actual,str) and isinstance(expected, str) and \\\n actual.isspace() and expected.isspace ():\n continue\n elif isinstance(actual, list) and isinstance(expected, list):\n assert_lists_equal (actual, expected)\n else:\n assert actual == expected",
"def check_lists_equal( list1, list2, assertobject ):\n assertEqualsImproved( len( list1 ), len( list2 ), assertobject )\n for element in list1:\n assertobject.assertIn( element, list2 )",
"def test_check_difference_between_two_lists():\n # same lists, no error\n list1 = list2 = [0, 1, 2]\n util.check_difference_between_two_lists(list1, list2, name=\"same case\")\n\n # diff lists with same unique numbers\n list_1 = [0, 1, 2]\n list_2 = [1, 2, 0]\n with pytest.raises(ValueError) as err_info:\n util.check_difference_between_two_lists(list_1, list_2, name=\"diff case\")\n assert \"diff case are not identical\" in str(err_info.value)\n\n # totally diff lists\n list_1 = [0, 1, 2]\n list_2 = [3, 4, 5]\n with pytest.raises(ValueError) as err_info:\n util.check_difference_between_two_lists(list_1, list_2, name=\"diff case\")\n assert \"diff case are not identical\" in str(err_info.value)",
"def compare_expect_actual_result(self, tensor_info_list, tensor_data_list, test_index):\n golden_file = os.path.realpath(os.path.join(\"../data/dump/gpu_dumps/golden/\",\n self.test_name + \"_expected.json\"))\n with open(golden_file) as f:\n expected_list = json.load(f)\n for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):\n tensor_id = \"tensor_\" + str(test_index + x + 1)\n expect_tensor = expected_list[x + test_index][tensor_id]\n actual_tensor = write_tensor_to_json(tensor_info, tensor_data)\n assert expect_tensor == actual_tensor",
"def almostEqualList(self, l1:List[float], l2:List[float], margin:float):\r\n ret = False\r\n for i in range(0,len(l1)):\r\n diff = abs(l1[i] - l2[i])\r\n if diff < margin:\r\n ret = True\r\n else:\r\n return False\r\n return ret",
"def testNotEquals():\n assert not isEqual([1, 2, 3], [1, 2, 3, 4])\n assert not isEqual([1, 2, 3, 4], [1, 2, 3])",
"def assert_almost_equal(self, val1, val2, delta):\n return self.assertTrue(\n 0 <= abs(val1 - val2) <= delta,\n \"Absolute difference of {} and {} ({}) is not within {}\".format(\n val1,\n val2,\n abs(val1-val2),\n delta,\n ),\n )",
"def count_equal(a_list: list, b_list: list) -> None:\n a_list, b_list = sorted(a_list), sorted(b_list)\n assert len(a_list) == len(b_list)\n assert all([a == b for a, b in zip(a_list, b_list)])",
"def test_angstroms_validate_list(self):\n angstrom = inches_to.angstroms([1.0, 2.0, 3.0, 4.0])\n comparison = np.array([2.54e8, 2*2.54e8, 3*2.54e8, 4*2.54e8])\n\n try:\n for i in range(len(comparison)):\n self.assertTrue(math.isclose(angstrom[i], comparison[i], rel_tol=self.accepted_error))\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.passed))\n except AssertionError:\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.failed))",
"def assert_allclose(\n actual, desired, rtol=None, atol=0.0, equal_nan=True, err_msg=\"\", verbose=True\n):\n dtypes = []\n\n actual, desired = np.asanyarray(actual), np.asanyarray(desired)\n dtypes = [actual.dtype, desired.dtype]\n\n if rtol is None:\n rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]\n rtol = max(rtols)\n\n np_assert_allclose(\n actual,\n desired,\n rtol=rtol,\n atol=atol,\n equal_nan=equal_nan,\n err_msg=err_msg,\n verbose=verbose,\n )",
"def check_equivalent(self, a, b):\n assert len(a) == len(b)\n for x, y in zip(a, b):\n assert self.is_equal(x, y)"
] |
[
"0.83103",
"0.81242037",
"0.7785256",
"0.73372203",
"0.732838",
"0.72245044",
"0.7105452",
"0.70458096",
"0.69843453",
"0.6927921",
"0.68653214",
"0.68061745",
"0.6747581",
"0.6736418",
"0.67044675",
"0.66939676",
"0.66726786",
"0.6670484",
"0.6584054",
"0.657256",
"0.65508825",
"0.6545986",
"0.65058225",
"0.65001273",
"0.6436515",
"0.64317906",
"0.6428675",
"0.63650703",
"0.63524103",
"0.634293"
] |
0.936699
|
0
|
Sample n_samples from the model. Sample from prior and create ldj. Then invert the flow and invert the logit_normalize.
|
def sample(self, n_samples):
z = sample_prior((n_samples,) + self.flow.z_shape)
ldj = torch.zeros(z.size(0))
z, ldj = self.flow (z, ldj, reverse=True)
z, ldj = self.logit_normalize(z, ldj, reverse=True)
return z
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sample_from_prior(self, n_samples):\n\n p0 = self.rng.lognormal(mean=self.mean, sigma=self.sigma, size=n_samples)\n return p0[:, np.newaxis]",
"def sample_from_prior(self, n_samples):\n\n lamda = np.abs(self.rng.standard_cauchy(size=n_samples))\n\n p0 = np.log(np.abs(self.rng.randn() * lamda * self.scale))\n return p0[:, np.newaxis]",
"def generate_samples(self, n_samples=100):\n \t\t\n\t\t#how many times should ancestral sampling be run\n\t\t#n_samples\n prior_samples=[]\n for i in range(0,n_samples):\n prior_sample = self.prior.get_samples(\n n_latent_nodes=self.n_latent_nodes,\n n_gibbs_sampling_steps=100, \n sampling_mode=\"gibbs_ancestral\")\n prior_sample = torch.cat(prior_sample)\n prior_samples.append(prior_sample)\n prior_samples=torch.stack(prior_samples)\n # prior_samples = tf.slice(prior_samples, [0, 0], [num_samples, -1])\n output_activations = self.decoder.decode(prior_samples)\n output_activations = output_activations+self._train_bias\n output_distribution = Bernoulli(logit=output_activations)\n output=torch.sigmoid(output_distribution.logits)\n # output_activations[0] = output_activations[0] + self.train_bias\n # output_dist = FactorialBernoulliUtil(output_activations)\n # output_samples = tf.nn.sigmoid(output_dist.logit_mu)\n # print(\"--- \",\"end VAE::generate_samples()\")\n return output",
"def sample(self, n_samples, sample_seed):\n self.seed_samples(sample_seed)\n eps = torch.randn(self.batch_size, n_samples, self.Y_dim)\n samples = eps*torch.exp(0.5*self.logvar.unsqueeze(1)) + self.mu.unsqueeze(1)\n samples = self.unwhiten_back(samples)\n samples = samples.data.cpu().numpy()\n return samples",
"def sample_from_prior(self, n_samples):\n pass",
"def NLL(sample, params):\n mu = params[:,:,0]\n logsigma = params[:,:,1]\n \n c = normalization.to(mu.device)\n inv_sigma = torch.exp(-logsigma)\n tmp = (sample - mu) * inv_sigma\n return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))",
"def _sample_without_replacement(logits, n_samples):\n z = -K.log(-K.log(K.random_uniform(K.shape(logits))))\n return K.tf.nn.top_k(logits+z, k=n_samples)[1]",
"def latent_sample(self, mu, logvar):\n\n # the reparameterization trick\n std = logvar.mul(0.5).exp_()\n eps = torch.empty_like(std).normal_()\n return eps.mul(std).add_(mu)",
"def generate_samples(self,n_samples=100):\n rnd_input=torch.randn((n_samples,self._reparam_nodes[1]))\n zeta=rnd_input \n # rnd_input=torch.where((rnd_input>0.5),torch.ones(rnd_input.size()),torch.zeros(rnd_input.size()))\n # print(rnd_input) \n # output, mu, logvar, zeta=self.forward(rnd_input)\n # mu = self._reparam_layers['mu'](rnd_input)\n # logvar = self._reparam_layers['var'](rnd_input)\n # zeta = self.reparameterize(mu, logvar)\n output = self.decoder.decode(zeta)\n return output",
"def sample_low_rank(self, n_samples, mu, logvar, F):\n #F = torch.unsqueeze(F, dim=1).repeat(1, n_samples, 1, 1) # [self.batch_size, n_samples, self.Y_dim, self.rank]\n F = F.repeat(n_samples, 1, 1) # [self.batch_size*n_samples, self.Y_dim, self.rank]\n mu = mu.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n logvar = logvar.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n eps_low_rank = torch.randn(self.batch_size*n_samples, self.rank, 1)\n eps_diag = torch.randn(self.batch_size*n_samples, self.Y_dim)\n half_var = torch.exp(0.5*logvar) # [self.batch_size*n_samples, self.Y_dim]\n samples = torch.bmm(F, eps_low_rank).squeeze() + mu + half_var*eps_diag\n samples = samples.reshape(n_samples, self.batch_size, self.Y_dim)\n samples = samples.transpose(0, 1)\n samples = self.unwhiten_back(samples)\n samples = samples.data.cpu().numpy()\n return samples",
"def log_prior_grad(self, inputs):",
"def posterior_sampler(self, nsamples, seed=0, verbose=True):\n\n import random\n\n random.seed(seed)\n sample = self.get_chain()[-self.get_tune:]\n sample = sample.reshape(-1, sample.shape[-1])\n sample = random.choices(sample, k=nsamples)\n\n return sample",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def generate_samples(model, get_mu_sigma, n_samples=36, \n inpaint=False, denoise_sigma=None, logr_grad=None,\n X_true=None,\n base_fname_part1=\"samples\", base_fname_part2='',\n num_intermediate_plots=4, seed=12345):\n # use the same noise in the samples every time, so they're easier to\n # compare across learning\n rng = np.random.RandomState(seed)\n\n spatial_width = model.spatial_width\n n_colors = model.n_colors\n\n # set the initial state X^T of the reverse trajectory\n XT = rng.normal(size=(n_samples,n_colors,spatial_width,spatial_width))\n if denoise_sigma is not None:\n XT = X_true + XT*denoise_sigma\n base_fname_part1 += '_denoise%g'%denoise_sigma\n if inpaint:\n mask = generate_inpaint_mask(n_samples, n_colors, spatial_width)\n XT.flat[mask] = X_true.flat[mask]\n base_fname_part1 += '_inpaint'\n if logr_grad is not None:\n base_fname_part1 += '_logrperturb'\n else:\n mask = None\n\n if X_true is not None:\n viz.plot_images(X_true, base_fname_part1 + '_true' + base_fname_part2)\n viz.plot_images(XT, base_fname_part1 + '_t%04d'%model.trajectory_length + base_fname_part2)\n\n Xmid = XT.copy()\n for t in xrange(model.trajectory_length-1, 0, -1):\n Xmid = diffusion_step(Xmid, t, get_mu_sigma, denoise_sigma, mask, XT, rng, \n model.trajectory_length, logr_grad)\n if np.mod(model.trajectory_length-t,\n int(np.ceil(model.trajectory_length/(num_intermediate_plots+2.)))) == 0:\n viz.plot_images(Xmid, base_fname_part1 + '_t%04d'%t + base_fname_part2)\n\n X0 = Xmid\n viz.plot_images(X0, base_fname_part1 + '_t%04d'%0 + base_fname_part2)",
"def generate_latent_samples(n_samples, sample_size):\n return np.random.normal(loc=0, scale=1, size=(n_samples, sample_size))",
"def generate_latent_samples(n_samples, sample_size):\n return np.random.normal(loc=0, scale=1, size=(n_samples, sample_size))",
"def train_step(self, samples):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n\n # forward and backward pass\n self.meters['wps'].start()\n logs = []\n sample_sizes_l2r = []\n oom = 0\n losses = []\n for sample in samples:\n try:\n output = get_log(self.args, self.model, sample)\n loss_l2r, sample_size_l2r, nll_loss, log = output\n loss = loss_l2r / sample_size_l2r\n loss.backward()\n\n sample_sizes_l2r.append(sample_size_l2r)\n logs.append(log)\n losses.append(loss.item())\n\n except RuntimeError as e:\n if 'out of memory' in str(e):\n oom += 1\n self.model.zero_grad()\n else:\n raise e\n if oom == len(samples):\n logger.warn('Ran out of memory, skipping batch')\n return None\n # gather logging outputs from all replicas\n log = agg_logs(logs)\n log['loss'] = sum(losses)\n\n try:\n # all-reduce and rescale gradients, then take an optimization step\n grad_norm = self.scale_clip_grad_(len(samples) - oom)\n # take an optimization step\n self.optimizer.step()\n self.optimizer.zero_grad()\n ntok_l2r = log.get('ntok_l2r', 0)\n nsentences = log.get('nsentences', 0)\n # update meters\n self.meters['wps'].stop(ntok_l2r)\n if grad_norm is not None:\n self.meters['gnorm'].update(grad_norm)\n self.meters['clip'].update(1. if grad_norm > self.args.clip_norm else 0.)\n self.meters['oom'].update(oom)\n\n except OverflowError as e:\n self.optimizer.zero_grad()\n logger.warn('Overflow detected during parameters update, {str(e)}')\n except RuntimeError as e:\n if 'out of memory' in str(e):\n self.model.zero_grad()\n return None\n else:\n raise e\n\n return log",
"def sampling_latent(self):\n \n self.z = gaussian_sample(self.qz_m, self.qz_v)\n if self.scalings:\n self.library = gaussian_sample(self.ql_m, self.ql_v)",
"def nll_iw(self, input_batch, nsamples, ns=100, graph_vectors=None, tree_vectors=None):\n\n # compute iw every ns samples to address the memory issue\n # nsamples = 500, ns = 100\n # nsamples = 500, ns = 10\n\n tree_batch, graph_encoder_input, tree_encoder_input = input_batch\n\n tmp = []\n batch_size = len(tree_batch)\n if graph_vectors is None:\n graph_vectors, tree_vectors = self.encode(graph_encoder_input, tree_encoder_input)\n for _ in range(int(nsamples / ns)):\n # [batch, ns, nz]\n (z_vec, graph_z_vec, tree_z_vec), (entropy, log_pz) = self.rsample(graph_vectors, tree_vectors, ns)\n\n # [batch, ns], log p(x,z)\n graph_z_vec_reshaped = graph_z_vec.reshape(batch_size * ns, self.latent_dim)\n tree_z_vec_reshaped = tree_z_vec.reshape(batch_size * ns, self.latent_dim)\n rep_tree_batch = []\n for rna in tree_batch:\n for _ in range(ns):\n rep_tree_batch.append(copy.deepcopy(rna))\n ret_dict = self.decoder(rep_tree_batch, tree_z_vec_reshaped, graph_z_vec_reshaped)\n recon_log_prob = - ret_dict['batch_nuc_pred_loss'].reshape(batch_size, ns) - \\\n ret_dict['batch_hpn_pred_loss'].reshape(batch_size, ns) - \\\n ret_dict['batch_stop_pred_loss'].reshape(batch_size, ns)\n log_comp_ll = log_pz[:, :, 0] + recon_log_prob\n\n # log q(z|x)\n log_infer_ll = self.eval_inference_dist(\n input_batch, z_vec,\n param=(torch.cat([self.g_mean(graph_vectors), self.t_mean(tree_vectors)], dim=-1),\n -torch.abs(torch.cat([self.g_var(graph_vectors), self.t_var(tree_vectors)], dim=-1))))\n\n tmp.append(log_comp_ll - log_infer_ll)\n\n ll_iw = log_sum_exp(torch.cat(tmp, dim=-1), dim=-1) - np.log(nsamples)\n\n return -ll_iw",
"def nll_logprobs(self, input, target_idx):\n raise NotImplementedError()",
"def resample_maxent(model, length, context, least_n=10, resample_num=5, num_samples=1, temperature=0.004, repetition_penalty=1.0,\n top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):\n\n generated = context\n idxs = torch.arange(generated.shape[1], device=device)[-length:]\n with torch.no_grad():\n inputs = {'input_ids': generated}\n input_ids = generated\n perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]),\n dtype=torch.float, device=device)\n perm_mask[:, idxs, idxs] = 1.0\n target_mapping = torch.zeros((1, len(idxs), input_ids.shape[1]), dtype=torch.float, device=device)\n target_mapping[0, torch.arange(len(idxs)), idxs] = 1.0\n\n inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}\n outputs = model(**inputs)\n\n # next_token_logits = outputs[0][0, update_pos, :] / temperature\n next_token_logits = outputs[0][0,:,:] / (temperature if temperature > 0 else 1.)\n current_probabilities = next_token_logits[torch.arange(next_token_logits.shape[0]), context[0, -length:]]\n relative_min_index = random.choices(current_probabilities.argsort()[:min(least_n, len(current_probabilities))])\n # current_probabilities.argmin() if random.random() < 0.5 else random.choices(torch.arange(len(current_probabilities)))\n\n min_porb_index = idxs[relative_min_index] #index of word in context\n next_token_logits = next_token_logits[relative_min_index] #n_vocab\n filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n if temperature == 0: #greedy sampling:\n next_token = torch.argmax(filtered_logits).unsqueeze(0)\n else:\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n\n generated[:, min_porb_index] = next_token[0]\n\n return generated, [min_porb_index]",
"def sample_from_prior(self, n_samples):\n\n p0 = self.rng.normal(loc=self.mean, scale=self.sigma, size=n_samples)\n return p0[:, np.newaxis]",
"def sample_latent(self, x):\n latent_dist = self.encoder(x)\n latent_sample = self.reparameterize(*latent_dist)\n return latent_sample",
"def sampled_softmax_logprobs(self, logit_target_in_model, logit_noise_in_model, logit_noise_in_noise, logit_target_in_noise):\n logits = torch.cat([logit_target_in_model.unsqueeze(2), logit_noise_in_model], dim=2)\n q_logits = torch.cat([logit_target_in_noise.unsqueeze(2), logit_noise_in_noise], dim=2)\n # subtract Q for correction of biased sampling\n logits = logits - q_logits\n logproba = torch.nn.LogSoftmax(dim=-1)(logits)\n \n labels = torch.zeros_like(logits.narrow(2, 0, 1)).squeeze(2).long()\n \n logprobs = -torch.nn.NLLLoss(reduction='none')(logproba.view(-1, logproba.size(-1)), labels.view(-1)).view_as(labels)\n logprobs = torch.sum(logprobs, dim=1)\n \n return logprobs",
"def train_on_first_frame(model, im, label, optimizer, n_bins):\n log_p, logdet = model(im + torch.rand_like(im) / n_bins, label=label)\n loss, log_p, log_det = calc_loss(log_p, logdet, args.img_size, n_bins)\n model.zero_grad()\n loss.backward()\n norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 50) # 100 times the parameters, 10 times the norm\n log_value('norm', norm, i)\n if model.l_embs.grad is not None:\n log_value('label_emb_std', model.l_embs.data.std().item(), i)\n log_value('label_emb_grad_std', model.l_embs.grad.std().item(), i)\n log_value('label_emb_grad_norm', model.l_embs.grad.norm(). item(), i)\n optimizer.step()\n return loss, log_p, log_det",
"def llf(self):\n return self.model.loglike(self.params)"
] |
[
"0.6363827",
"0.6241979",
"0.59031504",
"0.5891487",
"0.5890565",
"0.58652526",
"0.58364046",
"0.5712361",
"0.5668051",
"0.56108147",
"0.55263615",
"0.55153114",
"0.54841155",
"0.54841155",
"0.54841155",
"0.54841155",
"0.54841155",
"0.5463291",
"0.5456113",
"0.5456113",
"0.5453017",
"0.54411477",
"0.5393783",
"0.53924334",
"0.5381117",
"0.53683156",
"0.53661907",
"0.5365719",
"0.5351395",
"0.53362685"
] |
0.73537606
|
0
|
Run a train and validation epoch and return average bpd for each.
|
def run_epoch(model, data, optimizer, epoch):
traindata, valdata = data
model.train()
train_bpd = epoch_iter(model, traindata, optimizer, epoch)
model.eval()
val_bpd = epoch_iter(model, valdata, optimizer, epoch)
return train_bpd, val_bpd
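The snippet above delegates the per-split work to an `epoch_iter` helper that is not shown in this row. A minimal sketch of such a helper is given below, assuming the model returns a per-sample log-likelihood in nats and that the data dimensionality `x_dim` is known; the names, the default `x_dim`, and the nats-to-bits conversion are illustrative assumptions, not part of the dataset.

import numpy as np

def epoch_iter(model, data, optimizer, epoch, x_dim=784):
    # One pass over `data`; returns the average bits-per-dimension (bpd).
    total_bpd = 0.0
    for inputs, _targets in data:
        log_px = model(inputs)        # assumed: per-sample log p(x) in nats
        loss = -log_px.mean()

        if model.training:            # only update parameters in train mode
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        total_bpd += loss.item() / (x_dim * np.log(2))  # nats -> bits per dim
    return total_bpd / len(data)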
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run_epoch(self, dataloader, train=True):\n losses = []\n accs = []\n for imgs, targets in dataloader:\n imgs, targets = imgs.to(self.device), targets.to(self.device)\n\n # calc. the losses\n output = self.resnet(imgs)\n loss = self.criterion(output, targets)\n\n if train:\n # update the parameters\n self.optimizer.zero_grad() # initialize gradients\n loss.backward()\n self.optimizer.step()\n\n # save training results\n if self.total_steps % 10 == 0:\n accuracy = self.calc_batch_accuracy(output, targets)\n accs.append(accuracy.item())\n losses.append(loss.item())\n self.log_performance(self.summary_writer,\n {'loss': loss.item(), 'acc': accuracy.item()},\n self.epoch,\n self.total_steps)\n\n if self.total_steps % 100 == 0:\n self.save_module_summary(\n self.summary_writer, self.resnet.module, self.total_steps)\n\n self.total_steps += 1\n else: # no training - validation\n accuracy = self.calc_batch_accuracy(output, targets)\n accs.append(accuracy.item())\n losses.append(loss.item())\n\n avg_loss = sum(losses) / len(losses)\n avg_acc = sum(accs) / len(accs)\n return avg_loss, avg_acc",
"def run_epoch(self, sess, epoch_num, validate=True):\n total_loss = 0\n accuracies = []\n for i in range(self.batches_per_epoch):\n batch = self.loader.get_batch()\n if self.config.print_every and i % self.config.print_every == 0:\n if validate:\n val_accuracy = self.eval_validation_accuracy()\n print(\"step {}, validation accuracy {:.3f}\".format(i, val_accuracy))\n accuracies.append((i + epoch_num * self.batches_per_epoch, val_accuracy))\n else:\n if self.include_coverage and self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2], batch[3])\n elif self.include_coverage:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n elif self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n else:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1])\n print(\"step {}, training accuracy {:.3f}\".format(i, train_accuracy))\n \n if self.include_coverage and self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.e: batch[2], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_coverage:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.e: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n else:\n attention, _, loss_val = sess.run([self.attention, self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.y_: batch[1],\n self.keep_prob: 1-self.config.dropout_prob})\n\t\tpdb.set_trace()\n\t\tnp.savetxt(\"a.csv\", attention[0], delimiter=\",\")\n total_loss += loss_val\n\n return total_loss / self.batches_per_epoch, accuracies",
"def _run_epoch(self, train_loader, valid_loader, threshold):\n # set model in train mode and run a train pass\n self.net.train()\n train_loss, train_metric = self._train_epoch(train_loader, threshold)\n\n # set model in eval mode and validate epoch\n self.net.eval()\n val_loss, val_metric = self._validate_epoch(valid_loader, threshold)\n self.epoch_counter += 1\n\n print(\"Epoch: {}\".format(self.epoch_counter))\n print(\"LOSS - Training : [{}], Validation : [{}]\".format(round(train_loss, 4), round(val_loss, 4)))\n print(\"METRIC - Training : [{}], Validation : [{}]\".format(round(train_metric, 4), round(val_metric, 4)))\n return val_loss, val_metric",
"def run_epoch(self, train, dev, epoch):\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, self.config.batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss= self.sess.run(\n [self.train_op, self.loss], feed_dict=fd)\n\n# =============================================================================\n# # tensorboard\n# if i % 10 == 0:\n# self.file_writer.add_summary(summary, epoch*nbatches + i)\n# =============================================================================\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n print(msg)\n\n return metrics[\"f1\"]",
"def run_epoch(self, train, dev, epoch):\n # progbar stuff for logging\n batch_size = self.config.batch_size\n nbatches = (len(train) + batch_size - 1) // batch_size\n prog = Progbar(target=nbatches)\n\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss, summary = self.sess.run(\n [self.train_op, self.loss, self.merged], feed_dict=fd)\n\n prog.update(i + 1, [(\"train loss\", train_loss)])\n\n # tensorboard\n if i % 10 == 0:\n self.file_writer.add_summary(summary, epoch*nbatches + i)\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n\n return metrics[\"f1\"]",
"def _train_epoch(self, train_batches, dropout_keep_prob):\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n for bitx, batch in enumerate(train_batches, 1):\n #self.check(batch)\n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.p_char: batch['passage_char_ids'],\n self.q_char: batch['question_char_ids'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.dropout_keep_prob: dropout_keep_prob}\n _, loss = self.sess.run([self.train_op, self.loss], feed_dict)\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n return 1.0 * total_loss / total_num",
"def train_epoch(args, loss_func, pbar, train_loader, model, optimizer,\n train_bpd, train_recon_error , train_perplexity):\n model.train()\n # Loop data in epoch\n for x, _ in train_loader:\n\n # This break used for debugging\n if args.max_iterations is not None:\n if args.global_it > args.max_iterations:\n break\n\n x = x.to(args.device)\n\n # Get reconstruction and vector quantization loss\n # `x_prime`: reconstruction of `input`\n # `vq_loss`: MSE(encoded embeddings, nearest emb in codebooks)\n x_prime, vq_loss, perplexity = model(x)\n\n loss, log_pxz, bpd = loss_func(args, x_prime, x, vq_loss, model)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_bpd.append((-1)*bpd.item())\n train_recon_error.append((-1)*log_pxz.item())\n train_perplexity.append(perplexity.item())\n\n # Print Average every 100 steps\n if (args.global_it+1) % 100 == 0:\n av_bpd = np.mean(train_bpd[-100:])\n av_rec_err = np.mean(train_recon_error[-100:])\n av_ppl = np.mean(train_perplexity[-100:])\n if args.model == 'vqvae':\n pbar.print_train(bpd=float(av_bpd), rec_err=float(av_rec_err),\n increment=100)\n elif args.model == 'diffvqvae':\n pbar.print_train(bpd=float(av_bpd), temp=float(model.temp),\n increment=100)\n args.global_it += 1",
"def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo",
"def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo",
"def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)",
"def compute_epoch(self, dataset, validation=False):\n\n if validation:\n self.model.eval()\n else:\n # self.model.train()\n # deal with pretrained models.\n if self.opt.freeze_encoder:\n self.model.encoder.eval()\n else:\n self.model.encoder.train()\n\n if self.opt.freeze_decoder:\n self.model.decoder.eval()\n self.model.generator.eval()\n else:\n self.model.decoder.train()\n self.model.generator.train()\n\n total_loss, n_word_total, n_word_correct = 0, 0, 0\n \n\n label = \"Training\" if not validation else \"Validation\"\n for batch in tqdm(dataset, desc=' - '+label, leave=False, dynamic_ncols=True):\n \n # prepare data\n src_seq, src_pos, tgt_seq, tgt_pos = map(\n lambda x: x.to(self.device), batch)\n\n \n gold = tgt_seq[:, 1:]\n if not validation:\n self.optimiser.zero_grad()\n # compute forward propagation\n pred = self.model(src_seq, src_pos, tgt_seq, tgt_pos)\n \n # compute performance\n loss, n_correct = self.performance(\n pred.view(-1, pred.size(2)), \n gold, \n smoothing=self.opt.label_smoothing)\n\n if not validation:\n # backwards propagation\n loss.backward()\n # update parameters\n self.optimiser.step_and_update_lr()\n else:\n if self.opt.log:\n # generate outputs\n self.save_eval_outputs(pred)\n\n # bartending outputs.\n total_loss += loss.detach().item()\n n_word_total += gold.ne(self.constants.PAD).sum().detach().item()\n n_word_correct += n_correct\n\n loss_per_word = total_loss/n_word_total\n accuracy = n_word_correct/n_word_total\n\n return loss_per_word, accuracy",
"def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()",
"def run_epoch(self, epoch, data_loader, training=False):\n if training:\n self.model.train()\n else:\n self.model.eval()\n\n epoch_metrics = {\"loss\": 0.0}\n overall_parsing_counts = {\"correct\": 0, \"predicted\": 0, \"gold\": 0}\n num_evaluated_batches = 0\n\n with torch.set_grad_enabled(training):\n for sentences, target in data_loader:\n # Run model\n target = self._to_device(target)\n output, parsing_counts = self.parser.evaluate_batch(sentences)\n\n # Compute loss\n output, target = self._unroll_sequence_batch(output), self._unroll_sequence_batch(target)\n loss = self.criterion(output, target)\n\n # Add metrics to overall total\n epoch_metrics[\"loss\"] += loss.item()\n for count in \"gold\", \"predicted\", \"correct\":\n overall_parsing_counts[count] += parsing_counts[count]\n\n # Perform backpropagation (when training)\n if training:\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Print progress\n num_evaluated_batches += 1\n self.logger.debug('{} Epoch: {} {} Loss: {:.6f}'.format(\n \"Training\" if training else \"Validation\",\n epoch,\n self._progress(num_evaluated_batches, data_loader),\n loss.item()))\n\n epoch_metrics.update(self.compute_prf(overall_parsing_counts))\n\n return epoch_metrics",
"def train(self, training, epochs, group):\n for epoch in range(epochs):\n self.input_matrix={}\n self.back_propagation_learning(training)\n acc = accuracy(self, group)\n print(\"Accuracy on epoch {} is {} \".format(epoch, acc))",
"def epoch_iter(model, data, optimizer, epoch):\n total_bpd = 0.0\n for i, (inputs, _targets) in enumerate(data):\n log_px = model.forward(inputs)\n loss = -log_px.mean()\n\n if model.training:\n # update gradients\n model.zero_grad()\n loss.backward()\n optimizer.step()\n\n # per dimension\n bpd = loss.item() / (x_dim * np.log(2))\n print({'epoch': epoch, 'i':i, 'loss':loss.item(), 'bpd':bpd})\n total_bpd += bpd\n\n avg_bpd = total_bpd / len(data)\n return avg_bpd",
"def run_epoch(session, model, dataset,\n keep_prob=1.0, passes=1.0, verbose=False):\n num_batches = dataset.num_batches\n start_time = time.time()\n train_cost = train_accy = valid_cost = valid_accy = 0.0\n train_evals = valid_evals = 0.0\n dot_count = 0\n total_steps = int(passes*num_batches)\n prog_int = total_steps/100 # progress interval for stdout\n\n if not num_batches > 0:\n raise RuntimeError(\"batch_size*num_unrollings is larger \"\n \"than the training set size.\")\n\n dataset.rewind() # make sure we start a beggining\n\n print(\"batches: %d \"%num_batches,end=' ')\n\n for step in range(total_steps):\n batch = dataset.next_batch()\n\n (tcost, taccy, tevals,\n vcost, vaccy, vevals) = model.train_step(session, batch,\n keep_prob=keep_prob)\n\n train_cost += tcost\n train_accy += taccy\n train_evals += tevals\n valid_cost += vcost\n valid_accy += vaccy\n valid_evals += vevals\n\n if ( verbose and ((prog_int<=1) or\n (step % (int(prog_int)+1)) == 0) ):\n dot_count += 1\n print('.',end='')\n sys.stdout.flush()\n\n if verbose:\n print(\".\"*(100-dot_count),end='')\n print(\" passes: %.2f train iters: %d valid iters: %d \"\n \"speed: %.0f seconds\" % (passes,\n train_evals,\n valid_evals,\n (time.time() - start_time)) )\n sys.stdout.flush()\n\n return (train_cost/train_evals,\n 1.0 - train_accy/train_evals,\n valid_cost/valid_evals,\n 1.0 - valid_accy/valid_evals)",
"def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs",
"def train_step(self):\r\n batch_images = next(self.data_loader.next_batch())\r\n _, loss, summary, ea = self.sess.run([self.model.train_op, self.model.total_loss, self.model.merged, self.model.euclidean_a_p],\r\n feed_dict={self.model.input: batch_images, self.model.is_training: True})\r\n \r\n return loss, summary",
"def train(self):\n not_improved_count = 0\n best_validation_fscore = 0.0\n\n for epoch in range(self.start_epoch, self.max_epochs + 1):\n # Perform one training epoch and output training metrics\n training_metrics = self.run_epoch(epoch, self.train_data_loader, training=True)\n self.logger.info(\"Training epoch {} finished.\".format(epoch))\n self.log_metrics(training_metrics)\n\n # Perform one validation epoch and output validation metrics\n validation_metrics = self.run_epoch(epoch, self.valid_data_loader, training=False)\n self.logger.info(\"Validation epoch {} finished.\".format(epoch))\n self.log_metrics(validation_metrics)\n\n # Check if model is new best according to validation F1 score\n improved = validation_metrics[\"fscore\"] > best_validation_fscore\n if improved:\n best_validation_fscore = validation_metrics[\"fscore\"]\n not_improved_count = 0\n else:\n not_improved_count += 1\n\n if improved or epoch % self.save_period == 0:\n self._save_checkpoint(epoch, is_best=improved)\n\n if not_improved_count > self.early_stop and epoch >= self.min_epochs:\n self.logger.info(\"Validation performance didn\\'t improve for {} epochs. \"\n \"Training stops.\".format(self.early_stop))\n break",
"def run_epoch(epoch, lr, best_val_ppl):\n epoch_start_time = time.time()\n train(model, corpus.train, epoch=epoch, lr=lr, weight_decay=args.weight_decay)\n val_ppl = evaluate(model, corpus.valid)\n logger.warning(\n '| end of epoch {:3d} | time: {:5.2f}s |'\n 'valid ppl {:8.2f}'.format(\n epoch,\n (time.time() - epoch_start_time),\n val_ppl)\n )\n torch.save(model, model_path + '.epoch_{}'.format(epoch))\n # Save the model if the validation loss is the best we've seen so far.\n if not best_val_ppl or val_ppl < best_val_ppl:\n torch.save(model, model_path)\n best_val_ppl = val_ppl\n else:\n # Anneal the learning rate if no improvement has been seen in the\n # validation dataset.\n lr /= args.lr_decay\n return lr, best_val_ppl",
"def test_model(self, batch_size):\n (_, gen_val, gen_test) = self.dataset.data_loaders(\n batch_size=batch_size,\n split=(0.01, 0.01)\n )\n print('Num Test Batches: ', len(gen_test))\n mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)\n print('Test Epoch:')\n print(\n '\\tTest Loss: ', mean_loss_test, '\\n'\n '\\tTest Accuracy: ', mean_accuracy_test * 100\n )",
"def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1",
"def train_epoch(self, data_loader):\n self.model.train()\n\n # Prepare summary information\n summary = dict()\n sum_loss = 0\n\n # Loop over training batches\n for i, (batch_input, batch_target) in enumerate(data_loader):\n batch_input = [a.to(self.device) for a in batch_input]\n batch_target = batch_target.to(self.device)\n\n # Compute target weights on-the-fly for loss function\n batch_weights_real = batch_target * self.real_weight\n batch_weights_fake = (1 - batch_target) * self.fake_weight\n batch_weights = batch_weights_real + batch_weights_fake\n\n # Train on this batch\n self.model.zero_grad()\n batch_output = self.model(batch_input)\n batch_loss = self.loss_func(batch_output, batch_target, weight=batch_weights)\n batch_loss.backward()\n self.optimizer.step()\n sum_loss += batch_loss.item()\n self.logger.debug(' train batch %i, loss %f', i, batch_loss.item())\n\n # Summarize the epoch\n n_batches = i + 1\n summary['lr'] = self.optimizer.param_groups[0]['lr']\n summary['train_loss'] = sum_loss / n_batches\n self.logger.debug(' Processed %i batches', n_batches)\n self.logger.debug(' Current LR %f', summary['lr'])\n self.logger.info(' Training loss: %.3f', summary['train_loss'])\n return summary",
"def get_training_stats(mlp, dset, nepochs, batch_size):\n train, val, test = dset\n trainx, trainy = train\n valx, valy = val\n testx, testy = test\n\n idxs = np.arange(len(trainx))\n\n training_losses = []\n training_errors = []\n validation_losses = []\n validation_errors = []\n test_losses = []\n test_errors = []\n training_losses_stats = []\n training_errors_stats = []\n validation_losses_stats = []\n validation_errors_stats = []\n\n np.random.seed(123)\n model = mlp\n model.train()\n\n for e in range(nepochs):\n\n # Per epoch setup ...\n seed = np.random.randint(123)\n np.random.seed(seed)\n np.random.shuffle(trainx)\n np.random.seed(seed)\n np.random.shuffle(trainy)\n\n seed = np.random.randint(123)\n np.random.seed(seed)\n np.random.shuffle(valx)\n np.random.seed(seed)\n np.random.shuffle(valy)\n\n model.train()\n\n for b in range(0, len(trainx), batch_size):\n\n # Train ...\n x_batch = trainx[b:b + batch_size]\n y_batch = trainy[b:b + batch_size]\n\n model.zero_grads()\n preds = model.forward(x_batch)\n model.backward(y_batch)\n loss = model.y_loss_criterion\n model.step()\n\n answers = np.argmax(preds, axis=1)\n labels = np.argmax(y_batch, axis=1)\n error = (answers[answers!=labels]).shape[0] / len(answers)\n\n training_losses_stats.append(loss)\n training_errors_stats.append(error)\n\n for b in range(0, len(valx), batch_size):\n\n # Evaluate/Validate ...\n model.eval()\n\n x_batch = valx[b:b + batch_size]\n y_batch = valy[b:b + batch_size]\n\n model.zero_grads()\n preds = model.forward(x_batch)\n #print(\"preds shape = \", preds.shape, \", y_batch shape = \", y_batch.shape)\n loss = model.criterion(preds, y_batch)\n\n answers = np.argmax(preds, axis=1)\n labels = np.argmax(y_batch, axis=0)\n error = float(len(answers[answers!=labels])) / len(answers)\n\n validation_losses_stats.append(loss)\n validation_errors_stats.append(error) \n\n\n # Accumulate data...\n training_losses.append(np.mean(training_losses_stats))\n training_errors.append(np.mean(training_errors_stats))\n\n validation_losses.append(np.mean(validation_losses_stats))\n validation_errors.append(np.mean(validation_errors_stats))\n \n if val:\n accuracy = model.validate(val) * 100.0\n print(\"Epoch {0}, accuracy {1} %.\".format(e + 1, accuracy))\n model.validation_acc.append(accuracy)\n else:\n print(\"Processed epoch {0}.\".format(e))\n\n\n # Cleanup ...\n model.eval()\n\n seed = np.random.randint(123)\n np.random.seed(seed)\n np.random.shuffle(testx)\n np.random.seed(seed)\n np.random.shuffle(testy) \n\n for b in range(0, len(testx), batch_size):\n\n # Test ...\n x_batch = testx[b:b + batch_size]\n y_batch = testy[b:b + batch_size]\n\n model.zero_grads()\n preds = model.forward(x_batch)\n model.backward(y_batch)\n loss = model.criterion(model.inputs[-1], y_batch)\n\n answers = np.argmax(preds, axis=1)\n labels = np.argmax(y_batch, axis=0)\n error = len(answers[answers!=labels]) / len(answers)\n\n test_losses.append(loss)\n test_errors.append(error)\n\n # Return results ...\n return (training_losses, training_errors, validation_losses, validation_errors)",
"def _train_epoch(self, train_batches, dropout_keep_prob, data, batch_size, save_dir, save_prefix):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n eval_every_n_batch = (len(data.train_set) - 1) / (8 * batch_size)\n for bitx, batch in enumerate(train_batches, 1): \n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.pc: batch['passage_char_ids'],\n self.qc: batch['question_char_ids'],\n self.p_em: batch['passage_em'],\n self.p_pos: batch['passage_pos'],\n self.q_pos: batch['question_pos'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.pr: batch['passage_rank'],\n self.dropout_keep_prob: dropout_keep_prob}\n\n _, loss = self.sess.run([self.train_op, self.loss], \n feed_dict=feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n \n if eval_every_n_batch > 0 and bitx % eval_every_n_batch == 0:\n self.logger.info('Evaluating the model ...')\n if data.dev_set is not None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n return 1.0 * total_loss / total_num",
"def train(self):\n args = self.args\n mnist = self.mnist\n feed_valid = {self.x: mnist.validation.images, self.y: mnist.validation.labels}\n feed_test = {self.x: mnist.test.images, self.y: mnist.test.labels}\n print('------------------------')\n print(\"epoch | l2_loss (v) | ce_loss (v) | valid_err (s) | valid_err (m) | test_err (s) | test_err (m)\")\n\n for ep in range(args.num_epochs):\n num_mbs = int(args.num_train / args.batch_size)\n for _ in range(num_mbs):\n batch = mnist.train.next_batch(args.batch_size)\n feed = {self.x: batch[0], self.y: batch[1]}\n self.sess.run(self.train_step, feed)\n valid_stats = self.sess.run(self.stats, feed_valid)\n test_stats = self.sess.run(self.stats, feed_test)\n\n valid_err_single = 100*(1.0-valid_stats['accuracy'])\n valid_err_model = self.eval_valid.eval(valid_stats['y_softmax'])\n test_err_single = 100*(1.0-test_stats['accuracy'])\n test_err_model = self.eval_test.eval(test_stats['y_softmax'])\n\n print(\"{:5} {:9.4f} {:9.4f} {:10.3f} {:10.3f} {:10.3f} {:10.3f}\".format(ep,\n valid_stats['l2_loss'], valid_stats['cross_entropy'],\n valid_err_single, valid_err_model,\n test_err_single, test_err_model))",
"def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )",
"def run_epoch( self ):\n # --- Init Epoch ----\n total_epoch_loss = 0.0\n epoch_batches = self.dataset.dataloader( self.config.neuron.epoch_length )\n progress_bar = qqdm(enumerate(epoch_batches), total=len(epoch_batches), desc=format_str('blue', f'Epoch Progress'))\n for iteration, (inputs) in progress_bar:\n\n # ---- Forward / Backward ----\n prev_mechanism_weights = self.mechanism_weights.tolist()\n output = self.train ( batch = { 'inputs': inputs } )\n next_mechanism_weights = self.mechanism_weights.tolist()\n total_epoch_loss += output.local_target_loss.item()\n\n # ---- Logs ----\n self.epoch_logs (\n progress_bar,\n iteration = iteration,\n output = output,\n prev_mechanism_weights = prev_mechanism_weights,\n next_mechanism_weights = next_mechanism_weights\n )\n self.global_step += 1\n\n self.epoch_loss = total_epoch_loss / self.config.neuron.epoch_length\n self.epoch += 1",
"def train(model, train_loader, val_loader, epochs, optimizer, loss_fn, device):\n \n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n optimizer.zero_grad()\n # Forward\n output = model(batch_X)\n loss = loss_fn(output, batch_y)\n # Backward\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n log_eval_metrics(model, val_loader, device, epoch)",
"def _valid_epoch(self, epoch=0, tb=True):\n\t\tstart = time.time()\n\t\tself.model.eval()\n\n\t\tvalid_loss = 0.0\n\t\tcorrect = 0.0\n\n\t\tall_predictions = []\n\t\tall_targets = []\n\t\tfor images, labels in self.valid_loader:\n\n\t\t\timages, labels = images.to(self.config.DEVICE), labels.to(self.config.DEVICE)\n\n\t\t\toutputs = self.model(images)\n\t\t\tloss = self.criterion(outputs, labels)\n\n\t\t\tvalid_loss += loss.item() * images.size(0)\n\t\t\t_, preds = outputs.max(1)\n\t\t\tcorrect += preds.eq(labels).sum()\n\n\t\t\tall_predictions.extend(preds.cpu().tolist())\n\t\t\tall_targets.extend(labels.cpu().tolist())\n\n\t\tfinish = time.time()\n\t\tmatrix = confusion_matrix(all_targets, all_predictions)\n\n\t\tfig = plot_confusion_matrix(matrix, self.config.CLASS_NAMES, normalize=True)\n\t\tfig.savefig(os.path.join(self.logger_setup['plots_dir'],'confusion_matrix_epoch_'+str(epoch)+'.png'), bbox_inches='tight')\n\t\tprint(\"all targets\",all_targets)\n\t\tprint(\"all predictions\", all_predictions)\n\t\tprint(\"set(targets) - set(predictions)\", set(all_targets)-set(all_predictions))\n\t\tsave_report(all_targets, all_predictions, self.config.CLASS_NAMES, self.logger_setup['reports_dir'], epoch)\n\t\tweighted_kappa = quadratic_weighted_kappa(matrix)\n\t\tself.logger_setup['writer'].add_figure('Test/Confusion Matrix', fig, epoch)\n\n\t\tprint('Evaluating Network.....')\n\t\tprint('Validation set: Epoch: {}, Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(\n\t\t\tepoch,\n\t\t\tvalid_loss / self.total_valid_samples,\n\t\t\tcorrect.float() / self.total_valid_samples,\n\t\t\tfinish - start))\n\t\tprint()\n\t\tif tb:\n\t\t\tself.logger_setup['writer'].add_scalar('Test/Average loss', valid_loss / self.total_valid_samples, epoch)\n\t\t\tself.logger_setup['writer'].add_scalar('Test/Accuracy', correct.float() / self.total_valid_samples, epoch)\n\t\t\tself.logger_setup['writer'].add_scalar('Test/Quadratic weighted kappa', weighted_kappa, epoch)\n\n\t\treturn (correct.float() / self.total_valid_samples, weighted_kappa)"
] |
[
"0.72259",
"0.7162968",
"0.70626986",
"0.70128274",
"0.693935",
"0.6934751",
"0.6930591",
"0.6924467",
"0.6924467",
"0.6878245",
"0.68370175",
"0.6802234",
"0.6745276",
"0.6732712",
"0.6711104",
"0.66833466",
"0.65563345",
"0.6547786",
"0.65361917",
"0.6494339",
"0.6479519",
"0.6473307",
"0.6461541",
"0.64333576",
"0.64332837",
"0.6431155",
"0.6427104",
"0.63989127",
"0.6391225",
"0.63875335"
] |
0.75531656
|
0
|
Given an adjacency matrix and starting node, traverse the graph
|
def dfsIterative(m, start):
s = [start] # list, use as stack
visited = {start} # set
out = []
while len(s) > 0:
cur = s.pop()
        print(cur)  # show the vertex being visited
out.append(cur)
for vertex, connected in enumerate(m[cur]):
# vertex is column in matrix (i)
# connected is the True/False, 1 or 0 value
            if connected and vertex not in visited:
s.append(vertex)
visited.add(vertex)
return out
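As a quick sanity check of the traversal, the function can be called on a small symmetric adjacency matrix; the 4-node graph below is an illustrative assumption, not part of the dataset row.

if __name__ == "__main__":
    # edges: 0-1, 0-2, 2-3 (symmetric adjacency matrix)
    m = [
        [0, 1, 1, 0],
        [1, 0, 0, 0],
        [1, 0, 0, 1],
        [0, 0, 1, 0],
    ]
    print(dfsIterative(m, 0))  # expected visiting order: [0, 2, 3, 1]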
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def viterbi(adj_matrix, label_sequence, starting_vertex):\n\n assert adj_matrix, \"adj_matrix is None or empty.\"\n n = len(adj_matrix) # vertex count.\n for row in adj_matrix:\n assert len(row) == n, \"adj_matrix is not square.\"\n\n assert 0 <= starting_vertex <= n - 1, \"starting_vertex out of range.\"\n\n assert label_sequence, \"label_sequence is None or empty.\"\n k = len(label_sequence)\n for l in label_sequence:\n assert isinstance(l, int) and l > 0, \"label ids must be positive integers.\"\n\n p = [[0 for _ in range(0, k)] for _ in range(0, n)]\n for j in range(k - 1, -1, -1):\n for beg in range(0, n):\n for end in range(0, n):\n if not adj_matrix[beg][end]: # No edge from i to r.\n continue\n\n assert isinstance(adj_matrix[beg][end], AdjMatrixElem),\\\n \"adj_matrix[%d][%r] is not an AdjMatrixElem\" % (beg, end)\n elem = adj_matrix[beg][end]\n if elem.label_id != label_sequence[j]:\n continue\n\n later_prob = 1 if j == k - 1 else p[end][j + 1]\n if elem.probability * later_prob > p[beg][j]:\n p[beg][j] = elem.probability * later_prob\n\n if round(p[starting_vertex][0] - 0.0, PROBABILITY_PRECISION) == 0:\n return 0, NO_SUCH_PATH\n\n path = [starting_vertex]\n for j in range(0, k):\n beg = path[j]\n for end in range(0, n):\n later_prob = 1 if j == k - 1 else p[end][j + 1]\n if adj_matrix[beg][end] and adj_matrix[beg][end].label_id == label_sequence[j]\\\n and round(p[beg][j] - adj_matrix[beg][end].probability * later_prob, PROBABILITY_PRECISION) == 0:\n path.append(end)\n break\n\n return p[starting_vertex][0], tuple(path)",
"def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)",
"def additive_phylogeny(matrix, n, G):\n new_node = n\n\n def additive_recur_helper(matrix, n, G):\n\n nonlocal new_node\n\n if n == 2:\n print(\"d add_edge (%s,%s):%s\" % (0, 1, matrix[0, 1]))\n G.add_edge(0, 1, weight=matrix[0, 1])\n return\n\n limblen = limblength(n - 1, matrix)\n i, k = find_i_k(matrix, n - 1, limblen)\n x = matrix[i, n - 1] - limblen\n\n print(\"n=%s limblen=%s i=%s k=%s x=%s\" % (n, limblen, i, k, x))\n\n additive_recur_helper(matrix[0 : n - 1, 0 : n - 1], n - 1, G)\n\n v = node_at_distance(G, i, k, x, matrix[i, k], new_node)\n if v == new_node:\n new_node += 1\n\n print(\"node_at_distance %s from %s is %s\" % (x, i, v))\n\n print(\"e add_edge (%s,%s):%s\" % (v, n - 1, limblen))\n G.add_edge(v, n - 1, weight=limblen)\n\n # draw graph if small\n if len(G) < 30:\n global plot_cnt\n pos = nx.kamada_kawai_layout(G)\n labels = nx.get_edge_attributes(G, \"weight\")\n nx.draw(G, pos, with_labels=True)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.draw()\n plt.savefig(\"Graph\" + str(plot_cnt) + \".png\", format=\"PNG\")\n plt.clf()\n plot_cnt += 1\n\n return\n\n additive_recur_helper(matrix, n, G)\n\n return",
"def walk(self):\n #print self.matrix\n current_spot = self.beginning\n while not (current_spot.row_num == len (self.matrix)-1 \n and current_spot.col_num == len(self.matrix[0])-1):\n current_spot = current_spot.walk_right()",
"def main(args):\n\n # Take the adjacency matrix of the graph\n adjacency_mat_graph = parse_adjacency_matrix_csv(args.i)\n\n # Check if the number of row == number of columns of the matrix\n logging.info(\"Checking the matrix shape...\")\n number_row_edges = len(adjacency_mat_graph)\n number_col_edges = len(adjacency_mat_graph[0])\n if number_row_edges != number_col_edges:\n raise ValueError(f\"The number of nodes detected in row\"\n f\"{number_row_edges} not equal to the \"\n f\"number of nodes found in column \"\n f\"{number_col_edges}.\")\n\n # check and verify if graph has only one root\n logging.info(\"Checking if graph has only one root and verify...\")\n root, number_root = unique_root_node_checking(adjacency_mat_graph)\n\n if root == \"NA\" and number_root == 0:\n raise ValueError(f\"No root node found in graph\")\n elif root == \"NA\" and number_root > 0:\n raise ValueError(\"More than one root node found in graph\")\n\n # check if identify root is the same as the one provided\n if root != \"NA\" and root != args.r:\n raise ValueError(f\"Expected {args.r} as root node but {root}\"\n f\" found in graph.\")\n\n # Convert adjacency matrix to dict representation for further steps\n logging.info(\"Converting matrix to dictionary graph...\")\n dict_graph = adjacency_matrix_to_dict_graph(adjacency_mat_graph)\n\n # Check if provided node is in the graph\n logging.info(\"Checking if node of interest n in graph...\")\n if args.n not in dict_graph:\n raise ValueError(f\"Provided node of interest {args.n} \"\n f\"is not found in the provided graph.\")\n\n # Check if the provided graph is acyclic\n logging.info(\"Checking if graph is acyclic...\")\n is_cyclic = is_cyclic_graph(dict_graph)\n if is_cyclic:\n raise ValueError(\"The provided graph is not acyclic. \"\n \"At least one cycle was detected in graph.\")\n logging.info(\"THE PROVIDED GRAPH IS DAG !!!\")\n\n # Get depth\n logging.info(f\"Getting the depth of node {args.n}...\")\n path, depth = get_node_depth(dict_graph, args.r, args.n, [])\n assert len(path) -1 == depth, f\"Length path {path} != depth {depth}\"\n logging.info(f\"The depth of node {args.n} is equal to {depth}\")",
"def dijkstra(adj_matrix, source_node):\n # init shortest paths table and unvisited table\n N = len(adj_matrix)\n shortest_paths = init_shortest_paths(N, source_node)\n\n visited = [False] * N\n\n # while there are still unvisited nodes\n while False in visited:\n curr_node = get_next_unvisited(visited, shortest_paths)\n curr_dist = shortest_paths[curr_node][0]\n neighbors = get_neighbors(adj_matrix, curr_node)\n # for each neighbor\n\n for n in neighbors:\n edge_weight = adj_matrix[curr_node][n]\n # if the curr_dist + weight is less than what's\n # already in the shortest path entries for that neighbor\n # then update the shortest path entry value\n curr_entry = shortest_paths[n][0]\n if curr_dist + edge_weight < curr_entry:\n shortest_paths[n] = (curr_dist + edge_weight, curr_node)\n visited[curr_node] = True\n\n return shortest_paths",
"def compute_path(predecessor_matrix, start_node, end_node):\n\n i = start_node\n j = end_node\n path = []\n\n #Go through the predecessor matrix to save the data in a list\n while j != i:\n path.append(j)\n j = predecessor_matrix[j]\n path.append(i)\n\n #reverse it so that it goes from start node to end node instead\n path.reverse()\n return path",
"def adjacency_matrix(g):\n nodes = sorted(g.keys())\n adj = []\n for row_node in nodes:\n row = []\n for column_node in nodes:\n if column_node in g[row_node]:\n row.append(1)\n else:\n row.append(0)\n adj.append(row)\n \n return adj",
"def _get_walk(start_node, graph, walk_length, matrix, p, q):\n walk = [start_node]\n prev = None\n while len(walk) < walk_length: # here we may need to consider some dead end issues\n cur = walk[-1]\n cur_nbrs = list(graph.neighbors(cur)) # (G.neighbors(cur))\n\n if len(cur_nbrs) == 0:\n return walk # the walk has hit a dead end\n random.shuffle(cur_nbrs)\n if len(walk) == 1:\n walk.append(random.choice(cur_nbrs))\n else:\n prev = walk[-2]\n\n if prev not in graph:\n print(f'Problem: prev not in graph: {prev}')\n raise ValueError\n elif cur not in graph[prev]:\n print(f'Problem: cur not in graph: {cur}')\n print(list(graph[prev].keys()))\n raise ValueError\n\n pre_edge_type = graph[prev][cur]['type'] - 1\n\n distance_sum = 0\n\n for neighbor in cur_nbrs:\n # print \"neighbor_link: \",neighbor_link\n neighbor_link_type = graph[cur][neighbor]['type'] - 1\n # Get transition probability based on the previous edge and the current possible edge\n transition_probability = matrix[pre_edge_type][neighbor_link_type]\n\n neighbor_link_weight = graph[cur][neighbor]['weight']\n\n if graph.has_edge(neighbor, prev) or graph.has_edge(prev, neighbor): # undirected graph\n distance_sum += transition_probability * neighbor_link_weight / p # +1 normalization\n elif neighbor == prev: # decide whether it can random walk back\n distance_sum += transition_probability * neighbor_link_weight\n else: # Triangle\n distance_sum += transition_probability * neighbor_link_weight / q\n\n '''\n pick up the next step link\n '''\n nn = pick_neighbors(graph, cur, prev, cur_nbrs, pre_edge_type, matrix, distance_sum, p, q)\n if nn is not None:\n walk.append(nn)\n else:\n print('No neighbour to go!')\n print(prev, cur)\n walk.append(random.choice(cur_nbrs))\n\n # print \"walk length: \",len(walk),walk\n # print \"edge walk: \",len(edge_walk),edge_walk \n return walk",
"def fit(self, input_matrix: Union[sparse.csr_matrix, np.ndarray], labels: Union[np.ndarray, dict] = None,\n labels_row: Union[np.ndarray, dict] = None, labels_col: Union[np.ndarray, dict] = None) -> 'Propagation':\n adjacency, seeds, self.bipartite = get_adjacency_values(input_matrix, values=labels, values_row=labels_row,\n values_col=labels_col, which='labels')\n n = adjacency.shape[0]\n index_seed, index_remain, labels_seed = self._instantiate_vars(seeds)\n\n if self.node_order == 'random':\n np.random.shuffle(index_remain)\n elif self.node_order == 'decreasing':\n index = np.argsort(-adjacency.T.dot(np.ones(n))).astype(np.int32)\n index_remain = index[index_remain]\n elif self.node_order == 'increasing':\n index = np.argsort(adjacency.T.dot(np.ones(n))).astype(np.int32)\n index_remain = index[index_remain]\n\n labels = -np.ones(n, dtype=np.int32)\n labels[index_seed] = labels_seed\n labels_remain = np.zeros_like(index_remain, dtype=np.int32)\n\n indptr = adjacency.indptr.astype(np.int32)\n indices = adjacency.indices.astype(np.int32)\n if self.weighted:\n data = adjacency.data.astype(np.float32)\n else:\n data = np.ones(n, dtype=np.float32)\n\n t = 0\n while t < self.n_iter and not np.array_equal(labels_remain, labels[index_remain]):\n t += 1\n labels_remain = labels[index_remain].copy()\n labels = np.asarray(vote_update(indptr, indices, data, labels, index_remain))\n\n probs = get_membership(labels)\n probs = normalize(adjacency.dot(probs))\n\n self.labels_ = labels\n self.probs_ = probs\n self._split_vars(input_matrix.shape)\n\n return self",
"def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = 
adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n 
# plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass",
"def get_adjacency_matrix(node_list: List[Node], graph: Graph):\n node_to_index = {node: index for index, node in enumerate(node_list)}\n adjacency_matrix = numpy.zeros((len(node_list), len(node_list)), dtype=int)\n for node in node_list:\n for dependency in graph[node]:\n if dependency != node:\n adjacency_matrix[\n node_to_index[node],\n node_to_index[dependency]\n ] = 1\n return adjacency_matrix",
"def get_connected_nodes(node, current_path_len) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n closed_list_coords = get_path_coordinates(closed_list)\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes",
"def BFS(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"BFS: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"BFS: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n # Initialize a matrix of the same size as maze where each value is None.\n previous = [[None for i in range(n)] for j in range(n)]\n\n queue = deque() # Define our queue of \"fringe\" squares\n queue.append(start) # Push the start square into our queue\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(queue)): # While there exists items in the queue\n current = queue.popleft() # Pop the square at index 0\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n # If possible has not been visited yet\n if (not visited[possible[0]][possible[1]]):\n queue.append(possible) # Add possible to our queue\n # Set possible to visited\n visited[possible[0]][possible[1]] = 1\n # Set the previous square for possible to the current square\n previous[possible[0]][possible[1]] = current\n # If the while loop goes out, and the queue is empty, then there is no possible path\n return (False, [], number_of_nodes_visited)",
"def adjacency_matrix():\n file_path = PROJECT_PATH + \"/geographycal_data/adjacency_matrix/Howgrp.txt\"\n router = Router(adjacency_metrix=file_path)\n # router.write2vtk(router.graph, \"adjacency_matrix\")\n # nx.draw(router.graph)\n # plt.show()\n # adjacency matrix\n A = nx.adjacency_matrix(router.graph, weight=None).toarray()\n # ... and its spectrum\n nx.adjacency_spectrum(router.graph, weight=None)\n # weighted adjacency\n W = nx.adjacency_matrix(router.graph)\n # D\n I = np.reshape(np.ones(12), (-1, 1))\n D = np.matmul(A, I)\n # combinatorial graph Laplacian L = D - A\n L = nx.laplacian_matrix(router.graph, weight=None)\n # ... and his spectrum\n nx.laplacian_spectrum(router.graph, weight=None)\n # weighted Laplacian\n Y = nx.laplacian_matrix(router.graph)\n\n # Note\n sumD = np.matmul(I.transpose(), D)\n sumD = sumD[0][0]\n sumA = 0\n for row in np.nditer(A):\n for e in np.nditer(row):\n sumA += e\n\n # Fielder vector\n fiedler_vector = nx.fiedler_vector(router.graph, weight=None)\n\n # Matrix Double index Sum\n\n def D_app(F):\n return D * F\n\n def A_app(F):\n AF = np.zeros(len(F))\n for i, e_i in enumerate(F):\n for j, e_j in enumerate(F):\n if (A[i][j] != 0):\n AF[i] += F[j]\n return AF",
"def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ",
"def get_graph(adj):\n # remove all zeros rows and columns\n adj = adj[~np.all(adj == 0, axis=1)]\n adj = adj[:, ~np.all(adj == 0, axis=0)]\n adj = np.asmatrix(adj)\n G = nx.from_numpy_matrix(adj)\n return G",
"def dfs_inorder_iter(graph, start_node):\n nonlocal t\n\n if visited[start_node]:\n return\n\n seen_once = {}\n nodes_seen = 0\n stack = [start_node]\n nodes_in_stack = set(stack)\n\n while stack:\n node = stack.pop()\n nodes_in_stack.remove(node)\n if not seen_once.get(node):\n # It's our first time visiting the node,\n # so put it back on the stack; we won't take\n # it off permanently until we're backtracking\n stack.append(node)\n nodes_in_stack.add(node)\n seen_once[node] = True\n for neighbor_node in graph[node]:\n if (not visited[neighbor_node]\n and not seen_once.get(neighbor_node)\n and neighbor_node not in nodes_in_stack):\n stack.append(neighbor_node)\n nodes_in_stack.add(neighbor_node)\n else:\n # We're backtracking\n visited[node] = True\n finishing_times[t] = node\n t += 1\n sccs[s] += 1",
"def read_graph(Amatrix):\n\tG = nx.from_numpy_matrix(Amatrix)\n\tG = G.to_undirected()\n\treturn G",
"def enumerate_links_around_node(self, node):\n\n l0 = self.node_link[node]\n l = l0\n edges = []\n traversing = True\n while traversing:\n edges.append(l)\n v = l[0]\n if v == node:\n l = self.pred_right[l]\n else:\n l = self.pred_left[l]\n if l0 == l:\n traversing = False\n if l0[1] == l[0] and l0[0] == l[1]:\n traversing = False\n #print v, l\n #raw_input('here')\n return edges",
"def find_next(graph, strategy, mission, node, parent_node=None,\n matrix=None):\n debug('strategy =', strategy)\n node_gt = graph.name2node[node]\n nodes_gt = [n for n in node_gt.out_neighbours()]\n nodes = [graph.vp['name'][n] for n in nodes_gt]\n debug('nodes =', nodes)\n if strategy == 'random':\n if parent_node is not None and parent_node not in nodes:\n nodes.append(parent_node)\n return random.choice(nodes)\n neighbor_targets = [n for n in nodes if n in mission.targets[0]]\n if neighbor_targets:\n debug('target in neighbors')\n return neighbor_targets[0]\n\n nodes = [n for n in nodes if n not in mission.visited]\n try:\n candidates = {n: matrix[n, mission.targets[0][0]] for n in nodes}\n except KeyError:\n pdb.set_trace()\n except TypeError, e:\n print(e)\n pdb.set_trace()\n if not candidates:\n chosen_node = None # abort search\n else:\n if strategy == 'title_stochastic' and random.random() <= 0.05:\n chosen_node = random.choice(candidates.keys())\n debug('randomly selecting node', chosen_node)\n return chosen_node\n chosen_node = max(candidates.iteritems(),\n key=operator.itemgetter(1))[0]\n debug('candidates are:')\n for k, v in candidates.items():\n debug(k, ':', v)\n if chosen_node == parent_node:\n debug('backtracking to node', parent_node)\n return None\n debug('going to ', chosen_node)\n return chosen_node",
"def dft_recursive(self, starting_vertex):\n # TODO\n # creating a function inside that includes a list\n # of previously traversed vertices\n def recursive(graph, traversed, vertex):\n # if the vertex is in traversed already, return none\n if vertex in traversed:\n return \n # otherwise we print it out\n print(vertex)\n # append the vertex to our traversed list\n traversed.add(vertex)\n # running the function on the neighbors of the vertex\n for val in graph[vertex]:\n recursive(graph, traversed, val)\n\n recursive(self.vertices, set(), starting_vertex)",
"def neighbor_nodes(self, node):\n row = node[0]\n col = node[1]\n if row == -1 and col == -1:\n # The nodes that can be accessed from the start node\n # (i.e. all the nodes in the first column)\n for r in range(self.num_rows):\n yield (r, 0)\n else:\n if row < (self.num_rows - 1):\n # We can still go down\n yield (row + 1, col)\n if row > 0:\n # We can still go up\n yield (row - 1, col)\n if col < (self.num_cols - 1):\n # We can still go to the right\n yield (row, col + 1)",
"def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)",
"def _build_adjacency_matrix_1(self):\n\n from scipy import sparse as sparse\n \n down_neighbour = np.empty(self.tri.npoints)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size)\n col_array = np.empty(size)\n down_array = np.ones(size)\n\n # Catch cases where node is local low point (i.e. it is its own low neighbour)\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n if row == down_neighbour[row]:\n down_array[row] = 0.0\n \n\n downMCOO = sparse.coo_matrix( (down_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.adjacency1 = downMCOO.tocsr() \n\n # Catch pathological cases - sometimes if there is a flat spot on the boundary, then \n # the filling method above will produce a non-square matrix. This is caused by\n # repetition of values in the COO list which are summed on conversion.\n\n if downMCOO.shape[0] != downMCOO.shape[1]:\n # This approach works but is a lot slower\n\n print \"\"\"\n Warning: the downhill matrices require a slow build method. This is probably\n Because there are degeneracies in the slope - particularly at the boundaries\n A small random perturbation is usually enough to fix this problem\n \"\"\"\n downMat = sparse.lil_matrix((size, size))\n\n for row in range(0, self.tri.npoints): \n downMat[down_neighbour[row],row] = 1.0\n\n for row in range(0, self.tri.npoints): \n if down_neighbour[row] == row:\n downMat[row,row] = 0.0\n \n self.adjacency1 = downMat.T.tocsr() \n \n return",
"def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO",
"def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO",
"def graph_traverse(nodes, node, path_count, path_length):\n\tpath_length += 1\n\t\n\t# For any connected_node, traverse that node unless it's the last node\n\tfor connected_node in node[1]:\n\t\ttry:\n\t\t\tgraph_traverse(nodes, nodes[int(connected_node)], path_count, path_length)\n\t\t\t\n\t\texcept:\n\t\t\t# Total paths\n\t\t\tnodes[1][0][2] += 1\n\t\t\n\t\t\t# Shortest path\n\t\t\tif path_length < nodes[1][0][0]:\n\t\t\t\tnodes[1][0][0] = path_length\n\n\t\t\t# Longest path\n\t\t\tif path_length > nodes[1][0][1]:\n\t\t\t\tnodes[1][0][1] = path_length\n\n\t\t\tcontinue\n\tprint(nodes[1][0][2])",
"def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict",
"def dfs_iter(graph, start):\n # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat\n stack = [(start, 0)]\n time = 1\n graph.discovery_time[start] = time\n graph.visited[start] = True\n\n while stack: # not empty\n u, v = stack.pop()\n\n while v < graph.size and not is_edge(graph, u, v):\n v += 1\n\n if v < graph.size:\n # found successor, u is not yet finished\n stack.append((u, v + 1))\n\n if not graph.visited[v]:\n # we have discovered v\n stack.append((v, 0))\n graph.parent[v] = u\n graph.visited[v] = True\n time += 1\n graph.discovery_time[v] = time\n else:\n # u has no more successors\n time += 1\n graph.finishing_time[u] = time"
] |
[
"0.63261956",
"0.6208576",
"0.61341274",
"0.6112829",
"0.60796154",
"0.60773766",
"0.6034537",
"0.6020517",
"0.60101736",
"0.60092723",
"0.597108",
"0.5929371",
"0.5928171",
"0.59148794",
"0.59029406",
"0.58984756",
"0.5895746",
"0.5883026",
"0.5869071",
"0.5836068",
"0.5827412",
"0.580834",
"0.57967705",
"0.57921106",
"0.5780705",
"0.5774379",
"0.5774379",
"0.57718694",
"0.5749079",
"0.57467246"
] |
0.6316166
|
1
|
Create an image upload_job and return an UploadJob instance
|
def create_job(self, image_name, image_checksum, project, cloud_account_names=None):
self._log.debug("Project {}: Create image upload job for image {} to {}".
format(project, image_name, cloud_account_names))
create_job_msg = RwImageMgmtYang.YangInput_RwImageMgmt_CreateUploadJob.from_dict({
"project_name": project,
"onboarded_image": {
"image_name": image_name,
"image_checksum": image_checksum,
}
})
if cloud_account_names is not None:
create_job_msg.cloud_account = cloud_account_names
query_iter = yield from self._dts.query_rpc(
"I,/rw-image-mgmt:create-upload-job",
0,
create_job_msg,
)
for fut_resp in query_iter:
rpc_result = (yield from fut_resp).result
job_id = rpc_result.job_id
return UploadJob(self._log, self._loop, self._dts, job_id, project)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def image_create_and_upload(self, upload=True, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(self.__name__ + \"-image\")\n kwargs['name'] = name\n\n params = dict(kwargs)\n image = self.create_image(**params)\n self.assertEqual('queued', image['status'])\n if not upload:\n return image\n\n file_content = data_utils.random_bytes()\n image_file = io.BytesIO(file_content)\n self.client.store_image_file(image['id'], image_file)\n\n image = self.client.show_image(image['id'])\n return image",
"def create_and_upload_image(cls, data=None, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(\"kb-image\")\n kwargs['name'] = name\n\n params = cls._get_create_params(**kwargs)\n if data:\n # NOTE: On glance v1 API, the data should be passed on\n # a header. Then here handles the data separately.\n params['data'] = data\n\n image = cls.client.create_image(**params)\n # Image objects returned by the v1 client have the image\n # data inside a dict that is keyed against 'image'.\n if 'image' in image:\n image = image['image']\n cls.created_images.append(image['id'])\n # Upload image to glance artifactory.\n file_content = data_utils.random_bytes()\n image_file = six.BytesIO(file_content)\n cls.client.store_image_file(image['id'], image_file)\n cls.kingbird_client = kb_client.Client(\n kingbird_url=KINGBIRD_URL, auth_token=cls.client.token,\n project_id=cls.client.tenant_id)\n return image",
"def create_upload(projectArn=None, name=None, type=None, contentType=None):\n pass",
"def create_job_object(message, environment_image):\n\n PYTHONUNBUFFERED_ENV = client.V1EnvVar(name=\"PYTHONUNBUFFERED\", value=\"1\")\n AUTH_TOKEN_ENV = client.V1EnvVar(name=\"AUTH_TOKEN\", value=AUTH_TOKEN)\n EVALAI_API_SERVER_ENV = client.V1EnvVar(\n name=\"EVALAI_API_SERVER\", value=EVALAI_API_SERVER\n )\n MESSAGE_BODY_ENV = client.V1EnvVar(name=\"BODY\", value=json.dumps(message))\n submission_pk = message[\"submission_pk\"]\n image = message[\"submitted_image_uri\"]\n # Configureate Pod agent container\n agent_container = client.V1Container(\n name=\"agent\", image=image, env=[PYTHONUNBUFFERED_ENV]\n )\n # Configureate Pod environment container\n environment_container = client.V1Container(\n name=\"environment\",\n image=environment_image,\n env=[\n PYTHONUNBUFFERED_ENV,\n AUTH_TOKEN_ENV,\n EVALAI_API_SERVER_ENV,\n MESSAGE_BODY_ENV,\n ],\n resources=client.V1ResourceRequirements(\n limits={\"nvidia.com/gpu\": \"1\"}\n ),\n )\n # Create and configurate a spec section\n template = client.V1PodTemplateSpec(\n metadata=client.V1ObjectMeta(labels={\"app\": \"evaluation\"}),\n spec=client.V1PodSpec(\n containers=[environment_container, agent_container],\n restart_policy=\"Never\",\n ),\n )\n # Create the specification of deployment\n spec = client.V1JobSpec(backoff_limit=1, template=template)\n # Instantiate the job object\n job = client.V1Job(\n api_version=\"batch/v1\",\n kind=\"Job\",\n metadata=client.V1ObjectMeta(\n name=\"submission-{0}\".format(submission_pk)\n ),\n spec=spec,\n )\n return job",
"def upload_image():\n s3client = __initiate_s3client()\n # Generate random UUIDs as image ids\n image_id = str(uuid.uuid4())\n # Generate pre-signed POST url\n url_info = s3client.generate_presigned_post(\n Bucket=env.get('bucket'),\n Key=image_id\n )\n return Response(status_code=201,\n headers={'Content-Type': 'application/json'},\n body={'status': 'success',\n 'upload_url': url_info,\n 'id': image_id})",
"def cmd_image_upload(client, args):\n config = data_fields(args, client.allowed_image_fields)\n if args.type == 'file':\n image = client.upload_from_path(args.image, config)\n else:\n image = client.upload_from_url(args.image, config)\n generate_output({'image': image})",
"def create_photo(image, handler):\n if image == False: # False means it wasn't valid (see validate_image)\n raise FormatUnrecognizedError()\n\n if max(image.width, image.height) <= MAX_IMAGE_DIMENSION:\n # No resize needed. Keep the same size but add a transformation to\n # force re-encoding.\n image.resize(image.width, image.height)\n elif image.width > image.height:\n image.resize(MAX_IMAGE_DIMENSION,\n image.height * MAX_IMAGE_DIMENSION / image.width)\n else:\n image.resize(image.width * MAX_IMAGE_DIMENSION / image.height,\n MAX_IMAGE_DIMENSION)\n\n try:\n image_data = image.execute_transforms(output_encoding=images.PNG)\n except RequestTooLargeError:\n raise SizeTooLargeError()\n except Exception:\n # There are various images.Error exceptions that can be raised, as well\n # as e.g. IOError if the image is corrupt.\n raise PhotoError()\n\n photo = model.Photo.create(handler.repo, image_data=image_data)\n photo_url = get_photo_url(photo, handler)\n return (photo, photo_url)",
"async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job",
"def __upload(self, filename):\n # Save to local path\n save_img = self.__frame.copy()\n\n # Initialize the bucket for after usage\n image_blob = None\n\n # Make the Google Cloud Storage client\n # and set the storage path\n if self.__yaml[\"bucket\"] is not None:\n client = storage.Client()\n bucket = client.get_bucket(self.__yaml[\"bucket\"])\n image_blob = bucket.blob(filename)\n\n # Upload and save the image\n try:\n if self.__yaml[\"output_path\"] is not None:\n # Save image in local\n LOGGER.info(f\"Saved {filename} in local folder\", )\n path = os.path.sep.join((self.__yaml[\"output_path\"], filename))\n cv2.imwrite(path, save_img)\n\n # Upload to Google Cloud Storage\n # if the user set the \"bucket\" option\n if self.__yaml[\"bucket\"] is not None:\n image_blob.upload_from_filename(os.path.sep.join((self.__yaml[\"output_path\"],\n filename)),\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n elif self.__yaml[\"bucket\"] is not None:\n # Convert numpy array to bytes\n temp_file = Image.fromarray(cv2.cvtColor(save_img, cv2.COLOR_BGR2RGB))\n temp_file_bytes = io.BytesIO()\n temp_file.save(temp_file_bytes,\n format=\"JPEG\")\n\n # Read the bytes from beginning\n temp_file_bytes.seek(0)\n image_blob.upload_from_file(temp_file_bytes,\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n except Exception as error:\n # If errors occur, just print the error messages\n # and don't exit the program\n LOGGER.warning(error)",
"def create(self):\n\n if self.image:\n return self.image\n\n import nova_utils\n nova = nova_utils.nova_client(self.os_creds)\n image_dict = None\n try:\n # TODO/FIXME - Certain scenarios, such as when the name has whitespace,\n # the image with a given name is not found....\n image_dict = nova.images.find(name=self.image_name)\n except Exception as e:\n logger.info('No existing image found with name - ' + self.image_name)\n pass\n\n if image_dict:\n self.image = self.glance.images.get(image_dict.id)\n if self.image:\n logger.info('Found image with name - ' + self.image_name)\n return self.image\n\n self.image_file = self.__get_image_file()\n self.image = self.glance.images.create(name=self.image_name, disk_format=self.image_format,\n container_format=\"bare\")\n logger.info('Uploading image file')\n self.glance.images.upload(self.image.id, open(self.image_file.name, 'rb'))\n logger.info('Image file upload complete')\n return self.image",
"def create(self, name, containerFormat, diskFormat, isPublic, pathFile):\n if isPublic:\n isPublic = \"public\"\n else:\n isPublic = \"private\"\n\n image = self.client.images.create(name=name, container_format=containerFormat, disk_format=diskFormat, is_public=isPublic)\n # Thread ?\n self.client.images.upload(image.id, open(pathFile, 'rb'))\n while image.status == \"queued\":\n image = self.find(image_id=image.id)\n time.sleep(1)\n return self.find(image_id=image.id)",
"def _CreateImage(media_service, opener, url):\n # Note: The utf-8 decode is for 2to3 Python 3 compatibility.\n image_data = opener.open(url).read().decode('utf-8')\n image = {\n 'type': 'IMAGE',\n 'data': image_data,\n 'xsi_type': 'Image'\n }\n\n return media_service.upload(image)[0]",
"def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % str(api_response.status))\n return api_response",
"def image_resize(job_object):\n try:\n job = json.loads(job_object.arg)\n base64_file = job['image']\n args = job['args'] if 'args' in job else {}\n del job['image']\n logging.info(job)\n \n def write_file(local_path,filename,file_b64):\n logging.debug(\"about to save to \" + \"%s/%s\" % (local_path,filename))\n if not os.path.exists(local_path): os.makedirs(local_path)\n image_file = base64.b64decode(file_b64)\n local_file = open(\"%s/%s\" % (local_path,filename), \"w\")\n local_file.write(image_file)\n local_file.close()\n \n def download_file(url,local_path,filename):\n print \"downloading \" + url\n f = urllib2.urlopen(urllib2.Request(url))\n print \"about to save to \" + \"%s/%s\" % (local_path,filename)\n if not os.path.exists(local_path): os.makedirs(local_path)\n # Open our local file for writing\n local_file = open(\"%s/%s\" % (local_path,filename), \"w\")\n local_file.write(f.read())\n local_file.close()\n \n local_path = '%s/upload/%s' % (options.asset_root,job['path'])\n local_path_wfile = '%s/%s%s' % (local_path,job['file'],job['extension'])\n filename = '%s%s' % (job['file'],job['extension'])\n #download_file(job['url'],local_path,filename)\n write_file(local_path,filename,base64_file)\n \n def resize_and_save(local_file,new_file,maxsize=None,maxh=None,maxw=None,crop=None):\n \"\"\"Resize the image and save\"\"\"\n logging.debug(\"maxw = %s, maxsize=%s, crop=%s\" % (maxw,maxsize,crop))\n img = Image.open(local_file)\n width,height = img.size\n width,height = float(width), float(height)\n ratio = float(1)\n if crop is not None:\n size = float(maxsize)\n if width <= height and width > size:\n ratio = size/width\n elif height < width and height > size:\n ratio = size/height\n else: \n ratio = 1 # too small\n elif maxsize:\n size = float(maxsize)\n if width >= height and width > size:\n ratio = size/width\n elif height > width and height > size:\n ratio = size/height\n else: \n ratio = 1 # too small\n elif maxh:\n size = maxh\n if height > size:\n ratio = size/height\n else:\n # too small\n ratio = 1\n elif maxw:\n size = maxw\n if width > size:\n ratio = size/width\n else:\n # too small\n ratio = 1\n else:\n raise Exception(\"must specify max width, OR max size\")\n \n print(\"old: ratio = %s: size(x,y) = %s,%s\" % (ratio,width,height))\n height = int(height*ratio)\n width = int(width*ratio)\n print(\"new ratio = %s: size(x,y) = %s,%s\" % (ratio,width,height))\n img = img.resize((width, height),Image.ANTIALIAS)\n if crop is not None:\n log.debug(\"in crop %s\" % crop)\n crop = int(crop)\n if width > crop:\n amt = int((int(width) - crop)/2)\n img = img.crop((amt,0,amt + crop, crop))\n elif height > crop:\n amt = int((int(height) - crop)/2)\n img = img.crop((0,amt,crop,amt+crop))\n \n log.debug(\"saving new file %s\" % new_file)\n if img.mode != \"RGB\":\n img = img.convert(\"RGB\")\n img.save(new_file)\n \n \n if os.path.exists(local_path_wfile):\n if args != {}:\n ext = args['extension'] if 'extension' in args else \"_t\"\n resize_and_save(local_path_wfile,\n '%s/%s%s.jpg' % (local_path,job['file'],ext),\n maxsize=args['maxsize'],\n crop=args['crop'])\n else:\n resize_and_save(local_path_wfile,'%s/%s_t.jpg' % (local_path,job['file']),maxsize=100)\n resize_and_save(local_path_wfile,'%s/%s_m.jpg' % (local_path,job['file']),maxw=317)\n resize_and_save(local_path_wfile,'%s/%s_l.jpg' % (local_path,job['file']),maxsize=800)\n keeptrying = False\n else:\n logging.error(\"haven't found file? 
%s\" % local_path_wfile)\n \n # delete original\n logging.debug(\"About to delete original %s\" % local_path_wfile)\n os.remove(local_path_wfile)\n \n except:\n traceback.print_exc()",
"def _submitInstance( self, imageName, workDir ):\n return S_OK()",
"async def _upload(self) -> None:\n\n # filename given?\n filename = str(uuid.uuid4()) if self.filename is None else self.filename\n\n # check\n if self._upload_path is None:\n raise ValueError(\"No upload URL given.\")\n\n # send data and return image ID\n async with aiohttp.ClientSession() as session:\n data = aiohttp.FormData()\n data.add_field(\"file\", self._buffer, filename=self.filename)\n async with session.post(self._upload_path, auth=self._auth, data=data, timeout=self._timeout) as response:\n if response.status == 401:\n log.error(\"Wrong credentials for uploading file.\")\n raise FileNotFoundError\n elif response.status != 200:\n log.error(f\"Could not upload file to filecache: {response.status} {response.reason}\")\n raise FileNotFoundError",
"def setup_classic_job(self, create_job_path=True, upload_id=None):\n upload = self.setup_upload(upload_id)\n oqp = OqParams()\n oqp.job_type = \"classical\"\n oqp.upload = upload\n oqp.region_grid_spacing = 0.01\n oqp.min_magnitude = 5.0\n oqp.investigation_time = 50.0\n oqp.component = \"gmroti50\"\n oqp.imt = \"pga\"\n oqp.truncation_type = \"twosided\"\n oqp.truncation_level = 3\n oqp.reference_vs30_value = 760\n oqp.imls = [\n 0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269, 0.0376, 0.0527,\n 0.0738, 0.103, 0.145, 0.203, 0.284, 0.397, 0.556, 0.778]\n oqp.poes = [0.01, 0.10]\n oqp.realizations = 1\n from django.contrib.gis import geos\n oqp.region = geos.Polygon(\n ((-122.2, 38.0), (-121.7, 38.0), (-121.7, 37.5),\n (-122.2, 37.5), (-122.2, 38.0)))\n oqp.save()\n job = OqJob(oq_params=oqp, owner=upload.owner, job_type=\"classical\")\n job.save()\n if create_job_path:\n job.path = os.path.join(upload.path, str(job.id))\n os.mkdir(job.path)\n os.chmod(job.path, 0777)\n job.save()\n return job",
"def deploy_job(\n image_uri,\n chief_config,\n worker_count,\n worker_config,\n entry_point_args,\n enable_stream_logs,\n job_labels=None,\n service_account=None,\n):\n job_id = _generate_job_id()\n project_id = gcp.get_project_name()\n ml_apis = discovery.build(\n \"ml\",\n \"v1\",\n cache_discovery=False,\n requestBuilder=google_api_client.TFCloudHttpRequest,\n )\n\n request_dict = _create_request_dict(\n job_id,\n gcp.get_region(),\n image_uri,\n chief_config,\n worker_count,\n worker_config,\n entry_point_args,\n job_labels=job_labels or {},\n service_account=service_account\n )\n try:\n unused_response = (\n ml_apis.projects()\n .jobs()\n .create(parent=\"projects/{}\".format(project_id), body=request_dict)\n .execute()\n )\n _print_logs_info(job_id, project_id)\n if enable_stream_logs:\n _stream_logs(job_id)\n except errors.HttpError as err:\n print(\"There was an error submitting the job.\")\n raise err\n return job_id",
"def create_job_threadsafe(self, image_name, image_checksum, project, cloud_account_names=None):\n future = concurrent.futures.Future()\n\n def on_done(asyncio_future):\n if asyncio_future.exception() is not None:\n future.set_exception(asyncio_future.exception())\n\n elif asyncio_future.result() is not None:\n future.set_result(asyncio_future.result())\n\n def add_task():\n task = self._loop.create_task(\n self.create_job(image_name, image_checksum, project, cloud_account_names)\n )\n task.add_done_callback(on_done)\n\n self._loop.call_soon_threadsafe(add_task)\n return future.result()",
"def upload():\n\n # TODO: decorator to check token\n token = request.headers.get(\"Authorization\")\n\n has_text = bool(request.get_json())\n has_file = request.files and request.files[\"file\"]\n if not has_text and not has_file:\n error = \"No text input and no file provided\"\n return jsonify({\"success\": False, \"message\": error})\n\n filename, error = save_text(request)\n if error:\n return jsonify({\"success\": False, \"message\": error})\n\n job_id = schedule(filename, token)\n add_user_job(job_id, token)\n\n return jsonify({\"success\": True, \"data\": {\"jobId\": job_id}})",
"def upload_image_to_minio_package(request):\n # Instanciating MinioStoreStorage creates a Minio client from settings\n # and a bucket with the name passed to it.\n x = MinioStoreStorage(\"abdelhalim\")\n image = request.FILES[\"image_to_upload\"]\n width, height, size, imageFormat, name = fetch_metadata(image)\n\n # The name might conflict with an already existing picture\n while x.exists(name):\n name = name + \"X\"\n\n # Using Pillow to fetch metadata\n width, height, size, image_format, name = fetch_metadata(image)\n\n serializer = ImageForMinioSerializer(data={\n \"name\": name,\n \"image\": image,\n \"height\": height,\n \"width\": width,\n \"size\": size, # pillow_image.size will return (width, height)\n \"path_to_image\": \"NEEDSTOBESET\", # See presigned URLs in Minio?\n \"image_format\": image_format\n }\n )\n\n if (\n serializer.is_valid(raise_exception=True) and\n is_image(image.content_type)\n ):\n # What if saving does not go well?\n # Wrap in a try/except statement\n x._save(name, image)\n serializer.save()\n return Response({\"response\": \"Picture saved\"})",
"def get_upload(arn=None):\n pass",
"def create(self, request, *args, **kwargs):\n if request.data.get(\"audition_range\"):\n if not (\n request.data[\"audition_range\"].get(\"lower\")\n and request.data[\"audition_range\"].get(\"upper\")\n ):\n raise ValidationError(\n \"Audition date range upper and lower both needed.\"\n )\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n if serializer.validated_data.get(\"audition_range\"):\n sub_dead = serializer.validated_data[\"audition_range\"].upper - timedelta(\n days=1\n )\n serializer.validated_data.update({\"submission_deadline\": sub_dead})\n job = serializer.save()\n\n image = request.FILES.get(\"image\")\n if image:\n image_data = {\"image\": image}\n verify_image(image)\n image_data.update({\"title\": image.name})\n image_type = request.data.get(\"image_type\", \"Generic\")\n image_data.update({\"image_type\": image_type})\n image_serializer = ImageSerializer(data=image_data)\n image_serializer.is_valid(raise_exception=True)\n image_serializer.validated_data.update(\n {\n \"content_object\": job,\n }\n )\n try:\n image = Image.objects.create(**image_serializer.validated_data)\n except Exception as e:\n raise e\n return Response(serializer.data)",
"def create(self, validated_data):\n\n image = models.Image(\n url = validated_data['url'],\n uploaded_by = self.context['request'].user\n )\n\n image.save()\n\n return image",
"def initiate_multipart_upload(self):\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = self.headers_baseline, extra = \"?uploads\")\n response = self.s3.send_request(request)\n data = response[\"data\"]\n self.upload_id = getTextFromXml(data, \"UploadId\")\n return self.upload_id",
"def upload_to(instance, filename):\n return upload_image_path(filename, 'products')",
"def new_upload_image():\n log_request(request)\n\n if not valid_params(['username', 'session_id'], request.form) or\\\n not valid_params(['file'], request.files):\n logging.debug(\"Missing parameters\")\n return jsonify({'error' : 500})\n \n username = request.form['username']\n sId = request.form['session_id']\n fil = request.files['file']\n\n \n # check session before upload\n if not user.verify(username, sId):\n logging.debug(\"Invalid username or session id\")\n return jsonify({'error' : 101})\n\n if fil and allowed_file(fil.filename):\n # get the file extension\n ext = os.path.splitext(fil.filename)[1]\n # create a temporary file\n f = tempfile.NamedTemporaryFile(delete=False, dir=\"/var/www/resources/tmp/\", suffix=\"{0}\".format(ext))\n os.chmod(f.name, 0644)\n name = os.path.basename(f.name)\n f.write(fil.read())\n f.close()\n # get the dividing points for the page\n i = Image.open(f.name)\n divs = divLines(i)\n del i\n # return the dividing points and the name of the page in json form\n return jsonify(\n name = name,\n divs = divs,\n error = 0)\n else:\n logging.debug(\"Image processing failed, invalid filetype?\")\n return jsonify({'error' : 200})",
"def insert_job(sess, filetype, status, type_id, submission, job_id=None, filename=None,\n file_size=None, num_rows=None):\n job = Job(\n file_type_id=filetype,\n job_status_id=status,\n job_type_id=type_id,\n submission_id=submission,\n original_filename=filename,\n file_size=file_size,\n number_of_rows=num_rows\n )\n if job_id:\n job.job_id = job_id\n sess.add(job)\n sess.commit()\n return job",
"def create_image(self, image=None):\n if image is None:\n image = self.image\n\n current_image_id = self.check_for_updated_image(image)\n if current_image_id is not None:\n return current_image_id\n\n def wait_for_image_state(account, image_id, state, timeout=300):\n state = state.lower()\n current_state = 'unknown'\n\n while current_state != state:\n rc, image_info = self.cal.get_image(account, image_id)\n current_state = image_info.state.lower()\n\n if current_state in ['failed']:\n raise ValidationError('Image [{}] entered failed state while waiting for state [{}]'.format(image_id, state))\n\n if current_state != state:\n time.sleep(1)\n\n if current_state != state:\n logger.error('Image still in state [{}] after [{}] seconds'.format(current_state, timeout))\n raise TimeoutError('Image [{}] failed to reach state [{}] within timeout [{}]'.format(image_id, state, timeout))\n\n return image_info\n\n logger.debug(\"Uploading VM Image: %s\", image.name)\n rc, image_id = self.cal.create_image(self.account, image)\n assert rc == RwTypes.RwStatus.SUCCESS\n image_info = wait_for_image_state(self.account, image_id, 'active')\n\n return image_id",
"def upload(self, upload_id):\r\n return u.Upload(self, upload_id)"
] |
[
"0.7061483",
"0.64601487",
"0.59854835",
"0.5850496",
"0.58365744",
"0.5815387",
"0.57524824",
"0.56940156",
"0.5630146",
"0.5604861",
"0.5588327",
"0.5575735",
"0.5562993",
"0.5536786",
"0.55213976",
"0.551275",
"0.55043775",
"0.54979646",
"0.54731596",
"0.5472851",
"0.5471495",
"0.5465363",
"0.5461131",
"0.5459522",
"0.5455238",
"0.53970784",
"0.53845483",
"0.53841805",
"0.5384056",
"0.5366805"
] |
0.7056711
|
1
|
Wait until the upload job reaches a terminal state
|
def wait_until_complete(self):
self._log.debug("waiting for upload job %s to complete", self._job_id)
xpath = ManoProject.prefix_project("D,/rw-image-mgmt:upload-jobs/" +
"rw-image-mgmt:job[rw-image-mgmt:id={}]".
format(quoted_key(str(self._job_id))),
project=self._project,
log=self._log)
while True:
query_iter = yield from self._dts.query_read(xpath)
job_status_msg = None
for fut_resp in query_iter:
job_status_msg = (yield from fut_resp).result
break
if job_status_msg is None:
raise UploadJobError("did not get a status response for job_id: %s",
self._job_id)
if job_status_msg.status == "COMPLETED":
msg = "upload job %s completed successfully" % self._job_id
self._log.debug(msg)
return
elif job_status_msg.status == "FAILED":
msg = "upload job %s as not successful: %s" % (self._job_id, job_status_msg.status)
self._log.error(msg)
raise UploadJobFailed(msg)
elif job_status_msg.status == "CANCELLED":
msg = "upload job %s was cancelled" % self._job_id
self._log.error(msg)
raise UploadJobCancelled(msg)
yield from asyncio.sleep(.5, loop=self._loop)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wait_until_job_completes(self):\n while True:\n jobflow = self.conn.describe_jobflow(self.jobid)\n if self.verbose_mode:\n print jobflow.state\n if (jobflow.state == 'COMPLETED' or jobflow.state == 'TERMINATED'\n or jobflow.state == 'FAILED'):\n break\n sleep(10)",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def upload(self):\n while not self._upload_queue.empty():\n logger.info('%d files left to upload', self._upload_queue.qsize())\n self._sia_condition_waiter.wait_for_available_upload_slot()\n job = self._upload_queue.get()\n if (not self._process_upload_job_async(job)) and (job.failure_count\n < 3):\n self._upload_queue.put(job)\n self._sia_condition_waiter.wait_for_all_uploads_to_complete()\n self._exit_event.set()",
"async def wait_until_done(self) -> None:\n ...",
"def wait_progress(self):\n pass",
"def wait_progress(self):\n pass",
"def _process_upload_job_async(self, job):\n logger.info('Uploading file to Sia: %s', job.local_path)\n try:\n return self._sia_client.upload_file_async(job.local_path,\n job.sia_path)\n except Exception as ex:\n logger.error('Upload failed: %s', ex.message)\n job.increment_failure_count()\n return False",
"def wait(self):\r\n self.jobs.join()",
"def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass",
"def wait_on_job(self, delay=10):\n while self.isJobRunning() == True:\n time.sleep(delay)\n return self.ofile_exists()",
"def wait_till_jobs_complete(self, collection_name: str, job_id: str, job_name: str):\n status = self.job_status(collection_name, job_id, job_name)\n while (\n status[\"status\"] == \"Running\"\n or status[\"status\"] == \"Started\"\n or status[\"status\"] == \"NotStarted\"\n ):\n status = self.job_status(collection_name, job_id, job_name)\n time.sleep(15)\n print(status)\n return \"Done\"",
"def check_upload_complete(self, job_id):\n post_json = {\"upload_id\": job_id}\n return self.app.post_json(\"/v1/finalize_job/\", post_json, headers={\"x-session-id\": self.session_id})",
"def waitUntilSuccess():",
"def wait(self):\n self.Popen.wait()",
"def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)",
"async def job_wait(self, uid):\n self._require_running()\n await self._get_job(uid).wait()",
"def wait_finish(self):\r\n self.proc.join()",
"def wait(self):\n if not self.submitted:\n if _conf.get_option('jobs', 'auto_submit'):\n _logme.log('Auto-submitting as not submitted yet', 'debug')\n self.submit()\n _sleep(0.5)\n else:\n _logme.log('Cannot wait for result as job has not been ' +\n 'submitted', 'warn')\n return False\n _sleep(0.1)\n self.update()\n if self.done:\n return True\n _logme.log('Waiting for self {}'.format(self.name), 'debug')\n if self.queue.wait(self) is not True:\n return False\n # Block for up to file_block_time for output files to be copied back\n btme = _conf.get_option('jobs', 'file_block_time')\n # btme = 2\n start = _dt.now()\n lgd = False\n while True:\n if not lgd:\n _logme.log('Checking for output files', 'debug')\n lgd = True\n count = 0\n for i in self.outfiles:\n if _os.path.isfile(i):\n count += 1\n if count == len(self.outfiles):\n _logme.log('All output files found in {} seconds'\n .format(count), 'debug')\n break\n _sleep(0.1)\n if (_dt.now() - start).seconds > btme:\n _logme.log('Job completed but files have not appeared for ' +\n '>{} seconds'.format(btme))\n return False\n self.update()\n return True",
"def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return",
"def do_wait(self):\n pass",
"async def wait(self):\n if self._state in (JobState.PENDING, JobState.RUNNING):\n await self._process.wait()",
"def waitForThreadCompletion(self) -> None:\n self.writer.waitForThreadCompletion()",
"def wait(self, timeoout=None, state=\"C-completed\"):",
"def wait_jobs_completed():\n\n logging.info(\"checking jobs\")\n time.sleep(30)\n while True:\n running_jobs = []\n for job in bq.list_jobs(state_filter='RUNNING', all_users=True):\n running_jobs.append(job)\n logging.info(\"running jobs {}\".format(len(running_jobs)))\n if not running_jobs:\n break\n time.sleep(30)",
"def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()",
"def wait():\n pass",
"async def _job(self):\n await asyncio.sleep(self._timeout)\n await self._callback()"
] |
[
"0.7011266",
"0.69578224",
"0.69578224",
"0.69578224",
"0.69578224",
"0.6860455",
"0.68326867",
"0.6693386",
"0.6693386",
"0.6650743",
"0.6615907",
"0.6586633",
"0.6496838",
"0.6451071",
"0.64483374",
"0.63990617",
"0.6358756",
"0.6353165",
"0.62730867",
"0.626102",
"0.62284875",
"0.62273955",
"0.61849535",
"0.6181539",
"0.6175829",
"0.6173353",
"0.61664176",
"0.6152021",
"0.6120874",
"0.610349"
] |
0.74622226
|
0
|
A simpler version of exit_based_on_results(); this function causes the script to exit normally with a return value of zero if and only if all tests within the script passed and had no errors. Otherwise it exits with the number of failures plus the number of errors.
|
def simple_exit(results):
if results.wasSuccessful():
_exit(0)
else:
nfail = len(results.errors)+len(results.failures)
_exit(nfail)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exit_based_on_results(results):\n NotImpErrors = 0\n for error in results.errors:\n for errormsg in error:\n if type(errormsg) is str:\n if 'NotImplemented' in errormsg:\n NotImpErrors +=1\n break\n if results.wasSuccessful():\n _exit(0)\n elif len(results.failures)==0 and len(results.errors)==NotImpErrors:\n _exit(1)\n else:\n _exit(2)",
"def check_exit_code(results):\n assert results[\"metrics\"][\"Exit code\"] == 0",
"def exitTest(self, exitOutputLine):\n # determine which (if any) atom exited in this line\n curAtom = self.getAtomType(exitOutputLine)\n\n # run all test functions\n errNum = 1\n for test in self.exitTests:\n if not test(curAtom):\n return errNum\n errNum += 1\n\n # no errors, update exited counts, return success.\n self.departed[curAtom] += 1\n return 0",
"def test_check_exit_status(self):\n run_dir_success = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n success_run = MinIONqc(run_dir_success, None, None)\n self.assertTrue(success_run.check_exit_status('data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/.exitcode_for_nanoseq'))\n run_dir_fail = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2'\n fail_run = MinIONqc(run_dir_fail, None, None)\n self.assertFalse(fail_run.check_exit_status('data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/.exitcode_for_nanoseq'))",
"def analyze_results(self, results):\n ok_c = 0\n ko_c = 0\n for row in results:\n if \"opentsdb.health\" not in row[2] and \".health\" in row[2]:\n if row[4] == \"ERROR\":\n ko_c += 1\n else:\n ok_c += 1\n return ok_c, ko_c",
"def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")",
"def test_fixture_missing_results(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"missing_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no missing results') == 0)\n assert(out_str.count('results missing for:') == 2)\n\n # Run energy_central and re-check output with optional flag for completed results\n subprocess.run([\"smif\", \"run\", \"energy_central\", \"-d\", config_dir], stdout=subprocess.PIPE)\n output = subprocess.run([\"smif\", \"missing_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no missing results') == 2)\n assert(out_str.count('results missing for:') == 0)",
"def done(exit = True, result = main_result):\n failures = result.failed\n result.finish()\n if exit:\n sys.exit(failures)",
"def _check_n_results(self, n_results, sub):\n \n if sub[1].upper() != \"S\":\n try:\n int(n_results)\n if int(n_results) == 0:\n raise ValueError\n except ValueError:\n raise ValueError",
"def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True",
"def _failed_tests(self, metric_source_id: str) -> int:\n return self.__test_count(metric_source_id, 'failed')",
"def determine_exit_code(self) -> int:",
"def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')",
"def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))",
"def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)",
"def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun",
"def execute_tests():\n\n if len(sys.argv) > 1:\n # Filter test list based on command line requests\n tests_to_run = []\n for requested in sys.argv[1:]:\n for func, param in registered_tests:\n if param == requested:\n tests_to_run += [(func, param)]\n break\n else:\n print('Unknown test ' + requested)\n sys.exit(1)\n else:\n tests_to_run = registered_tests\n\n failing_tests = []\n for func, param in tests_to_run:\n print(param + (' ' * (OUTPUT_ALIGN - len(param))), end='')\n sys.stdout.flush()\n try:\n func(param)\n print(COLOR_GREEN + 'PASS' + COLOR_NONE)\n except KeyboardInterrupt:\n sys.exit(1)\n except TestException as exc:\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, exc.args[0])]\n except Exception as exc: # pylint: disable=W0703\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, 'Test threw exception:\\n' +\n traceback.format_exc())]\n\n if failing_tests:\n print('Failing tests:')\n for name, output in failing_tests:\n print(name)\n print(output)\n\n print(str(len(failing_tests)) + '/' +\n str(len(tests_to_run)) + ' tests failed')\n if failing_tests != []:\n sys.exit(1)",
"def parse_test_output(txt):\n\n err_m = re.search(r'^FAILED \\(errors=(\\d+)\\)', txt, re.MULTILINE)\n if err_m:\n nerr = int(err_m.group(1))\n nfail = 0\n return nerr, nfail\n\n fail_m = re.search(r'^FAILED \\(failures=(\\d+)\\)', txt, re.MULTILINE)\n if fail_m:\n nerr = 0\n nfail = int(fail_m.group(1))\n return nerr, nfail\n\n both_m = re.search(r'^FAILED \\(errors=(\\d+), failures=(\\d+)\\)', txt,\n re.MULTILINE)\n if both_m:\n nerr = int(both_m.group(1))\n nfail = int(both_m.group(2))\n return nerr, nfail\n\n # If the input didn't match any of these forms, assume no error/failures\n return 0, 0",
"def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4",
"def CheckExpectedOutput(output, expected):\n failures = 0\n for ex in expected:\n match = re.search(ex, output)\n if not match:\n print('Test match failed:')\n print('Searching for regex:', ex)\n failures += 1\n if failures:\n print('output:\\n', output)\n return failures",
"def failing(repo_url=None, list_tests=False, subunit=False, stdout=sys.stdout):\n repo = util.get_repo_open(repo_url=repo_url)\n run = repo.get_failing()\n if subunit:\n return _show_subunit(run)\n case = run.get_test()\n failed = False\n result, summary = _make_result(repo, list_tests=list_tests)\n result.startTestRun()\n try:\n case.run(result)\n finally:\n result.stopTestRun()\n failed = not results.wasSuccessful(summary)\n if failed:\n result = 1\n else:\n result = 0\n if list_tests:\n failing_tests = [test for test, _ in summary.errors + summary.failures]\n output.output_tests(failing_tests, output=stdout)\n return result",
"def test_run_and_check_result(self):\n # Run a successful command.\n result = build_cmake_project.run_and_check_result('echo hello world')\n self.assertTrue(result)\n\n # Run a failure command.\n try:\n result = build_cmake_project.run_and_check_result('unexistent --command')\n except subprocess.CalledProcessError:\n self.fail('Exception thrown when running unexistent command.')\n self.assertFalse(result)",
"def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")",
"def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures",
"def excecute(self):\r\n self.initialize()\r\n self.addteststeps()\r\n for teststep in self.test_steps_list:\r\n if teststep.run() == TestStatus.PASS:\r\n logging.info(\"test {} passed the test\".format(teststep.stepname))\r\n self.result = TestStatus.PASS\r\n else:\r\n logging.warn(\"test {} failed the test\".format(teststep.stepname))\r\n self.result = TestStatus.FAIL\r\n self.cleanup()\r\n return self.result",
"def test_fixture_available_results(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"available_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no results') == 2)\n assert(out_str.count('decision') == 0)\n\n # Run energy_central and re-check output with optional flag for completed results\n subprocess.run([\"smif\", \"run\", \"energy_central\", \"-d\", config_dir], stdout=subprocess.PIPE)\n output = subprocess.run([\"smif\", \"available_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no results') == 0)\n assert(out_str.count('decision') == 8)\n assert(out_str.count('decision 1') == 2)\n assert(out_str.count('decision 2') == 2)\n assert(out_str.count('decision 3') == 2)\n assert(out_str.count('decision 4') == 2)\n assert(out_str.count(': 2010') == 4)\n assert(out_str.count(': 2015') == 2)\n assert(out_str.count(': 2020') == 2)",
"def test_exit_does_raise(result_count, catastrophic, exit_zero, value,\n application):\n application.result_count = result_count\n application.catastrophic_failure = catastrophic\n application.options = options(exit_zero=exit_zero)\n\n with pytest.raises(SystemExit) as excinfo:\n application.exit()\n\n assert excinfo.value.args[0] is value",
"def check_expectations(self):\n self.load_results()\n\n for (benchmark, producer), result in self.results.items():\n if not result.reports:\n print('No results found for ' + benchmark + ' ' + producer)\n result.test_passed = False\n else:\n for report in result.reports:\n if check_benchmark_result(report, result.expectation):\n print('Test passed: ' + result.directory)\n result.test_passed = True\n else:\n print('Test failed: ' + result.directory)\n result.test_passed = False",
"def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir-metadata',\n type=pathlib.Path, required=True)\n\n args = parser.parse_args()\n\n with LockedMetadata(args.dir_metadata, __file__) as md:\n summary_dict = {}\n passing_tests = []\n failing_tests = []\n for f in md.tests_pickle_files:\n try:\n trr = TestRunResult.construct_from_pickle(f)\n summary_dict[f\"{trr.testname}.{trr.seed}\"] = \\\n ('PASS' if trr.passed else\n 'FAILED' + (\" {T}\" if (trr.failure_mode == Failure_Modes.TIMEOUT) else \"\"))\n if trr.passed:\n passing_tests.append(trr)\n else:\n failing_tests.append(trr)\n except RuntimeError as e:\n failing_tests.append(\n TestRunResult(\n name='broken_test',\n failure_message=str(e)\n ))\n\n md.regr_log = md.dir_run/'regr.log'\n md.regr_log_junit = md.dir_run/'regr_junit.xml'\n md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'\n\n # Write results as junit_xml\n with open(md.regr_log_junit,\n 'w',\n encoding='UTF-8') as junit_xml,\\\n open(md.regr_log_junit_merged,\n 'w',\n encoding='UTF-8') as junit_merged_xml:\n output_run_results_junit_xml(passing_tests, failing_tests,\n junit_xml,\n junit_merged_xml)\n\n with open(md.regr_log, 'w', encoding='UTF-8') as outfile:\n # Write results as regr.log (custom logfile format)\n output_results_text(passing_tests, failing_tests, summary_dict,\n outfile)\n\n test_summary_dict = create_test_summary_dict(passing_tests +\n failing_tests)\n\n cov_summary_dict = {}\n if md.simulator == \"xlm\":\n cov_summary_dict = create_cov_summary_dict(md)\n else:\n print(\"Warning: Not generating coverage summary, unsupported \" \\\n f\"simulator {md.simulator}\")\n\n html_report_filename = md.dir_run/'report.html'\n with open(html_report_filename, 'w') as outfile:\n output_results_html(md, passing_tests + failing_tests,\n test_summary_dict, cov_summary_dict, outfile)\n\n json_report_filename = md.dir_run/'report.json'\n with open(json_report_filename, 'w') as json_report_file:\n output_results_dvsim_json(md, test_summary_dict, cov_summary_dict,\n json_report_file)\n\n svg_summary_filename = md.dir_run/'summary.svg'\n with open(svg_summary_filename, 'w') as svg_summary_file:\n output_results_svg(test_summary_dict, cov_summary_dict,\n svg_summary_file)\n\n # Print a summary line to the terminal\n print(gen_summary_line(passing_tests, failing_tests))\n\n # Succeed if no tests failed\n return 1 if failing_tests else 0",
"def check_training_result_files(folder, ruleset, quiet, werror):\n\n too_many_errors = False\n result_folder = os.path.join(folder, 'results')\n for system_folder in _get_sub_folders(result_folder):\n for benchmark_folder in _get_sub_folders(system_folder):\n folder_parts = benchmark_folder.split('/')\n benchmark = folder_parts[-1]\n system = folder_parts[-2]\n\n # If it is not a recognized benchmark, skip further checks.\n if benchmark not in _ALLOWED_BENCHMARKS:\n print('Skipping benchmark: {}'.format(benchmark))\n continue\n\n # Find all result files for this benchmark.\n pattern = '{folder}/result_*.txt'.format(folder=benchmark_folder)\n result_files = glob.glob(pattern, recursive=True)\n\n # No result files were found. That is okay, because the organization\n # may not have submitted any results for this benchmark.\n if not result_files:\n print('No Result Files!')\n continue\n\n _print_divider_bar()\n print('System {}'.format(system))\n print('Benchmark {}'.format(benchmark))\n\n # If the organization did submit results for this benchmark, the number\n # of result files must be an exact number.\n if len(result_files) != _EXPECTED_RESULT_FILE_COUNTS[benchmark]:\n print('Expected {} runs, but detected {} runs.'.format(\n _EXPECTED_RESULT_FILE_COUNTS[benchmark],\n len(result_files)))\n\n errors_found = 0\n result_files.sort()\n for result_file in result_files:\n result_basename = os.path.basename(result_file)\n result_name, _ = os.path.splitext(result_basename)\n run = result_name.split('_')[-1]\n\n # For each result file, run the benchmark's compliance checks.\n _print_divider_bar()\n print('Run {}'.format(run))\n config_file = '{ruleset}/common.yaml'.format(\n ruleset=ruleset,\n benchmark=benchmark)\n checker = mlp_compliance.make_checker(\n ruleset=ruleset,\n quiet=quiet,\n werror=werror)\n valid, _, _, _ = mlp_compliance.main(result_file, config_file, checker)\n if not valid:\n errors_found += 1\n if errors_found == 1:\n print('WARNING: One file does not comply.')\n print('WARNING: Allowing this failure under olympic scoring rules.')\n if errors_found > 1:\n too_many_errors = True\n\n _print_divider_bar()\n if too_many_errors:\n raise Exception('Found too many errors in logging, see log above for details.')"
] |
[
"0.76364",
"0.7418066",
"0.6828983",
"0.66794246",
"0.65226054",
"0.64622444",
"0.6386139",
"0.63859236",
"0.63789564",
"0.63374364",
"0.6336109",
"0.6326882",
"0.6310747",
"0.630126",
"0.62832946",
"0.6256901",
"0.61972606",
"0.6194782",
"0.6180795",
"0.6180723",
"0.61695355",
"0.61435515",
"0.6120895",
"0.60970706",
"0.6086983",
"0.60618275",
"0.6060199",
"0.60445297",
"0.60345346",
"0.6024366"
] |
0.7812444
|
0
|
A probably obsolete function to exit from a unit test script with a status that depends on whether or not the only errors or failures were NotImplemented errors. Specifically:
|
# `_exit` is assumed to be imported elsewhere in the original script; os._exit is
# one common choice and is used here so the snippet runs standalone.
from os import _exit

def exit_based_on_results(results):
    # Count errors whose message mentions NotImplemented.
    NotImpErrors = 0
    for error in results.errors:
        for errormsg in error:
            if type(errormsg) is str:
                if 'NotImplemented' in errormsg:
                    NotImpErrors += 1
                    break
    if results.wasSuccessful():
        _exit(0)
    elif len(results.failures) == 0 and len(results.errors) == NotImpErrors:
        # The only problems were NotImplemented errors: exit with a distinct status.
        _exit(1)
    else:
        _exit(2)
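
# Minimal usage sketch (assumption: tests are collected with the standard unittest
# loader from the current directory, which is hypothetical here).
if __name__ == "__main__":
    import unittest
    suite = unittest.defaultTestLoader.discover(".")
    results = unittest.TextTestRunner().run(suite)
    exit_based_on_results(results)  # 0 = success, 1 = only NotImplemented errors, 2 = real failures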
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_exit_code(results):\n assert results[\"metrics\"][\"Exit code\"] == 0",
"def simple_exit(results):\n if results.wasSuccessful():\n _exit(0)\n else:\n nfail = len(results.errors)+len(results.failures)\n _exit(nfail)",
"def test_xfailed_but_passed():\n pass",
"def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)",
"def test_check_if_not_error(self):\n actual_result = ExecutionExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def test_check_exit_status(self):\n run_dir_success = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n success_run = MinIONqc(run_dir_success, None, None)\n self.assertTrue(success_run.check_exit_status('data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/.exitcode_for_nanoseq'))\n run_dir_fail = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2'\n fail_run = MinIONqc(run_dir_fail, None, None)\n self.assertFalse(fail_run.check_exit_status('data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/.exitcode_for_nanoseq'))",
"def test_deploy_exit_code(deploy_result: Result) -> None:\n assert deploy_result.exit_code != 0",
"def fail_with(s):\n print \"[FAILURE] %s\" % s\n sys.exit(1)",
"def exit(status=None): # real signature unknown; restored from __doc__\n pass",
"def _check_for_errors(self, status):\r\n\r\n # Case when test suite name is misspelled or file doesn't exist\r\n if status == 252:\r\n sys.stderr.write('Test suite \"{}\" was not found in path {}\\n'.format(self.name, self.path))\r\n print 'Return code is {}'.format(status)",
"def test_case_01(self):\n if True:\n self.fail()",
"def test_check_if_not_error(self):\n actual_result = SshpassErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def test_failed():\n assert False",
"def test_check_if_not_error(self):\n actual_result = BaseErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def test_deploy_exit_code(deploy_result: Result) -> None:\n assert deploy_result.exit_code == 0",
"def execute_failure(self, *args, **kwargs):\n return 1, \"\", None",
"def test_check_if_error(self):\n with self.assertRaises(MyError):\n ExecutionExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()",
"def determine_exit_code(self) -> int:",
"def _handle_failure(self, proc, test_case):\n if proc.returncode != 0:\n print('ERROR: Test execution failed: {}'.format(test_case.get_name()))\n stdout, stderr = proc.communicate()\n raise TestCaseFailure('Test case {} failed. stdout: {}, stderr: {}, '\n 'return code: {}.'.format(test_case.get_name(),\n stdout, stderr,\n proc.returncode))",
"def test_fails(self):\n raise FoolishError(\"I am a broken test\")",
"def test_xfail_with_run_false_and_with_reason():\n pass",
"def test_deploy_no_change_exit_code(deploy_no_change_result: Result) -> None:\n assert deploy_no_change_result.exit_code == 0",
"def expected_failure(self) -> int:\n return 139",
"def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(ERROR_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))",
"def indicate_failure(self):\n pass",
"def end_script(status):\n if status is not 0:\n print(\"Failure occurred: \" + str(status))\n sys.exit(status)",
"def assertFailStatus(self, code, options, arguments):\n exc = self.assertRaises(SystemExit, options.parseOptions, arguments)\n self.assertEqual(exc.args, (code,))",
"def exitTest(self, exitOutputLine):\n # determine which (if any) atom exited in this line\n curAtom = self.getAtomType(exitOutputLine)\n\n # run all test functions\n errNum = 1\n for test in self.exitTests:\n if not test(curAtom):\n return errNum\n errNum += 1\n\n # no errors, update exited counts, return success.\n self.departed[curAtom] += 1\n return 0"
] |
[
"0.71618015",
"0.6987518",
"0.68979514",
"0.68465614",
"0.6811039",
"0.6778233",
"0.66374123",
"0.6611588",
"0.6609739",
"0.65894103",
"0.6573683",
"0.6560653",
"0.65252155",
"0.6486731",
"0.6451126",
"0.6423018",
"0.6410285",
"0.63805205",
"0.6364145",
"0.63522345",
"0.6344553",
"0.63387793",
"0.63225317",
"0.6318501",
"0.6301107",
"0.62628514",
"0.62598133",
"0.6252438",
"0.62505287",
"0.6205"
] |
0.74607766
|
0
|
Helper function to build a JSON response with the given status code and data.
|
import json  # needed for json.dumps below

def respond(code, data):
    return {
        'statusCode': code,
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': json.dumps(data)
    }
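
# Usage sketch: the returned dict matches the proxy-style response shape expected
# by services such as AWS API Gateway; the handler below is hypothetical.
def lambda_handler(event, context):
    try:
        return respond(200, {"message": "ok"})
    except Exception as exc:
        return respond(500, {"error": str(exc)})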
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_json_response(data, status=True, code=200):\n\n to_serialize = {}\n if status:\n to_serialize['status'] = True\n if data is not None:\n to_serialize['result'] = data\n else:\n to_serialize['status'] = False\n to_serialize['error'] = data\n response = app.response_class(\n response=json.dumps(to_serialize),\n status=code,\n mimetype='application/json'\n )\n return response",
"def response_json(data, status=200):\n\n response = jsonify(**data)\n response.status_code = status\n\n return response",
"def create_response(data={}, status=200, message=''):\n response = {\n 'success': 200 <= status < 300,\n 'code': status,\n 'message': message,\n 'result': data\n }\n return jsonify(response), status",
"def response_handler(r_data, status_code):\n import json\n\n r_json = json.dumps(r_data)\n\n r = Response(\n response=r_json,\n status=status_code,\n content_type='application/json'\n )\n\n return r",
"def response(status, message, code):\n return make_response(jsonify({\n 'status': status,\n 'message': message\n })), code",
"def create_response(data={}, status=200, message=''):\n if type(data) is not dict:\n raise TypeError('Data should be a dictionary 😞')\n\n response = {\n 'success': 200 <= status < 300,\n 'code': status,\n 'message': message,\n 'result': data\n }\n return jsonify(response), status",
"def json_response(data, status = 200, **kwargs):\n\t# Conversion function is needed for datetime objects :/\n\tdef jsonconvert(obj):\n\t\tif isinstance(obj, datetime) or isinstance(obj, date): return obj.isoformat()\n\t\treturn str(obj)\n\tresp = Response(json.dumps(data, default=jsonconvert), status=status, mimetype='application/json')\n\tfor k, v in kwargs.get('headers', {}).items(): resp.headers[k] = v\n\treturn resp",
"def return_response(data, status_code, dumps=True):\n data = json.dumps(data) if dumps else data\n return Response(data, status_code, content_type='application/json')",
"def send_response(data: dict = None, error: str = None, status_code: int = 200):\n if data is None:\n data = {}\n\n response = {\"data\": data, \"error\": error}\n\n return jsonify(response), status_code",
"def JsonResponse(data, status=http.OK, **kwargs):\n if isinstance(data, (basestring, int, float, bool)):\n data = {\"ok\": data}\n if data is None or not isinstance(data, (dict, list, tuple)):\n return data\n kwargs.setdefault(\"content_type\", \"application/json\")\n response = HttpResponse(serialize(data),\n status=status, **kwargs)\n response.csrf_exempt = True\n return response",
"def create_error_response(data: Dict[str, str], status_code: int) -> Response:\n resp = jsonify(data)\n resp.status_code = status_code\n return resp",
"def json_response(self, out, code=200):\n self.response.set_status(code)\n self.response.headers[CONTENT_TYPE] = CONTENT_TYPE_JSON\n self.response.out.write(json.dumps(out))",
"def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK):\n response = {\n u'code': code.value,\n u'result': result}\n request.setHeader('content-type', 'application/json')\n request.setResponseCode(status)\n request.write(json.dumps(response))\n request.finish()",
"def output_json(data, code, headers=None):\n response = make_response(json.dumps(data), code)\n response.headers.extend(headers or {})\n return response",
"def get_response_json(status, msg):\n\tresponse = dict()\n\tresponse['status'] = status\n\tresponse['result'] = msg\n\treturn jsonify(**response)",
"def return_json_error(msg, status_code):\n return Response(response=json.dumps({'message': str(msg)}), status=status_code, mimetype=\"application/json\")",
"def json_response(resp: dict, http_code: int) -> response.HTTPResponse:\n # escape_forward_slashes: https://github.com/huge-success/sanic/issues/1019\n return response.json(resp, status=http_code,\n escape_forward_slashes=False)",
"def response(content=None, error_code='0', message=''):\n if error_code == '0':\n data = {\n 'success': True,\n 'errorCode': error_code,\n 'data': content\n }\n else:\n data = {\n 'success': False,\n 'errorCode': error_code,\n 'errorMsg': message,\n }\n resp = jsonify(data)\n\n return resp",
"def json_response(content=\"\", status=\"OK\", status_code=200, error=\"\"):\n wrapper = {\n 'status': status,\n 'status_code': status_code,\n 'output': content,\n 'error': error\n }\n response = json.dumps(wrapper, cls=DjangoJSONEncoder, indent=4)\n return HttpResponse(response, content_type='application/json', status=status_code)",
"def _json(self, response, status_code):\n if isinstance(status_code, numbers.Integral):\n status_code = (status_code,)\n\n if response.status_code in status_code:\n return response.json()\n else:\n print(\"Response has status \"\n \"code {} not {}\".format(response.status_code,\n status_code))\n return None",
"def _json(response, status_code):\n if isinstance(status_code, numbers.Integral):\n status_code = (status_code,)\n\n if response.status_code in status_code:\n return response.json()\n else:\n raise RuntimeError(\"Response has status \"\n \"code {} not {}\".format(response.status_code,\n status_code))",
"def _http_response(response, http_status_code):\n return make_response(jsonify(response), http_status_code)",
"def error_response(status_code, message=None):\n payload = {'error': str(status_code)+\" : \"+HTTP_STATUS_CODES.get(status_code, \"Unknown Error\")}\n if message:\n payload['message'] = message\n response = jsonify(payload)\n response.status_code = status_code\n return response",
"def create_response(\n data: dict = None, status: int = 200, message: str = \"\"\n) -> Tuple[Response, int]:\n if type(data) is not dict and data is not None:\n raise TypeError(\"Data should be a dictionary 😞\")\n\n response = {\n \"code\": status,\n \"success\": 200 <= status < 300,\n \"message\": message,\n \"result\": data,\n }\n return jsonify(response), status",
"def status(code=200):\n\treturn jsonify(server.status_data()), code",
"def json( self, data ):\n self.res.status = 202\n self.res.content_type = 'application/json'\n self.res.charset = None\n self.res.content_length = None\n self.start_response(self.res.status, self.res.headerlist)\n return str(data)",
"def custom_response(status, details):\n return app.response_class(status=status,\n mimetype='application/json',\n response=json.dumps({\"status\": status,\n \"details\": details}))",
"def json_status():\n return jsonify({\"status\": \"OK\"})",
"def write_response(self, status_code, json_body):\n self.send_response(status_code)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.write_json(json_body)",
"def render_to_json_response(self, data: Optional[Dict] = {}, meta: Optional[Dict] = {},\n error: Optional[str] = '', status=HTTPStatus.OK, **response_kwargs):\n response_data = {\"body\": data, \"meta\": meta, \"error\": error}\n return JsonResponse(response_data, status=status, **response_kwargs)"
] |
[
"0.82891816",
"0.8202379",
"0.7970874",
"0.7904336",
"0.789215",
"0.77233076",
"0.7712879",
"0.76846516",
"0.76841486",
"0.75793076",
"0.73921037",
"0.7388529",
"0.7360723",
"0.7353334",
"0.731192",
"0.7300626",
"0.7272777",
"0.7260774",
"0.72089255",
"0.72054183",
"0.720473",
"0.7137243",
"0.71232784",
"0.71062",
"0.70871127",
"0.7080482",
"0.70749235",
"0.70631295",
"0.70556855",
"0.70402807"
] |
0.83790994
|
0
|
Loads a series of known mnemonics and their generated addresses on account 0, index 0.
|
def mnemonics(tdd):
return load_data(tdd, "mnemonics.json")
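
# Hypothetical consumer: `tdd` and `load_data` are assumed to be a test-data
# directory fixture and a JSON loader defined elsewhere in the suite. If the
# helper above is registered as a pytest fixture, a test can request it by name:
def test_mnemonics_cover_account0_index0(mnemonics):
    for entry in mnemonics:
        assert entry  # each known mnemonic/address pair should be non-empty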
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_lemma_pos_offset_map():\n lemma_pos_offset_map = defaultdict(dict)\n ##pos_lemma_offset_map = defaultdict(dict)\n for suffix in _FILEMAP.values():\n # parse each line of the file (ignoring comment lines)\n with open(wordnet_dir+'index.%s' % suffix) as fin:\n for i, line in enumerate(fin):\n if line.startswith(' '):\n continue\n _iter = iter(line.split())\n def _next_token():\n return next(_iter)\n try:\n # get the lemma and part-of-speech\n lemma = _next_token()\n pos = _next_token()\n # get the number of synsets for this lemma\n n_synsets = int(_next_token())\n assert n_synsets > 0\n # get and ignore the pointer symbols for all synsets of\n # this lemma\n n_pointers = int(_next_token())\n [_next_token() for _ in range(n_pointers)]\n # same as number of synsets\n n_senses = int(_next_token())\n assert n_synsets == n_senses\n # get and ignore number of senses ranked according to\n # frequency\n _next_token()\n # get synset offsets\n synset_offsets = [int(_next_token()) for _ in range(n_synsets)]\n\n # raise more informative error with file name and line number\n except (AssertionError, ValueError) as e:\n tup = ('index.%s' % suffix), (i + 1), e\n raise WordNetError('file %s, line %i: %s' % tup)\n\n # map lemmas and parts of speech to synsets\n lemma_pos_offset_map[lemma][pos] = synset_offsets\n ##pos_lemma_offset_map[pos][lemma] = synset_offsets\n if pos == ADJ:\n lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets\n ##pos_lemma_offset_map[ADJ_SAT][lemma] = synset_offsets\n return lemma_pos_offset_map##, pos_lemma_offset_map",
"def load_addresses():\n with open('addresses.txt') as f:\n return [address.strip() for address in f.readlines()]",
"def assign_static_addresses(self, path = 'ebug_tab.json'):\n self.cameras = dict()\n self.ebugs = dict()\n self.unknowns = set()\n\n device_table = self.load_device_info_from_file(path)\n\n ebugs_psoc_id_list = dict((value[u'psoc_id'], key) for key, value in device_table.items())\n\n self.forget_unicast_address() #everyone should forget their current addresses\n for j in xrange(3): #repeat a few times in case of collisions\n for i in xrange(7):\n neighbours = self.neighbour_discovery(i,True) #find all neighbours that haven't been assigned an address\n for x in neighbours:\n if x in ebugs_psoc_id_list:\n for t in xrange(10):\n try:\n address = ebugs_psoc_id_list[x]\n self.set_unicast_address(x, address)\n self.set_TX_address(address)\n self.send_packet('\\x00')\n if(device_table[address]['type'] == 1):\n self.cameras[address] = device_table[address]\n else:\n self.ebugs[address] = device_table[address]\n break\n except:\n pass\n else:\n self.unknowns.add(x)\n\n self.display_devices()\n return self.cameras, self.ebugs, self.unknowns",
"def load_initial_nl_mapping(matrix: list):\n print(\"Start loading...\")\n\n # delete existed objects\n # models.NominalLabelMapping.objects.all().delete()\n\n for row in matrix[1:]:\n book = row[0].strip()\n plant_code = row[1].strip()\n model = row[2].strip()\n value = row[3]\n\n match_object = models.NominalLabelMapping.objects.filter(\n model=model,\n value=value).first()\n if not match_object:\n match_object = models.NominalLabelMapping(\n model=model,\n value=value\n )\n setattr(match_object, 'book', book)\n setattr(match_object, 'plant_code', plant_code) \n # save models\n match_object.save()",
"def _load20news_miao():\n DIR = os.path.dirname(os.path.realpath(__file__)).split('vae_sparse')[0]+'vae_sparse/optvaedatasets'\n DIR += '/20news_miao'\n h5file = DIR+'/miao.h5'\n if not os.path.exists(h5file):\n flen = len(open(DIR+'/vocab').readlines())\n print 'DIM: ',flen\n np.random.seed(1)\n TRAIN_VALID_MAT = readSparseFile(DIR+'/train.feat', flen, zeroIndexed=False)\n idx = np.random.permutation(TRAIN_VALID_MAT.shape[0])\n VALIDMAT = TRAIN_VALID_MAT[idx[:500]]\n TRAINMAT = TRAIN_VALID_MAT[idx[500:]]\n TESTMAT = readSparseFile(DIR+'/test.feat', flen, zeroIndexed=False) \n saveSparseHDF5(TRAINMAT,'train', h5file)\n saveSparseHDF5(VALIDMAT,'valid', h5file)\n saveSparseHDF5(TESTMAT, 'test' , h5file)\n dset = {}\n dset['vocabulary']= [k.strip().split(' ')[0] for k in open(DIR+'/vocab').readlines()]\n dset['train'] = loadSparseHDF5('train',h5file)\n dset['valid'] = loadSparseHDF5('valid',h5file)\n dset['test'] = loadSparseHDF5('test',h5file)\n dset['dim_observations'] = dset['train'].shape[1]\n dset['data_type'] = 'bow'\n return dset",
"def test_ipam_ip_addresses_read(self):\n pass",
"def load_accounts():\n logger.info('Loading accounts...')\n with open(\"C:\\\\Users\\\\harun\\\\Desktop\\\\BigSwede-volume-bot\\\\keys.txt\") as f:\n for key in f:\n key = key.strip()\n address = web3.eth.account.from_key(key).address\n accounts.append((address, key))",
"def get_wb_addresses(filename):\r\n if filename is not None:\r\n fptr = open(filename, 'r')\r\n firstline = fptr.readline().strip().rstrip('\\n')\r\n if firstline != '#!/bin/kcpfpg':\r\n fptr.close()\r\n raise RuntimeError('%s does not look like an fpg file we can '\r\n 'parse.' % filename)\r\n else:\r\n raise IOError('No such file %s' % filename)\r\n memorydict = {}\r\n metalist = []\r\n while True:\r\n line = fptr.readline().strip().rstrip('\\n')\r\n if line.lstrip().rstrip() == '?quit':\r\n break\r\n elif line.startswith('?meta'):\r\n # some versions of mlib_devel may mistakenly have put spaces\r\n # as delimiters where tabs should have been used. Rectify that\r\n # here.\r\n if line.startswith('?meta '):\r\n line = line.replace(' ', '\\t')\r\n # and carry on as usual.\r\n line = line.replace('\\_', ' ').replace('?meta', '')\r\n line = line.replace('\\n', '').lstrip().rstrip()\r\n #line_split = line.split('\\t')\r\n # Rather split on any space\r\n line_split = line.split()\r\n name = line_split[0]\r\n tag = line_split[1]\r\n param = line_split[2]\r\n if len(line_split[3:]) == 1:\r\n value = line_split[3:][0]\r\n else:\r\n value = ' '.join(line_split[3:])\r\n # name, tag, param, value = line.split('\\t')\r\n name = name.replace('/', '_')\r\n metalist.append((name, tag, param, value))\r\n elif line.startswith('?register'):\r\n if line.startswith('?register '):\r\n register = line.replace('\\_', ' ').replace('?register ', '')\r\n register = register.replace('\\n', '').lstrip().rstrip()\r\n name, address, size_bytes = register.split(' ')\r\n elif line.startswith('?register\\t'):\r\n register = line.replace('\\_', ' ').replace('?register\\t', '')\r\n register = register.replace('\\n', '').lstrip().rstrip()\r\n name, address, size_bytes = register.split('\\t')\r\n else:\r\n raise ValueError('Cannot find ?register entries in '\r\n 'correct format.')\r\n address = int(address, 16)\r\n size_bytes = int(size_bytes, 16)\r\n if name in memorydict.keys():\r\n raise RuntimeError('%s: mem device %s already in '\r\n 'dictionary' % (filename, name))\r\n memorydict[name] = {'address': address, 'bytes': size_bytes}\r\n fptr.close()\r\n return memorydict",
"def fetch_mnist():\n data_path = check_fetch_mnist()\n f = gzip.open(data_path, 'rb')\n try:\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")\n except TypeError:\n train_set, valid_set, test_set = pickle.load(f)\n f.close()\n train_indices = np.arange(0, len(train_set[0]))\n valid_indices = np.arange(0, len(valid_set[0])) + train_indices[-1] + 1\n test_indices = np.arange(0, len(test_set[0])) + valid_indices[-1] + 1\n return {\"data\": np.concatenate((train_set[0], valid_set[0], test_set[0]),\n axis=0).astype(theano.config.floatX),\n \"target\": np.concatenate((train_set[1], valid_set[1], test_set[1]),\n axis=0).astype(np.int32),\n \"train_indices\": train_indices.astype(np.int32),\n \"valid_indices\": valid_indices.astype(np.int32),\n \"test_indices\": test_indices.astype(np.int32)}",
"def addMeds(self):\n if self.pid in Med.meds: \n for m in Med.meds[self.pid]:\n\n # build the fills, setting some defaults\n subs = {\n 'qunit': {'qunit': '{tab}'},\n 'pbm': {'pbm': 'T00000000001011'},\n 'ncpdp': {'ncpdp': '5235235'},\n 'pharm_org': {'pharm_org': 'CVS #588'},\n 'pharm_co': {'pharm_co': 'Australia'},\n 'pharm_ci': {'pharm_ci': 'Wonder City'},\n 'pharm_pc': {'pharm_pc': '5555'},\n 'pharm_st': {'pharm_st': '111 Lake Drive'},\n 'pharm_re': {'pharm_re': 'West Australia'},\n 'prov_dea': {'prov_dea': '325555555'},\n 'prov_npi': {'prov_npi': '5235235'},\n 'prov_email': {'prov_email': '[email protected]'},\n 'prov_fn': {'prov_fn': 'Joshua'},\n 'prov_ln': {'prov_ln': 'Mandel'},\n 'prov_tel': {'prov_tel': '1-234-567-8910'},\n }\n fills_str = ''\n for f in Refill.refill_list(self.pid, m.rxn):\n self._set_default_attrs(f, subs)\n fills_str = '\\n'.join([fills_str, FULFILLMENT.sub({\n 'date': f.date,\n 'days': f.days,\n 'pbm': f.pbm,\n 'ncpdp': f.ncpdp,\n 'pharm_org': f.pharm_org,\n 'pharm_co': f.pharm_co,\n 'pharm_ci': f.pharm_ci,\n 'pharm_pc': f.pharm_pc,\n 'pharm_st': f.pharm_st,\n 'pharm_re': f.pharm_re,\n 'prov_dea': f.prov_dea,\n 'prov_npi': f.prov_npi,\n 'prov_email': f.prov_email,\n 'prov_fn': f.prov_fn,\n 'prov_ln': f.prov_ln,\n 'prov_tel': f.prov_tel,\n 'quantity': f.q,\n 'quantityUnits': f.qunit}).done()])\n\n # build the med, setting some defaults\n subs = {\n 'qtt': {'qtt': 30, 'qttunit': '{tab}'},\n 'freq': {'freq':2, 'frequnit': '/d'},\n 'prov': {'prov': 'Derived by prescription', 'prov_id': 'prescription'},\n 'end': {'end': '2010-04-09'},\n }\n self._set_default_attrs(m, subs)\n med_data = {\n 'name': m.name,\n 'rxnorm': m.rxn,\n 'endDate': m.end,\n 'frequencyValue': m.freq,\n 'frequencyUnits': m.frequnit,\n 'instructions': m.sig,\n 'provenance': m.prov,\n 'provenance_id': m.prov_id,\n 'quantityValue': m.qtt,\n 'quantityUnits': m.qttunit,\n 'startDate': m.start,\n }\n med_str = MEDICATION.sub(med_data).sub({'fills':fills_str}, escape=False).done() \n self.data.append(SDMX.sub({'models':med_str}, escape=False).done())",
"def _setup_markov_network(self, probabilistic):\n for index_counter in range(self.genome.shape[0] - 1):\n # Sequence of 42 then 213 indicates a new Markov Gate",
"def load_demo():\n\tprint(\"\"\"\n\tBreast Cancer Wisconsin dataset. It contains a total of 569 samples of tumor and malignant cells. \n\tData labeled 1 corresponds to malignant cells, while data labeled 0 corresponds to benign cells. \n\tThe 30 characteristics contain real values obtained from images of cell nuclei. For more information:\n\n\t\t\thttp://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)\n\n\n\tThe returned value is a dictionary where 'x_data' are the predictor variables, 'y_data' the class \n\tlabels and 'features' the name of the characteristics.\n\t\"\"\")\n\tpath = '/'.join(os.path.abspath(pywinEA.__file__).split('/')[:-1])\n\t\n\tdata = pd.read_csv(path+'/dataset/data/BreastCancerWisconsin.csv', index_col=0)\n\tx_data = data.iloc[:, 1:].values\n\ty_data = data.iloc[:, 0].values\n\tfeatures = data.columns[1:].values\n\n\t# Transform labels\n\ty_data[np.where(y_data == 'M')] = 1\n\ty_data[np.where(y_data == 'B')] = 0\n\ty_data = y_data.astype(int)\n\n\treturn {'x_data': x_data, 'y_data': y_data, 'features': features}",
"def load(self):\n basePath = './examples/'\n file = \"print8.ls8\"\n # file = \"mult.ls8\"\n # file = \"stack.ls8\"\n # file = \"call.ls8\"\n file = \"sctest.ls8\"\n if len(sys.argv) > 1:\n file = sys.argv[1]\n address = 0\n\n with open(basePath + file, \"r\") as f:\n for line in f:\n line = line.split(\"#\")\n\n try:\n v = int(line[0], 2)\n except ValueError:\n continue\n # print(v)\n self.ram[address] = v\n address += 1",
"def load_models(self):\n self.models = {}\n for code in self.soi_codes:\n print(f\"> Loading CNN for species code {code:02d}.\")\n self.models[code] = load_model(self.cnn_locations[code])\n print(\"> Complete.\")",
"def __init__(self, entries: ghidra.program.model.address.AddressSetView, findEntryPoint: bool):\n ...",
"def test_ipam_prefixes_available_ips_read(self):\n pass",
"def load_ami(self, **kwargs):\n import psami\n proxy_nodes = ['daq-{:}-mon{:02}'.format(self.instrument,num+1) \\\n for num in range(6)]\n \n proxy_host = kwargs.get('proxy_host', None)\n if not proxy_host and hasattr(self, '_kwargs'):\n proxy_host = self._kwargs.get('proxy_host')\n\n if proxy_host:\n proxy_nodes.insert(0,proxy_host)\n\n for proxy_host in proxy_nodes:\n try:\n self.ami = psami.Ami(proxy_host)\n print 'Loading ami from:',proxy_host\n break\n except:\n print 'Cannot load ami from', proxy_host\n pass",
"def load_owned_instances():\n\n global owned_instances\n\n owned_instances = []\n try:\n with open(state_file, 'r') as f:\n for line in f:\n # Strip spaces and skip empty lines\n inst = line.strip()\n if inst != '':\n owned_instances.append(inst)\n logging.info(\"Loaded list of owned instances: %s\" % ','.join(owned_instances))\n except IOError:\n logging.warning(\"Cannot read initial state from %s\" % state_file)",
"def read(filename, num_magnets):\n hxy = []\n with open(filename, encoding='latin1', errors='ignore') as f:\n for s in readSections(f):\n pos = float(s[0].split()[-1])\n num = np.array([[float(x) for x in l.split()] for l in s[5:] if l])\n hxy.append({'pos': pos, 'e': num[:, :2], 'hxy': num[:, 2:4],\n 'bxy': num[:, 4:6], 'mxy':num[:, 6:]})\n K = num_magnets\n points = [point(i, np.random.randint(0,K), xy)\n for i, xy in enumerate(hxy[0]['e'])]\n new_means, new_points = fit(points)\n # move values to magnets:\n magnets = [{'e': [p.coord for p in new_points if p.k == k],\n 'pos': [], 'hxy': [], 'bxy': [], 'mxy': []}\n for k in range(K)]\n hkeys = ['hxy', 'bxy', 'mxy']\n for i, h in enumerate(hxy): # all positions\n for mag in magnets:\n mag['pos'].append(h['pos'])\n m = [{k: [] for k in hkeys}\n for kk in range(K)]\n for p in new_points: # all elements\n for k in hkeys:\n m[p.k][k].append(h[k][p.k])\n for mk, magk in zip(m, magnets):\n for k in hkeys:\n magk[k].append(mk[k])\n for mag in magnets:\n for k in ['e'] + hkeys:\n mag[k] = np.array(mag[k])\n mag['havg'] = []\n mag['hmax'] = []\n for hpos in mag['hxy']:\n h = np.abs(np.linalg.norm(hpos, axis=1))\n mag['havg'].append(np.mean(h))\n mag['hmax'].append(np.max(h))\n\n # Note dimension of hkeys is (positions x elements x 2)\n\n return magnets",
"def prepare_word_emb_matrices(experiment):\r\n\r\n with open(\"public_data/stats/stats_train.pkl\", 'rb') as stats:\r\n stats = pickle.load(stats)\r\n vocab = stats[\"VOCAB\"]\r\n stops = [word.lower() for word in set(stopwords.words('english'))]\r\n vocab = vocab + stops\r\n\r\n if experiment == \"RANDOM\":\r\n word_embs = np.random.uniform(low=-1.0, high=1.0, size=(len(vocab), PARAMS[\"SIZE\"])).astype(\"float32\")\r\n\r\n else:\r\n word_embs = []\r\n count_unk = 0\r\n count_kn = 0\r\n\r\n if experiment == \"5\":\r\n emb_model = KeyedVectors.load_word2vec_format(\"public_data/models/experiment_5/embeddings_5.bin\",\r\n binary=True)\r\n elif experiment == \"6\":\r\n emb_model = Word2Vec.load(\"public_data/models/experiment_6/embeddings_6\")\r\n\r\n elif experiment in [\"7\", \"8\"]:\r\n emb_model = FastText.load_fasttext_format(\"public_data/models/experiment_%s/embeddings_%s.bin\"\r\n %(experiment, experiment))\r\n for word in vocab:\r\n if word in emb_model:\r\n word_embs.append(emb_model[word])\r\n count_kn += 1\r\n else:\r\n word_embs.append(np.random.uniform(low=-1.0, high=1.0, size=PARAMS[\"SIZE\"]))\r\n count_unk += 1\r\n\r\n word_embs = np.array(word_embs).astype(\"float32\")\r\n print(count_unk / (count_kn + count_unk))\r\n\r\n pad = np.zeros(shape=PARAMS[\"SIZE\"]).astype(\"float32\")\r\n unk = np.random.uniform(low=-1.0, high=1.0, size=PARAMS[\"SIZE\"]).astype(\"float32\")\r\n word_embs = np.insert(word_embs, 0, unk, axis=0) #id 1\r\n word_embs = np.insert(word_embs, 0, pad, axis=0) #id 0\r\n\r\n with open(\"public_data/embeddings/word_embeddings_%s.pkl\" %experiment, 'wb') as out:\r\n pickle.dump(word_embs, out, protocol=4)\r\n\r\n return word_embs",
"def atlas_clusters():\n pass",
"def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')",
"def load_cmudict():\n with open(\"text/en/cmudict-0.7b.txt\", encoding=\"ISO-8859-1\") as file_reader:\n cmudict = (line.strip().split(\" \") for line in islice(file_reader, 126, 133905))\n\n cmudict = {format_alt_entry(word): pronunciation for word, pronunciation in cmudict}\n\n return cmudict",
"def store_wn_lookup():\n syns = list( wn.all_synsets() )\n #syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\"), syns)\n syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\").strip('\"'), syns)\n #offsets_list = [(\"n%08d\" % s.offset, s) for s in syns]\n olist = map(lambda a, b: (\"n%08d\" % a.offset, b), syns, syn_str)\n offset_dict = dict(olist)\n pickle.dump(offset_dict, open('/Users/xlx/Documents/proj/imgnet-flickr/db3/wn_offset_dict.pickle', 'wb'))",
"def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data",
"def load_atom_matrixes(name):\r\n hdulist = pyfits.open(os.path.join(data_dir, '{}.fits'.format(name)))#, ignore_missing_end=True)\r\n data_list = [hdulist[i].data for i in range(1, len(hdulist))]\r\n return data_list",
"def from_naadsm_file(self, root, ns):\n models=root.find(\"models\")\n self.disease_by_type=dict()\n self.disease_by_id=dict()\n for disease_model in models.findall(\"disease-model\", ns):\n production_type=disease_model.attrib[\"production-type\"]\n production_id=disease_model.attrib[\"production-type-id\"]\n dm=DiseaseModel()\n dm.from_naadsm_file(disease_model, ns)\n self.disease_by_type[production_type]=dm\n self.disease_by_id[production_id]=dm\n logger.debug(\"result of find quarantine {0}\".format(models.find(\n \"quarantine-model\", ns)))\n if models.find(\"quarantine-model\", ns) is not None:\n logger.debug(\"Using quarantine model\")\n self.quarantine=QuarantineModel()\n else:\n self.quarantine=NoQuarantineModel()\n\n self.global_detection=GlobalDetectionModel()\n self.detect_models=dict()\n for detect_model in models.findall(\"detection-model\", ns):\n production_type=detect_model.attrib[\"production-type\"]\n production_id=detect_model.attrib[\"production-type-id\"]\n dm=DetectionModel(self.global_detection)\n dm.from_naadsm_file(detect_model, ns)\n self.detect_models[production_type]=dm\n logger.debug(dm)\n\n self.spread_models=dict()\n for airborne in [\"airborne-spread-exponential-model\",\n \"airborne-spread-model\"]:\n for neighbor_model in models.findall(airborne, ns):\n from_production=neighbor_model.attrib[\"from-production-type\"]\n to_production=neighbor_model.attrib[\"to-production-type\"]\n im=InfectNeighborModel()\n im.from_naadsm_file(neighbor_model, airborne, ns)\n self.spread_models[(from_production, to_production)]=im\n logger.debug(\"Spread models for {0}\".format(self.spread_models.keys()))\n\n self.contact_models=collections.defaultdict(list)\n for indirect_model in models.findall(\"contact-spread-model\", ns):\n from_production=indirect_model.attrib[\"from-production-type\"]\n to_production=indirect_model.attrib[\"to-production-type\"]\n contact_type=indirect_model.attrib[\"contact-type\"]\n if contact_type==\"indirect\":\n inm=IndirectModel()\n inm.from_naadsm_file(indirect_model, ns)\n logger.debug(\"from_naadsm_file: indirect {0}\".format(inm))\n self.contact_models[from_production].append(inm)\n elif contact_type==\"direct\":\n logger.warn(\"Ignoring direct contact model\")\n else:\n logger.warn(\"Unknown contact spread model {0}\".format(\n contact_type))\n\n self.farm_models=dict() # production_type => farm model\n for production_type in self.disease_by_type.keys():\n f=Farm()\n f.production_type=production_type\n f.disease=self.disease_by_type[production_type]\n f.quarantine=self.quarantine\n f.detection=self.detect_models[production_type]\n self.farm_models[production_type]=f\n\n self.models_loaded=True",
"def populate_memes() -> None:\n\n for i in range(len(GD_MEMES)):\n GD_MEMES[i][0] = os.path.join(GD_PATH, GD_MEMES[i][0])",
"def getAllInitializedAddressSet(self) -> ghidra.program.model.address.AddressSetView:\n ...",
"def test_ipam_prefixes_read(self):\n pass"
] |
[
"0.5462835",
"0.5123565",
"0.5044636",
"0.49631006",
"0.494597",
"0.49274454",
"0.49178877",
"0.49134806",
"0.4877699",
"0.48551586",
"0.48249665",
"0.4812821",
"0.48062235",
"0.47993273",
"0.47603127",
"0.47496802",
"0.47460192",
"0.47357395",
"0.47238162",
"0.47097886",
"0.4707255",
"0.47056565",
"0.47036844",
"0.46864712",
"0.46853524",
"0.46808895",
"0.467129",
"0.46708992",
"0.46654284",
"0.46626398"
] |
0.58986574
|
0
|
Mocks the Terra instance before a request is made.
|
def mock_terra():
terra = Terra("soju-0013", "")
terra.lcd.request_middlewares.append(lcd_request_test_middleware)
return terra
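
# Hypothetical pytest wiring: exposing the helper as a fixture lets tests receive
# a client whose LCD requests already pass through lcd_request_test_middleware.
# The fixture name is an assumption, not part of the original helper.
import pytest

@pytest.fixture
def terra_client():
    return mock_terra()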
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def default_setup(self, mocker):\n # pylama: ignore=W0201\n session_cls = mocker.patch.object(requests, 'Session')\n self.session = mocker.MagicMock()\n self.session.__enter__.return_value = self.session\n session_cls.return_value = self.session\n\n self.response = mocker.Mock()\n response_cls = mocker.patch.object(requests, 'Response')\n response_cls.return_value = self.response\n self.session.get.return_value = self.response\n self.session.post.return_value = self.response",
"def setup_class(cls):\n cls.mock_get_patcher = patch('project.services.requests.get')\n cls.mock_get = cls.mock_get_patcher.start()",
"def __init__(self):\n\n super().__init__()\n\n self.__current_request_mock = None",
"def setup_method(self) -> None:\n self.client = Mock()",
"def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")",
"def setUp(self):\n\n self._patcher = patch('requests.Session', autospec=True)\n self._mock_session = self._patcher.start()\n instance = self._mock_session.return_value\n # Add \"headers\" attribute\n instance.headers = {}\n # By default, return a successfull response\n self.setSessionResponse(200)",
"def inject_new_request(self):\n\n self.__current_request_mock = CoreRequestMock()",
"def setUp(self):\n self.logger = mock.MagicMock()\n test_state = state.DFTimewolfState(config.Config)\n self.turbinia_processor = turbinia_base.TurbiniaProcessorBase(\n test_state, self.logger)\n file_path = os.path.join(\n CURRENT_DIR, \"test_data\", \"turbinia_request_status.json\")\n self._request_status = json.load(open(file_path))",
"def mock_request(auth_header):\n request = HttpRequest()\n request.META['HTTP_AUTHORIZATION'] = auth_header\n return request",
"def setUp(self):\n self.r_patcher = patch('redis_metrics.models.R')\n self.mock_r = self.r_patcher.start()",
"def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'",
"def setUp(self):\n cache.clear()\n self.factory = APIRequestFactory()",
"def default_setup(self, mocker):\n # pylama: ignore=W0201\n self.url = '/api/v0/publish'\n self.client = wsgi.application.test_client()\n self._retryable = mocker.patch.object(wsgi, '_retryable')",
"def setUp(self):\n self.client = mock.create_autospec(CQLClient)\n\n self.maas_client = mock.create_autospec(MaasClient)\n patcher = mock.patch('bobby.worker.MaasClient')\n self.addCleanup(patcher.stop)\n _MaasClient = patcher.start()\n _MaasClient.return_value = self.maas_client",
"def setUp(self):\n self.factory = RequestFactory()",
"def setUp(self):\n self.factory = RequestFactory()",
"def setUp(self):\n self.factory = RequestFactory()",
"def __init__(self, headers=None, body=None, status=None,\n request_headers={}):\n self.headers = headers\n self.body = body\n self.status_code = status\n self.request = mock.Mock()\n self.request.headers = request_headers",
"def setUpClass(cls):\n cls.get_patcher = patch('requests.get')\n cls.mock = cls.get_patcher.start()\n cls.mock.return_value.json.side_effect = [\n cls.org_payload, cls.repos_payload,\n cls.org_payload, cls.repos_payload,\n ]",
"def setUp(self):\n self.response = self.s.get(self.url, params=self.params)",
"def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)",
"def test_make_request(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)",
"def mock_basket_init(mocker, user: Dict, existing: bool = True):\n if existing:\n mock_response(mocker, GsAssetApi, 'resolve_assets', resolved_asset)\n mock_response(mocker, GsAssetApi, 'get_asset', gs_asset)\n mock_response(mocker, GsAssetApi, 'get_latest_positions', target_position_set)\n mock_response(mocker, GsAssetApi, 'get_many_assets_data', assets_data)\n mock_response(mocker, GsIndexApi, 'initial_price', initial_price)\n mock_response(mocker, GsReportApi, 'get_reports', [report])\n mock_response(mocker, GsUsersApi, 'get_users', [TargetUser.from_dict(user)])\n mock_response(mocker, GsUsersApi, 'get_current_user_info', user)",
"def setUp(self):\n super().setUp()\n self.request_factory = RequestFactory()\n self._auth_backend = LTIBackend()",
"def test_request_context_create(self, mock_rc_init):\n mock_request = MagicMock()\n rc = get_request_context(mock_request)\n mock_rc_init.assert_called_with(mock_request)\n self.assertIsInstance(rc, RequestContext)",
"def requestsmock():\n with requests_mock.mock() as m:\n yield m",
"def setUp(self):\n self.patcher = patch('esis.es.Elasticsearch')\n self.elasticsearch_cls = self.patcher.start()\n self.client = Client(host='localhost', port=9200)",
"def test_post(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.POST, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.post(rest_url)",
"def test_request_should_not_include_token(self):\n client = Client()\n\n with patch(\"requests.request\") as request:\n request.return_value.json.return_value = {}\n\n client.request(\"GET\", \"http://www.google.com/\")\n\n request.assert_called_once_with(\n \"GET\",\n \"http://www.google.com/\",\n headers=None,\n json=None,\n params=b\"per_page=100\",\n )",
"def setup_system_xmodule_mocks_for_lti20_request_test(self):\r\n self.system.get_real_user = Mock(return_value=self.USER_STANDIN)\r\n self.xmodule.max_score = Mock(return_value=1.0)\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', u'test_client_secret'))\r\n self.xmodule.verify_oauth_body_sign = Mock()"
] |
[
"0.6782043",
"0.6592813",
"0.6565687",
"0.6544803",
"0.65013963",
"0.64377755",
"0.63736105",
"0.6317969",
"0.62460375",
"0.6238057",
"0.6181688",
"0.61687994",
"0.61482024",
"0.6130161",
"0.6127421",
"0.6127421",
"0.6127421",
"0.6083885",
"0.60798734",
"0.60511017",
"0.60322815",
"0.602008",
"0.59830725",
"0.59384984",
"0.5920622",
"0.5913596",
"0.59118736",
"0.5908427",
"0.58654016",
"0.586389"
] |
0.7425829
|
0
|
Return a screenshot of all screens.
|
# Qt imports are assumed by the original snippet; the PyQt5 names are shown here
# (PySide2/PySide6 equivalents work the same way).
from PyQt5 import QtWidgets
from PyQt5.QtCore import QPoint, Qt
from PyQt5.QtGui import QPainter, QPixmap

def grab_screens() -> QPixmap:
    # grab all screens
    screens = QtWidgets.QApplication.screens()
    pixmaps = []
    w = 0
    h = 0
    for screen in screens:
        pix = screen.grabWindow(0)
        w += pix.width()
        h = max(h, pix.height())
        pixmaps.append(pix)
    # merge all pixmaps side by side on a single white canvas
    final = QPixmap(w, h)
    final.fill(Qt.white)  # fill before a painter is opened on the pixmap
    painter = QPainter(final)
    p = 0
    for pixmap in pixmaps:
        painter.drawPixmap(QPoint(p, 0), pixmap)
        p += pixmap.width()
    painter.end()  # release the paint device before returning it
    return final
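
# Usage sketch: a QApplication must exist before screens can be grabbed; the
# output file name is arbitrary.
if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    grab_screens().save("all_screens.png", "PNG")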
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getScreenList(self, verbose = False):\n return execCmd(\"%s -list\" % self._screenPath, verbose)",
"def create_screens(self):\n\n for name in State.game['screens']:\n self.create_screen(name)",
"def get_screen_image(dir=\"screenshots\"):\n screenshot_name = dir + \"/screenshot_\" + str(random.randint(0, 1e10)) + \".png\"\n\n screenshot = autopy.bitmap.capture_screen()\n screenshot.save(screenshot_name)\n return screenshot_name",
"def screenshots(self):\n return self._screenshots",
"def view(self, screen=None):\n r = requests.get(\"{}{}/view\".format(self.api,\n screen))\n\n return template(\"all_screens.tpl\", screens=self.screens)",
"def measure_screen(self):\n outputs = self._get_images()\n\n if self.save_image_flag:\n self.save_images(outputs)\n return outputs",
"def list_screens(self):\n\n # Check if the form ws submitted\n form = request.forms.get(\"submit\", False)\n\n # If so...\n if form:\n\n # ...find out what the user wanted to do and to which screen\n action, screen = form.split(\"+\")\n\n # Call the relevant action\n if action == \"view\":\n r = requests.get(\"{}{}/view\".format(self.api,\n screen))\n\n elif action == \"enable\":\n r = requests.get(\"{}{}/enable\".format(self.api,\n screen))\n\n elif action == \"disable\":\n r = requests.get(\"{}{}/disable\".format(self.api,\n screen))\n\n elif action == \"configure\":\n redirect(\"/configure/{}\".format(screen))\n\n elif action == \"custom\":\n url = self.custom_screens.get(screen, \"/\")\n redirect(url)\n\n # Rebuild list of screens\n self.process_plugins()\n sc = self.screens\n\n # Return the web page\n return template(\"all_screens.tpl\", screens=sc)",
"def get_screen():\n img_title = 'screen_' + g.client_id + '.png'\n image_path = STATIC_FILES_PATH + img_title\n if g.driver_status != WhatsAPIDriverStatus.LoggedIn:\n try:\n g.driver.get_qr(image_path)\n return send_file(image_path, mimetype='image/png')\n except Exception as err:\n pass\n g.driver.screenshot(image_path)\n return send_file(image_path, mimetype='image/png')",
"def screen_shot():\n screen_shot_string_io = StringIO.StringIO()\n ImageGrab.grab().save(screen_shot_string_io, \"PNG\")\n screen_shot_string_io.seek(0)\n return screen_shot_string_io.read()",
"def getDisplaysAsImages():\n\ttry:\n\t\trects = getDisplayRects()\n\texcept RectFailed as e:\n\t\traise GrabFailed(\"Error during getDisplayRects: \" + str(e))\n\t# im has an origin at (0, 0) in the top-left corner of the virtual screen,\n\t# but our `rect`s have a (0, 0) origin in the top-left corner of the main\n\t# display. So we normalize all coordinates in the rects to be >= 0.\n\tnormalizedRects = normalizeRects(rects)\n\tim = getScreenAsImage()\n\n\treturn list(im.crop(rect) for rect in normalizedRects)",
"def grabScreenshot(self):\n\n self.griddButton.setVisible(True)\n self.mirrorButton.setVisible(True)\n self.blurButton.setVisible(True)\n self.display1Button.setVisible(True)\n self.display2Button.setVisible(True)\n self.tutorialLabel.setVisible(False)\n\n print (\"Grabbing Screenshot\")\n print (\"Showing Buttons now\")\n\n with mss() as sct:\n monitor = sct.monitors[1]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.firstScreen, \"PNG\")\n\n # 2nd Display Screen shot\n\n monitor = sct.monitors[2]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.secondScreen, \"PNG\")\n self.photo.setPixmap(QtGui.QPixmap(self.firstScreen))\n self.statustext.setText(\"Added display 1 as work display for now\")\n self.ActivePhoto = \"Screenshot1.png\" # Set Photo as display 1 so we dont get callstack error when mirrroring",
"def screenGrab():\n box = (x_pad+1, y_pad+1, 796, 825)\n save_directory = os.getcwd()\n time_stamp = int(time.time())\n image_file_name = '{}\\\\full_snap__{}.png'.format(save_directory, time_stamp)\n im = ImageGrab.grab(box)\n im.save(image_file_name, 'PNG')",
"def get_screenshots(miscobj):\n\n imagedir = misctools.get_screenshots_dir(miscobj)\n if imagedir:\n return mark_safe(htmltools.get_screenshots(imagedir))\n else:\n return None",
"def getScreenAsImage():\n\treturn _getRectAsImage(None)",
"def screenshot(screen_width: int, screen_height: int,\n offset_left: float, offset_top: float,\n offset_right: float, offset_bottom: float) -> Image:\n x1 = offset_left * screen_width\n y1 = offset_top * screen_height\n x2 = (offset_right * -screen_width) + screen_width\n y2 = (offset_bottom * -screen_height) + screen_height\n image = pyscreenshot.grab(bbox=(x1, y1, x2, y2))\n return image",
"def capture_screen():\n loop_time = time()\n with mss() as sct:\n monitor = {'top': 100, 'left':200, 'width':600, 'height':480}\n while True:\n # Captures window and converts into numpy array\n window_capture = np.array(sct.grab(monitor))\n # Converts to RGB colour\n window_capture = cv.cvtColor(window_capture, cv.COLOR_RGB2BGR)\n loop_time = time() \n window_capture = Image.fromarray(window_capture)\n #Keeps the data in memory as buffer\n window_capture_inMemory = io.BytesIO()\n window_capture.save(window_capture_inMemory,format='PNG')\n window_capture_bytes=window_capture_inMemory.getvalue()\n return window_capture_bytes",
"def screen_shot(self):\n screen_size = '{}x{}@{}x{}/0'.format(self.screen[0], self.screen[1], self.screen[0], self.screen[1])\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'LD_LIBRARY_PATH=/data/local/tmp', '/data/local/tmp/minicap', '-s', '-P', screen_size,\n '>', TEMP_PIC_ANDROID_PATH\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n logger.info('screen shot saved in {}'.format(TEMP_PIC_ANDROID_PATH))",
"def take_desktop_screenshot(self):\n filepath = self._get_screenshot_path(\"whitelib_screenshot_{index}.png\")\n directory = os.path.dirname(filepath)\n if not os.path.exists(directory):\n os.makedirs(directory)\n logger.info(get_link_path(filepath, self._log_directory))\n logger.info(\n '</td></tr><tr><td colspan=\"3\">'\n '<a href=\"{src}\"><img src=\"{src}\" width=\"800px\"></a>'.format(\n src=get_link_path(filepath, self._log_directory)\n ),\n html=True,\n )\n bmp = Desktop.CaptureScreenshot()\n bmp.Save(filepath, ImageFormat.Png)\n return filepath",
"def get_screen(env):\n # Returned screen requested by gym is 400x600x3\n # Transpose it into torch order (CHW).\n screen = env.render(mode='rgb_array').transpose((2, 0, 1))\n # Convert to float, rescale, convert to torch tensor\n screen = np.ascontiguousarray(screen, dtype=np.float32) / 255\n screen = torch.from_numpy(screen)\n # Resize, and add a batch dimension (BCHW)\n return resize(screen).unsqueeze(0)",
"def takeScreenshot ():\n\n im = ImageGrab.grab()\n return im",
"def find_screenshots():\n # Inside SCREENSHOT_DIR, there should be 1 folder with a\n # random name which contains the user's puzzles. Just\n # attempt to modify a screenshot in each of the directories\n # in the folder.\n for folder in os.listdir(SCREENSHOT_DIR):\n full_path = os.path.join(SCREENSHOT_DIR, folder)\n if os.path.isdir(full_path):\n # The screenshot to modify is untitled.jpg\n screenshot = os.path.join(full_path, 'untitled.jpg')\n if os.path.isfile(screenshot):\n yield screenshot",
"def TakeScreenShot(rect):\r\n\r\n # Create a DC for the whole screen area\r\n dcScreen = wx.ScreenDC()\r\n\r\n # Create a Bitmap that will later on hold the screenshot image\r\n # Note that the Bitmap must have a size big enough to hold the screenshot\r\n # -1 means using the current default colour depth\r\n bmp = wx.EmptyBitmap(rect.width, rect.height)\r\n\r\n # Create a memory DC that will be used for actually taking the screenshot\r\n memDC = wx.MemoryDC()\r\n\r\n # Tell the memory DC to use our Bitmap\r\n # all drawing action on the memory DC will go to the Bitmap now\r\n memDC.SelectObject(bmp)\r\n\r\n # Blit (in this case copy) the actual screen on the memory DC\r\n # and thus the Bitmap\r\n memDC.Blit( 0, # Copy to this X coordinate\r\n 0, # Copy to this Y coordinate\r\n rect.width, # Copy this width\r\n rect.height, # Copy this height\r\n dcScreen, # From where do we copy?\r\n rect.x, # What's the X offset in the original DC?\r\n rect.y # What's the Y offset in the original DC?\r\n )\r\n\r\n # Select the Bitmap out of the memory DC by selecting a new\r\n # uninitialized Bitmap\r\n memDC.SelectObject(wx.NullBitmap)\r\n\r\n return bmp",
"def grab_screenshot(windows_name):\n hwnd_main = win32gui.FindWindow(None, windows_name)\n if not hwnd_main:\n print('window not found!')\n\n window_rect = win32gui.GetWindowRect(hwnd_main)\n #print(window_rect)\n src_image: Image = getRectAsImage(window_rect)\n return src_image",
"def take_windows_screenshot(x, y):\n # screenshot takes starting x,y coordinates and then for how far the shot should stretch\n pic = pyautogui.screenshot(region=(0, y * 0.6, x * 0.33, y * 0.3))\n pic.save(\"Screenshot.png\")",
"def display_board(self, screen):\n for wall in self.cube_walls_list:\n screen = wall.draw_rhombus(screen)\n for tile in self.tile_rhombus_list:\n screen = tile.draw_rhombus(screen)\n\n return screen",
"def mod_screenshots():\n mod_type = CONF['screenshot_type', 'PETI'].lower()\n\n if mod_type == 'cust':\n LOGGER.info('Using custom screenshot!')\n scr_loc = CONF['screenshot', '']\n elif mod_type == 'auto':\n LOGGER.info('Using automatic screenshot!')\n scr_loc = None\n # The automatic screenshots are found at this location:\n auto_path = os.path.join(\n '..',\n GAME_FOLDER.get(CONF['game_id', ''], 'portal2'),\n 'screenshots'\n )\n # We need to find the most recent one. If it's named\n # \"previewcomplete\", we want to ignore it - it's a flag\n # to indicate the map was playtested correctly.\n try:\n screens = [\n os.path.join(auto_path, path)\n for path in\n os.listdir(auto_path)\n ]\n except FileNotFoundError:\n # The screenshot folder doesn't exist!\n screens = []\n screens.sort(\n key=os.path.getmtime,\n reverse=True,\n # Go from most recent to least\n )\n playtested = False\n for scr_shot in screens:\n filename = os.path.basename(scr_shot)\n if filename.startswith('bee2_playtest_flag'):\n # Previewcomplete is a flag to indicate the map's\n # been playtested. It must be newer than the screenshot\n playtested = True\n continue\n elif filename.startswith('bee2_screenshot'):\n continue # Ignore other screenshots\n\n # We have a screenshot. Check to see if it's\n # not too old. (Old is > 2 hours)\n date = datetime.fromtimestamp(\n os.path.getmtime(scr_shot)\n )\n diff = datetime.now() - date\n if diff.total_seconds() > 2 * 3600:\n LOGGER.info(\n 'Screenshot \"{scr}\" too old ({diff!s})',\n scr=scr_shot,\n diff=diff,\n )\n continue\n\n # If we got here, it's a good screenshot!\n LOGGER.info('Chosen \"{}\"', scr_shot)\n LOGGER.info('Map Playtested: {}', playtested)\n scr_loc = scr_shot\n break\n else:\n # If we get to the end, we failed to find an automatic\n # screenshot!\n LOGGER.info('No Auto Screenshot found!')\n mod_type = 'peti' # Suppress the \"None not found\" error\n\n if srctools.conv_bool(CONF['clean_screenshots', '0']):\n LOGGER.info('Cleaning up screenshots...')\n # Clean up this folder - otherwise users will get thousands of\n # pics in there!\n for screen in screens:\n if screen != scr_loc:\n os.remove(screen)\n LOGGER.info('Done!')\n else:\n # PeTI type, or something else\n scr_loc = None\n\n if scr_loc is not None and os.path.isfile(scr_loc):\n # We should use a screenshot!\n for screen in find_screenshots():\n LOGGER.info('Replacing \"{}\"...', screen)\n # Allow us to edit the file...\n utils.unset_readonly(screen)\n shutil.copy(scr_loc, screen)\n # Make the screenshot readonly, so P2 can't replace it.\n # Then it'll use our own\n utils.set_readonly(screen)\n\n else:\n if mod_type != 'peti':\n # Error if we were looking for a screenshot\n LOGGER.warning('\"{}\" not found!', scr_loc)\n LOGGER.info('Using PeTI screenshot!')\n for screen in find_screenshots():\n # Make the screenshot writeable, so P2 will replace it\n LOGGER.info('Making \"{}\" replaceable...', screen)\n utils.unset_readonly(screen)",
"def get_screens_data():\n result = subprocess.run((\"xrandr\", \"--query\"),\n check=True, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # kinda like sscanf\n props = (('output', str), ('primary', bool), ('width', int), ('height', int))\n regex = re.compile(r\"^(\\S+) connected( primary)? (\\d+)x(\\d+)\",\n flags=re.MULTILINE | re.ASCII)\n for match in regex.findall(result.stdout):\n yield {name: type(value) for (name, type), value in zip(props, match)}",
"def show_img(graphs = False):\n while True:\n screen = (yield)\n window_title = \"logs\" if graphs else \"game_play\"\n cv2.namedWindow(window_title, cv2.WINDOW_NORMAL) \n imS = cv2.resize(screen, (800, 400)) \n cv2.imshow(window_title, screen)\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n cv2.destroyAllWindows()\n break",
"def deviceScreenshot(self, event):\n\n self.screenshotbutton.Disable()\n\n deviceModel, deviceID = self.getDevices()\n deviceIDModel = []\n\n if not deviceModel or not deviceID:\n self.SetStatusText(\"No Android devices detected\")\n self.screenshotbutton.Enable()\n return\n\n for everyi in deviceModel:\n for everym in deviceID:\n deviceIDModel = [everym + \" \" + everyi]\n\n try:\n dialog = wx.MultiChoiceDialog(self, \"Pick your devices\", \"caption\", deviceIDModel, wx.OK | wx.CANCEL)\n except UnboundLocalError:\n self.SetStatusText(f\"No Devices Found\")\n self.screenshotbutton.Enable()\n return\n\n instance = dialog.ShowModal()\n devices = dialog.GetSelections()\n\n listLength = len(devices)\n dialog.Destroy()\n\n if instance == wx.ID_OK:\n for i in range(listLength):\n self.SetStatusText(f\"Screenshotting {deviceModel[i]} {i+1}/{listLength}\")\n subprocess.call(f\"adb -s {deviceID[i]} shell screencap /sdcard/{deviceModel[i]}.png\", creationflags=self.createNoWindow)\n subprocess.call(fr\"adb -s {deviceID[i]} pull /sdcard/{deviceModel[i]}.png C:\\ADBscripts\\PhoneScreenshots\", creationflags=self.createNoWindow)\n\n if listLength > 1:\n self.SetStatusText(f\"Took {listLength} Screenshots\")\n else:\n self.SetStatusText(f\"Took {listLength} Screenshot\")\n\n self.screenshotbutton.Enable()",
"def configure_screenshots(scenario):\r\n world.auto_capture_screenshots = False"
] |
[
"0.669737",
"0.6682343",
"0.66719764",
"0.6635923",
"0.6561212",
"0.63937396",
"0.63591784",
"0.6343912",
"0.6330666",
"0.63194615",
"0.6269219",
"0.62050754",
"0.61970925",
"0.61793834",
"0.6173518",
"0.6172579",
"0.61499625",
"0.60498494",
"0.6041306",
"0.60394",
"0.60236806",
"0.60149556",
"0.60128826",
"0.60018134",
"0.5931413",
"0.58977956",
"0.588481",
"0.5871064",
"0.5858197",
"0.58572143"
] |
0.76967037
|
0
|
Return only unique elements of a list of names.
|
def unique_names(names):
return sorted(set(names))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def uniq(listinput):\n\t\"\"\" This will be provided for the student. \"\"\"\n\toutput = []\n\tfor x in listinput:\n\t\tif x not in output:\n\t\t\toutput.append(x)\n\treturn output",
"def uniq(elements):\n us = set()\n ret = []\n for e in elements:\n if e not in us:\n ret.append(e)\n us.add(e)\n return ret",
"def get_unique_employees():\n unique_names = []\n\n for entry in Entry.select():\n if entry.employee_name not in unique_names:\n unique_names.append(entry.employee_name)\n\n clear()\n return unique_names",
"def _unique(li):\n return list(set(li))",
"def uniq_stable(elems):\r\n unique = []\r\n unique_set = set()\r\n for nn in elems:\r\n if nn not in unique_set:\r\n unique.append(nn)\r\n unique_set.add(nn)\r\n return unique",
"def unique(my_list):\n return [x for x in my_list if x not in locals()['_[1]']]",
"def _repair_names_check_unique(names: Iterable[str]) -> Iterable[str]:\n for name in names:\n if names.count(name) > 1:\n raise NameNonUniqueError(f\"Names must be unique: {name}\")\n if name == \"\" or name is numpy.nan:\n raise NameNonUniqueError(f\"Names can't be empty: {name}\")\n if re.search(r\"(?:(?<!_)_{2}\\d+|(?<!_)__)+$\", str(name)):\n raise NameNonUniqueError(\n f\"Names can't be of the form `__` or `_j`: {name}\"\n )\n return names",
"def dedup_and_title_case_names(names):\n return list(set(name.title() for name in names))",
"def _naive_unique(li):\n tmp = []\n for el in li:\n if el not in tmp:\n tmp.append(el)\n return tmp",
"def unique(li):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in li if not (x in seen or seen_add(x))]",
"def unique(input_list):\n output = []\n for item in input_list:\n if item not in output:\n output.append(item)\n return output",
"def dedupe_list(input):\n return list(set(input))",
"def makeUnique(list):\n\tu = []\n\tfor l in list:\n\t\tif not l in u:\n\t\t\tu.append(l)\n\treturn u",
"def unique_list(inlist):\n return set(inlist)",
"def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names",
"def omitnames(names, patterns, sort=True):\n if not patterns:\n return names\n omitset = set()\n for pattern in patterns.split(' '):\n omitset.update(fnmatch.filter(names, pattern))\n if sort:\n return sorted(set(names) - omitset)\n else:\n result = []\n for name in names:\n if name not in omitset:\n result.append(name)\n return result",
"def unique(list_of_links):\n return list(set(list_of_links))",
"def dedup_and_title_case_names(names):\n names1 =[]\n for n in names:\n if n.title() not in names1:\n names1.append(n.title())\n return names1\n # return [n.title() for n in names if n.title() not in names1]\n pass",
"def unique(lis):\n seen = set()\n ret = []\n for elm in lis:\n if elm not in seen:\n ret.append(elm)\n seen.add(elm)\n return ret",
"def make_unique(lista):\n f = []\n for it in lista:\n if it not in f:\n f.append(it)\n return f",
"def listops_uniq(list_a):\r\n retlist = []\r\n for item in list_a:\r\n if item not in retlist:\r\n retlist.append(item)\r\n\r\n return retlist",
"def unique(self):\n return self.element_wise(lambda seqs: list(set(seqs)))",
"def unique(x):\n\n return list(set(x))",
"def distinct(x):\n return list(set(x))",
"def namelist(self):\n return set(self.names())",
"def _repair_names_unique(\n names: Sequence[str],\n quiet: bool = False,\n sanitizer: Callable = None,\n base0_: bool = None,\n) -> List[str]:\n base = int(not base0_)\n min_names = _repair_names_minimal(names)\n neat_names = [\n re.sub(r\"(?:(?<!_)_{1,2}\\d+|(?<!_)__)+$\", \"\", name)\n for name in min_names\n ]\n if callable(sanitizer):\n neat_names = [sanitizer(name) for name in neat_names]\n\n new_names = []\n changed_names = []\n for i, name in enumerate(neat_names):\n if neat_names.count(name) > 1 or name == \"\":\n name = f\"{name}__{i + base}\"\n if name != names[i]:\n changed_names.append((names[i], name))\n new_names.append(name)\n if not quiet:\n _log_changed_names(changed_names)\n return new_names",
"def make_unique(in_list):\n new_list = []\n for l in in_list:\n if l not in new_list:\n new_list.append(l)\n return new_list",
"def unique(list1):\n \n # intilize a null list \n unique_list = [] \n \n # traverse for all elements \n for x in list1: \n # check if exists in unique_list or not \n if x not in unique_list: \n unique_list.append(x)\n return unique_list",
"def names(self, ignore_items=True):\n all_names = self.variables()\n if not ignore_items:\n all_names = self.unroll(all_names, both='all')\n lower_names = [n.lower() for n in all_names]\n multiple_names = [k for k, v in list(Counter(lower_names).items()) if v > 1]\n if not multiple_names: return self.variables()\n weak_dupes = OrderedDict()\n for name in all_names:\n if name.lower() in multiple_names:\n if not name.lower() in weak_dupes:\n weak_dupes[name.lower()] = [name]\n elif not name in weak_dupes[name.lower()]:\n weak_dupes[name.lower()].append(name)\n max_wd = max(len(v) for v in list(weak_dupes.values()))\n for k, v in list(weak_dupes.items()):\n while not len(v) == max_wd:\n v.append(None)\n weak_dupes[k] = v\n\n return pd.DataFrame(weak_dupes)",
"def remove_duplicates(input: List[str]) -> List[str]:\n\n output = input # Replace with your logic\n\n return output"
] |
[
"0.6976379",
"0.6941287",
"0.69210607",
"0.69099766",
"0.6850234",
"0.68281347",
"0.6813528",
"0.67876744",
"0.66393167",
"0.6612804",
"0.6556339",
"0.6508535",
"0.65020144",
"0.64769304",
"0.64689285",
"0.6465169",
"0.6462953",
"0.64619017",
"0.6438553",
"0.6429687",
"0.6411834",
"0.64038813",
"0.63927555",
"0.636023",
"0.6355093",
"0.6337985",
"0.630891",
"0.63054264",
"0.6279073",
"0.62735397"
] |
0.829594
|
0
|
Counts the element/attribute usage based on the information found in fileinfos.
|
def count_items(fileinfos, type, name=""):
names = []
if is_filename(name):
# count all elements/attributes for one text
for nodeName in fileinfos[name]["usage_" + type].keys():
names.append(nodeName)
elif name == "":
# count all elements/attributes for all texts
for filename in fileinfos:
for nodeName in fileinfos[filename]["usage_" + type].keys():
names.append(nodeName)
items_counted = {}
if is_filename(name) or name == "":
for singleName in unique_names(names):
counter = 0
for file in fileinfos:
counter += fileinfos[file]["usage_" + type].get(singleName, 0)
items_counted[singleName] = counter
elif is_attname(name):
# count attribute usage for all texts
for file in fileinfos:
if name[1:] in fileinfos[file]["usage_att"].keys():
items_counted[file] = fileinfos[file]["usage_att"][name[1:]]
else:
# count element usage for all texts
for file in fileinfos:
if name in fileinfos[file]["usage_el"].keys():
items_counted[file] = fileinfos[file]["usage_el"][name]
return items_counted
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)",
"def count_and_draw(fileinfos, args, name=\"\"):\n els_counted = count_items(fileinfos,\"el\",name)\n atts_counted = count_items(fileinfos,\"att\",name)\n draw_figure(els_counted, atts_counted, args, name)",
"def check_attname(name, fileinfos):\n num = 0 \n for file in fileinfos:\n if name[1:] in fileinfos[file][\"usage_att\"].keys():\n num += fileinfos[file][\"usage_att\"][name[1:]]\n try:\n if num == 0:\n raise ValueError(\"Error: No attribute '\"+name+\"' was found in the collection.\")\n except ValueError as err:\n print(err)\n exit(1)",
"def check_elname(name, fileinfos):\n num = 0\n for file in fileinfos:\n if name in fileinfos[file][\"usage_el\"].keys():\n num += fileinfos[file][\"usage_el\"][name]\n try:\n if num == 0:\n raise ValueError(\"Error: No element '\"+name+\"' was found in the collection.\")\n except ValueError as err:\n print(err)\n exit(1)",
"def fileCount(self):\n pass",
"def fileCounter(directory):",
"def process_single(fileinfos, args):\n # does the file name exist?\n if is_filename(args.name):\n check_filename(args.name, fileinfos)\n # does the attribute name exist?\n elif is_attname(args.name):\n check_attname(args.name, fileinfos)\n # does the element name exist? \n elif args.name != \"\":\n check_elname(args.name, fileinfos)\n \n count_and_draw(fileinfos, args, args.name)",
"def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()",
"def getFileCount(self) -> int:\n ...",
"def run_filetype(filepath, fileinfo, temppath):\n features = collections.Counter()\n\n # skip files in temp directory (only record primary file type here)\n if filepath.startswith(temppath):\n return features\n\n # skip if no fileinfo\n if not fileinfo:\n return features\n\n features['File type: '+fileinfo['type']] += 1\n for segment in fileinfo['type'].split(','):\n features['File type segment: ' + segment] += 1\n for word in segment.split():\n features['File type word: ' + word] += 1\n\n return features",
"def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1",
"def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table",
"def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file",
"def numberFiles(self):\n return self.n",
"def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter",
"def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n",
"def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... \\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... \\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'",
"def tally_stats(hdf5_file):\n Stat = namedtuple('Stat', ['cr_count',\n 'img_count',\n 'total_exptime'])\n\n with h5py.File(hdf5_file,mode='r') as f:\n instr = list(f.keys())[0]\n print(instr)\n grp = f['/{}/sizes'.format(instr)]\n num_images = 0\n num_cr = 0\n total_exptime = 0\n for key in grp.keys():\n dset = grp[key][...]\n attrs = grp[key].attrs\n # print(list(attrs.items()))\n num_cr += dset.shape[1]\n num_images += 1\n total_exptime += attrs['exptime']\n\n result = Stat(cr_count=num_cr,\n img_count=num_images,\n total_exptime=total_exptime)\n\n return instr, result",
"def count_values_for_attribute(local_data, attr, attr_value):\n\tcounter = 0\n\tfor element in local_data:\n\t\tif element[attr] == attr_value:\n\t\t\tcounter += 1\n\tif debug == 1:\n\t\tprint(debug_prefix + \"Counted\", counter, attr_value, \"values for\", attr, \"attribute.\" + debug_postfix)\n\treturn counter",
"def osm_general_stats(osm_file):\n stats = {\n 'element_types': {},\n 'attributes': {},\n 'tag_keys': {},\n }\n for event, element in ET.iterparse(osm_file, events=(\"start\",)):\n # Element types\n if element.tag not in stats['element_types']:\n stats['element_types'][element.tag] = 0\n stats['element_types'][element.tag] += 1\n\n # Attributes\n for attrib in element.attrib:\n if attrib not in stats['attributes']:\n stats['attributes'][attrib] = 0\n stats['attributes'][attrib] += 1\n\n # Tag keys\n if element.tag == \"tag\":\n if element.attrib['k'] not in stats['tag_keys']:\n stats['tag_keys'][element.attrib['k']] = 0\n stats['tag_keys'][element.attrib['k']] += 1\n return stats",
"def n_total_files(self):\n return len(self.fileinfo)",
"def tag_counts (count_file):\r\n tagcounts = defaultdict(int)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split()\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0])\r\n tag = fields[2]\r\n tagcounts[tag] += count \r\n f.close() \r\n return tagcounts",
"def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" + str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + \"\\n\" )\n # Endtry",
"def process_info(info):\n\n global graf\n for tup in info:\n info.remove(tup)\n for tup2 in info:\n if tup[0] == tup2[0]:\n info.remove(tup2)\n try:\n graf[tup[1]][tup2[1]] += 1\n except KeyError:\n try: \n graf[tup2[1]][tup[1]] += 1\n except KeyError:\n graf[tup[1]][tup2[1]] = 1",
"def file_stat(self, file_path):",
"def getNumStatDataFiles(self):\n return self.nStatDataFiles",
"def find_n(self):\n metadata_files = [\n file for file in self.cfg[\"input_files\"]\n if \"tas/metadata.yml\" in file\n ]\n self.cfg[\"N\"] = {}\n for meta_file in metadata_files:\n n_identifyer = meta_file.split(\"/tas/\")[0].split(\"/tas_\")[-1]\n metadata = group_metadata(get_cfg(meta_file).values(), \"dataset\")\n self.cfg[\"N\"][n_identifyer] = len(metadata.keys()) - 1",
"def count_aggregate(self):\n count = FileCount()\n if self._status == FileStatus.EMPTY:\n count.found_empty_file()\n else:\n for expd in self._expected_docstrings:\n if expd.ignore_reason:\n pass # Ignores will be counted in a future version\n elif expd.has_docstring:\n count.found_needed_docstr()\n else:\n count.missed_needed_docstring()\n return count",
"def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results",
"def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return"
] |
[
"0.6886276",
"0.6738042",
"0.6660266",
"0.6429914",
"0.64092374",
"0.62989074",
"0.628416",
"0.6216311",
"0.5990448",
"0.5982315",
"0.588425",
"0.58645785",
"0.5762811",
"0.56903577",
"0.5662114",
"0.5643657",
"0.56194276",
"0.5618649",
"0.5595091",
"0.55841815",
"0.557454",
"0.5542427",
"0.5515631",
"0.5515056",
"0.5487995",
"0.5479221",
"0.54631037",
"0.54140055",
"0.5391298",
"0.5385891"
] |
0.7681641
|
0
|
Dump the fileinfos to a JSON file.
|
def dump_to_json(fileinfos, out):
jsonarray = json.dumps(fileinfos)
json_filename = "all_elements_used.json"
text_file = open(os.path.join(out,out_dir_name,json_filename), "w")
text_file.write(jsonarray)
text_file.close()
stdout.write("... "+json_filename+" created\n")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def SaveJSON(self, filename):\n data = {\n 'files': self._files,\n 'ebuilds': self._ebuilds,\n }\n json.dump(data, open(filename, 'w'))",
"def _dumpJson(self, data, file):\n name, ext = os.path.splitext(file)\n tempFile = \"{0}.tmp\".format(name)\n with open(tempFile, \"w\") as f:\n json.dump(data, f, indent=4)\n shutil.copyfile(tempFile, file)\n os.remove(tempFile)",
"def save_info(self):\n json.dump(self.info, open(os.path.join(self.dstore_dir, \"info.json\"), \"w\"),\n sort_keys=True, indent=4, ensure_ascii=False)",
"def to_file(self, file_name: str) -> None:\n\n with open(file_name, 'w') as fi:\n json.dump(self.to_dict(), fi, indent=1)",
"def to_json(self, filename, indent=2):\n d = self.to_dicts()\n if hasattr(filename, \"write\"):\n json.dump(d, filename, indent=indent)\n elif isinstance(filename, str):\n with open(os.path.expanduser(filename), \"w\") as f:\n json.dump(d, f, indent=indent)\n else:\n raise ValueError(\"Unknown filename or file-object\")",
"def to_json(self, filepath=None):\n obj = {'history': self.resp,\n 'details': {k: v.to_json() for k, v in self.details.items()}\n }\n if filepath is None:\n return json.dumps(obj)\n else:\n with open(filepath) as f:\n json.dump(obj, f)",
"def dump_json(object, filename):\n import json\n\n filename = filename if filename.endswith('.json') else (filename + '.json')\n\n with open(filename, 'w') as f:\n json.dump(object, f, indent=4)",
"def writeToJson(inputObj,fileLoc):\n myFile = open(fileLoc,'w')\n json.dump(inputObj, myFile, sort_keys=True, indent=4, separators=(',', ': '))",
"def write_json_file(self, fname, content):\n pass",
"def write_json_file(self, fname, content):\n pass",
"def export_json(self, verbosejson=False):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".json\",\n filetypes=((\"javascript object notation\", \"*.json\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n joutdict = {}\n joutdict['NMEA Stats'] = self.tabs.window.nmeatracker.nmea_stats()\n joutdict['AIS Stats'] = self.tabs.window.aistracker.tracker_stats()\n joutdict['AIS Stations'] = self.tabs.window.aistracker. \\\n all_station_info(verbose=verbosejson)\n export.write_json_file(joutdict, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def _dump(self):\n with open('weibo_dumps.json', 'w+', encoding='utf-8') as f:\n json.dump(self._marshaller.result, f, ensure_ascii=False)",
"def create_json_file(self,file_name):\n with open('saves/' + file_name + '.json', 'w') as fp:\n json.dump(self.options, fp, indent=4)",
"def save(self):\n\n with open(FileStorage.__file_path, \"w\") as file:\n dictionary = {}\n for a, b in FileStorage.__objects.items():\n dictionary[a] = b.to_dict()\n ink = json.dumps(dictionary)\n file.write(ink)",
"def to_json(self, fname):\n fname = enforce_extension(fname, \".json\")\n write_json(self.as_dict, fname)",
"def save_info_to_file(filepath, tokens):\n with open(filepath, 'w') as f:\n json.dump(tokens, f)",
"def save_to_file(data):\n\ttry:\n\t\toutput_file = open(\"output.json\", \"w\")\n\t\toutput_file.write(json.dumps(data))\n\texcept:\n\t print(Fore.GREEN + \"File not found or path is incorrect\")\n\tfinally:\n\t print(Fore.GREEN + \"Success go to output.json to look at the json\")",
"def dump_json(self, json_path_fn, json_data_dic):\n with open(json_path_fn, \"w\") as hf_json:\n json.dump(\n json_data_dic,\n hf_json,\n sort_keys=self.json_dump_sort_key,\n indent=self.json_dump_indent_val,\n )",
"def save_data_file(self):\n with open(self.files['data'], 'w') as outfile:\n outfile.write(self.to_json())\n outfile.close()",
"def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)",
"def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)",
"def _dumpJson(data, file):\n name, ext = os.path.splitext(file)\n tempFile = \"{0}.tmp\".format(name)\n with open(tempFile, \"w\") as f:\n json.dump(data, f, indent=4)\n shutil.copyfile(tempFile, file)\n os.remove(tempFile)",
"def to_json(self, fpath):\n import json\n with open(fpath, 'w') as fp:\n json.dump(self.to_dict(), fp)",
"def dict_2_json(obj, filename):\n\twith open('data/output/' + filename, 'w') as fp:\n\t\tjson.dump(obj, fp, indent=4)",
"def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)",
"def dump(self, outdir):\n quantille_data = dict(((key, quantille.dumps()) for key, quantille in self.__data.items()))\n with open(os.path.join(outdir, self.__filename), \"wt\") as outfile:\n json.dump((quantille_data, self.__keys, self.__value_keynames), outfile)",
"def to_json(obj: ConfiguredBaseModel, file: str):\n if file:\n with open(file, \"w\") as f:\n f.write(obj.json(indent=4))\n console.print(f\"\\nOutput written to {file}\\n\")\n else:\n print_json(obj.json(indent=4))",
"def to_file(self, fp):\n dict_ = self.serialize()\n with open_file(fp, mode='w') as writer:\n json.dump(dict_, writer, indent=2)",
"def save(self):\n d1 = {}\n with open(self.__file_path, mode=\"w\") as f:\n for k, v in self.__objects.items():\n d1[k] = v.to_dict()\n json.dump(d1, f)",
"def dump_json_to_file(dictionary, file_name):\n if file_name is not None:\n IOUtils.mkdirf(file_name)\n with open(file_name, 'w') as file_obj:\n json.dump(dictionary, file_obj, indent=4)"
] |
[
"0.69172674",
"0.66837215",
"0.66653365",
"0.66187596",
"0.65824217",
"0.6502465",
"0.6499544",
"0.64916843",
"0.64843327",
"0.64843327",
"0.6442699",
"0.6432443",
"0.6427955",
"0.64231676",
"0.6397462",
"0.6368988",
"0.63653654",
"0.6357841",
"0.63443846",
"0.6318794",
"0.6318202",
"0.6303027",
"0.6300088",
"0.6295722",
"0.629254",
"0.625522",
"0.6231382",
"0.6205396",
"0.61979306",
"0.6187053"
] |
0.7570784
|
0
|
Dump the fileinfos to a CSV file.
|
def dump_to_csv(fileinfos, out, all_el_names, all_att_names):
uni_el_names = unique_names(all_el_names)
uni_att_names = unique_names(all_att_names)
att_names_prefixed = ["@%s" % item for item in uni_att_names]
csv_filename = "all_elements_used.csv"
# transform information from dictionary to CSV format
with open(os.path.join(out,out_dir_name,csv_filename), "w") as fout:
fout.write("," + ",".join(uni_el_names) + "," + ",".join(att_names_prefixed) + "\n")
for key in fileinfos:
el_str = ""
for eln in uni_el_names:
if (eln in fileinfos[key]["usage_el"]):
el_str += "," + str(fileinfos[key]["usage_el"][eln])
else:
el_str += ",0"
att_str = ""
for attn in uni_att_names:
if (attn in fileinfos[key]["usage_att"]):
att_str += "," + str(fileinfos[key]["usage_att"][attn])
else:
att_str += ",0"
fout.write(key + el_str + att_str + "\n")
stdout.write("... "+csv_filename+" created\n")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def write_to_csv_backup(api_info):\n\n rows = build_csv_write(api_info)\n\n with open(\"DbDynamicInfoBackUp.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file)\n for i in rows:\n writer.writerow(i)",
"def info2csv(df, csv_path):\n df.to_csv(csv_path, index=False)",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)",
"def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])",
"def to_csv(self, filename):\n self.data.to_csv(filename)",
"def to_csv(self, filename):\n self.data.to_csv(filename)",
"def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())",
"def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return",
"def write_file(poet, info_dict):\r\n\r\n filename = SAVE_PATH + '/' + poet + '/' + str(info_dict['id']) + '_'+ str(info_dict['pagenum']) \\\r\n + '_' + info_dict['id2'] +'_' + info_dict['ord2'] \\\r\n + '_' + info_dict['id3'] + '_' + info_dict['ord3'] \\\r\n + '_' + info_dict['id4'] + '_' + info_dict['ord4'] + '.txt'\r\n\r\n print(filename)\r\n with open(filename, 'w', encoding='utf-16') as f:\r\n txt = ','.join([str(info_dict[k]) for k in KEYS ])\r\n txt = txt + '\\n' + '\\n'.join([x for x in info_dict['beyts']])\r\n f.write(txt)\r\n\r\n\r\n locale.setlocale(locale.LC_ALL, '')\r\n DELIMITER = ';'# if locale.localeconv()['decimal_point'] == ',' else ','\r\n\r\n list_of_lists = [[info_dict[k] for k in KEYS]]\r\n with open('D:/poem/molana.csv', 'a', newline='', encoding='utf-16') as csvfile:\r\n\r\n writer = csv.writer(csvfile, delimiter=DELIMITER)\r\n writer.writerows(list_of_lists)",
"def write_into_csv(self, loc_details=[], itype='atm', mode='w'): \n \n if itype==\"brc\":\n csvfile_name = self.branch_file\n headers = self.branch_headers\n else:\n csvfile_name = self.atm_file\n headers = self.atm_headers\n\n with open(csvfile_name, mode, newline='') as csvfile:\n locwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n if mode=='w':\n locwriter.writerow(headers) \n\n for loc in loc_details:\n locwriter.writerow(loc)",
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])",
"def to_csv(self, filename, **kwargs):\n self.data.to_csv(filename, **kwargs)",
"def save_values(self):\n f_name = self.img_path.split('.')[0] + '_{}_'.\\\n format(self.data_type_name) + '.csv'\n dir_name = os.path.join(self.base_dir, f_name)\n if not os.path.exists(dir_name):\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)\n else:\n os.remove(f_name)\n for data_list in self.converted_values():\n with open(f_name, 'a') as f:\n wr = csv.writer(f, delimiter=';')\n wr.writerow(data_list)",
"def export_to_csv(self, outputfilepath, separator):\n\n con = fdb.connect(\n database=self.db_filepath,\n # dsn='localhost:~/test/CGI.vvv', #localhost:3050\n user='sysdba', password='masterkey'\n #charset='UTF8' # specify a character set for the connection\n )\n cur = con.cursor()\n statement = \"select * from FILES\"\n cur.execute(statement)\n # Retrieve all rows as a sequence and print that sequence:\n print(cur.fetchall())\n\n # VVV export format: Volume,Path,Name,Size,Ext,Last modified,Description\n\n with open(outputfilepath, 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([ i[0] for i in cur.description ]) \n writer.writerows(cur.fetchall())",
"def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def dump_all_binaries_to_CSV():\n ## TODO\n timenow = datetime.now()",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)",
"def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()",
"def guarda_archivos_csv(lista_archivos, nom_arch):\n da = open(nom_arch, \"w\")\n csv_writer = csv.writer(da)\n for arch in lista_archivos:\n fila = [arch[\"nombre\"], arch[\"ext\"], arch[\"peso\"], arch[\"fecha\"]]\n csv_writer.writerow(fila)\n da.close()",
"def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def write_to_file(info: List[str]) -> None:\n return",
"def export_csv(self, csvfileobject):\n for index, track in enumerate(self._tracks):\n csvfileobject.writerow(track.properties)\n for delta in track.periods: \n csvfileobject.writerow(delta.properties)",
"def export_fallout(): \n with open('fallout.csv', 'w', newline='') as csvfile:\n wr = csv.writer(csvfile, delimiter=',')\n wr.writerows(environment)",
"def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None"
] |
[
"0.6587749",
"0.6567262",
"0.6516401",
"0.64488095",
"0.6329177",
"0.6303232",
"0.62775934",
"0.6203434",
"0.6203434",
"0.62002873",
"0.6188982",
"0.6188288",
"0.6174277",
"0.6162279",
"0.61549187",
"0.6153997",
"0.6153394",
"0.6148572",
"0.6131395",
"0.6130641",
"0.611053",
"0.6094073",
"0.6085405",
"0.6076464",
"0.6076203",
"0.607584",
"0.603775",
"0.60330266",
"0.601673",
"0.6014595"
] |
0.67741257
|
0
|
Checks whether the collection and XML files can be found.
|
def check_paths(coll_path, out):
pathpattern = os.path.join(coll_path,"*.xml")
try:
if not os.path.exists(coll_path):
raise ValueError("Error: The collection could not be found.")
except ValueError as err:
print(err)
exit(1)
try:
if not os.path.exists(out):
raise ValueError("Error: The output directory could not be found.")
except ValueError as err:
print(err)
exit(1)
list_of_filenames = glob.glob(pathpattern)
try:
if not list_of_filenames:
raise ValueError("Error: No XML file was found in the collection.")
except ValueError as err:
print(err)
exit(1)
return list_of_filenames
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True",
"def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True",
"def valid(self):\r\n if self.dir_exists and self.files_exist:\r\n return True\r\n else:\r\n return False",
"def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False",
"def has_file(self, doc):\n return len(doc.package.files) != 0",
"def _verify(self) -> None:\n # Check if the files already exist\n if os.path.exists(os.path.join(self.root, self.image_root)):\n return\n\n # Check if .zip files already exists (if so extract)\n exists = []\n for filename, md5 in zip(self.filenames, self.md5s):\n filepath = os.path.join(self.root, filename)\n if os.path.isfile(filepath):\n if self.checksum and not check_integrity(filepath, md5):\n raise RuntimeError(\"Dataset found, but corrupted.\")\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n raise RuntimeError(\n \"Dataset not found in `root` directory, either specify a different\"\n + \" `root` directory or manually download the dataset to this directory.\"\n )",
"def check_helpers(self):\n paths = self.get_helper_out_paths()\n\n for p in paths:\n full_path = p + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return False\n\n return True",
"def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files",
"def check_missing_files(self):\n files = [getattr(self, attr) for attr in self._required]\n try:\n utilities.check_missing_files(files)\n except utilities.MissingConstraintError as err:\n err.message += \"\\nSkipping {}\\n\".format(self.__class__.__name__)\n raise err",
"def exists():\n check50.include(\"data\")\n check50.exists(\"adventure.py\")\n check50.exists(\"room.py\")",
"def assertExists(self):\n for db in self._db_tree:\n assert(db in self._datastore.conn.database_names)\n for collection in db['collections']:\n assert(collection['name'] in self._datastore[db['database']].collection_names())",
"def check(self):\n # validate contents still to do - for now just check if it exists\n return os.path.exists(self.getDefaultDatabaseConnectionParameter()['path'])",
"def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))",
"def __isValidXMLResourcesFolder(self, folder):\n tablesInFolder = filter(lambda f: os.path.isdir(os.path.join(folder, f)),\n os.listdir(folder))\n containedInRequiredTables = map(lambda f: f in self.__requiredTables,tablesInFolder)\n return (True if len(containedInRequiredTables)>0 else False)",
"def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))",
"def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True",
"def check_comps(root, comps):\n for key, comp in comps.items():\n\n filename = os.path.join(root, comp['filename'])\n if not os.path.isfile(filename):\n warnings.warn(\n 'The file {0} could not be found'.format(filename))",
"def check_file_exist(self):\n return False",
"def self_check(self):\n out = \"Loaded components\\n\"\n for package_name, package in sorted(self.packages.items()):\n out += \"\\t%s:\\n\" % package_name\n for c, fd in sorted(package.components.iteritems()):\n out += \"\\t\\t%s (%s)\\n\" % (c, fd.filename)\n\n LOG.info(out)\n\n for p in self.packages.values():\n for f in p.files:\n for id in f.requires:\n # This throws if it doesn't find something.\n try:\n self.get(id)\n except:\n LOG.exception(\"Error in: \" + f.filename)\n raise",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True",
"def exists(self):\n basedir = os.path.dirname(self.path)\n\n for filename in self.files:\n path = os.path.join(basedir, filename)\n if not os.path.exists(path):\n return False\n\n return True",
"def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)",
"def check_requirements(self):\n if not os.path.isfile(self.file_path):\n _logger.error(\"File not found\")\n _logger.error(ex)\n raise\n _logger.info(\"File notifier check passed\")",
"def _check_before_run(self):\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))",
"def check_paths( self ):\n check_a = utility_code.checkDirectoryExistence( self.PATH_TO_SOURCE_FILE_DIRECTORY )\n check_b = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_ORIGINALS_DIRECTORY )\n check_c = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_PARSED_DIRECTORY )\n check_d = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_DATA_DIRECTORY )\n check_e = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_COUNT_DIRECTORY )\n if check_a == 'exists' and check_b == 'exists' and check_c == 'exists' and check_d == 'exists' and check_e == 'exists':\n log.debug( 'path check passed' )\n else:\n message='path check failed; quitting'\n log.error( message )\n sys.exit( message )\n return",
"def _verify(self) -> None:\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.data_dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.data_dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()",
"def sanity_check(hdf):\n required_paths = ['Analyses', 'UniqueGlobalKey', 'Analyses/EventDetection_000']\n try:\n for p in required_paths:\n if p not in hdf:\n return False\n return True\n except:\n return False",
"def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))"
] |
[
"0.6503214",
"0.6503214",
"0.6393145",
"0.63877565",
"0.6369802",
"0.63557065",
"0.6311064",
"0.6305909",
"0.62576085",
"0.6249468",
"0.6203111",
"0.61695117",
"0.6166657",
"0.61103743",
"0.60590494",
"0.6058801",
"0.6057483",
"0.6051951",
"0.6050533",
"0.6041272",
"0.6041272",
"0.6041272",
"0.603898",
"0.603166",
"0.5977999",
"0.59778434",
"0.59767663",
"0.5958669",
"0.59573525",
"0.5952204"
] |
0.66739774
|
0
|
Tests if the input string is an XML filename.
|
def is_filename(name):
    test = re.search(r"[A-Za-z0-9_-]+\.xml$", name)
if test:
return True
else:
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def checkFilename(path):\n ret = libxml2mod.xmlCheckFilename(path)\n return ret",
"def isValidTagName(s):\n if s.lower().startswith(\"xml\"):\n return False\n return re.match(\"[^\\W\\d][\\w\\-_.]*\", s)",
"def isXML(content):\n\n testStr = '<?xml'\n\n # File case.\n if hasattr(content, 'read') and hasattr(content, 'seek'):\n xml = content.read(len(testStr))\n content.seek(0)\n if testStr == xml:\n return True\n\n # String case.\n elif isinstance(content, types.StringTypes):\n if content.startswith(testStr):\n return True\n\n return False",
"def is_filename_safe(value):\n return value == str_to_filename(value)",
"def validate_string(self, xml_string):\n etree_xml = parse(xml_string, allow_file=False)\n return self.validate_etree(etree_xml)",
"def xml_validator(self,xml_string):\r\n\r\n try:\r\n schema = etree.XMLSchema(file=XSD_FILE_PATH)\r\n parser = objectify.makeparser(schema=schema)\r\n objectify.fromstring(xml_string, parser)\r\n print(\"XML file has been validated.\")\r\n return True\r\n except XMLSyntaxError:\r\n #handle exception here\r\n print(\"XML file cannot be validated.\")\r\n return False",
"def _check_is_file(_string: str) -> str:\n if os.path.isfile(_string):\n return _string\n else:\n raise argparse.ArgumentTypeError(\"{0} file does \"\n \"not exists.\".format(_string))",
"def _isvalid_file(filename):\r\n thisisavalidfile = True\r\n if (filename[0] == \".\") or (filename[0] == \"_\") or not ((filename.split(\".\")[-1] == \"txt\") or (filename.split(\".\")[-1] == \"csv\")):\r\n thisisavalidfile = False\r\n\r\n return thisisavalidfile",
"def check_filename(name, fileinfos): \n try:\n if not name in fileinfos.keys():\n raise ValueError(\"Error: The XML file could not be found.\")\n except ValueError as err:\n print(err)\n exit(1)",
"def is_file_i(value):\n if not (type(value) is str and os.path.isfile(value)):\n return False\n else:\n return True",
"def is_valid_filename(self, imageNode):\r\n src = self.parser.getAttribute(imageNode, attr='src')\r\n\r\n if not src:\r\n return False\r\n\r\n if self.badimages_names_re.search(src):\r\n return False\r\n\r\n return True",
"def is_filename_safe(filename: str) -> bool:\n allowed_format = \"|\".join(FileStoreManager.ALL_FORMAT) # png|svg|jpg\n regex = f\"[a-zA-Z0-9][a-zA-Z0-9_()-\\.]*\\.({allowed_format})$\"\n return re.match(regex, filename) is not None",
"def is_file(path_name):\n if re.search(\"\\.[a-zA-Z]+$\", os.path.basename(path_name)):\n return True\n else:\n return False",
"def is_file_o(value):\n if not (type(value) is str and os.path.split(value)[0]):\n return False\n else:\n return True",
"def is_test_filename(filename):\n return 'test' in filename",
"def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True",
"def is_file(filename):\n if not os.path.isfile(filename):\n msg = \"{0} is not a file\".format(filename)\n raise argparse.ArgumentTypeError(msg)\n else:\n return filename",
"def _is_file_valid(name: str) -> bool:\n return not name.startswith(\".\")",
"def test_is_check_filename_False(self):\n self.assertFalse(check_filename('sample.txt'))",
"def check_file(file_name):\n\n if isinstance(file_name, str):\n if not os.path.isfile(file_name):\n raise ValueError('Got string input, but it is not a valid path')\n\n # check if this is just an xml file\n with open(file_name, 'rb') as fi:\n initial_bits = fi.read(30)\n if initial_bits.startswith(b'<?xml') or initial_bits.startswith(b'<SICD'):\n sicd_xml = fi.read()\n return _evaluate_xml_string_validity(sicd_xml)[0]\n\n return check_sicd_file(file_name)",
"def is_file(self):\n\n url_path = self.url.split('/')\n if re.match(r\".+\\.\\w+\", url_path[-1]):\n # Find <file_name>.<extension>\n return True\n return False",
"def _is_valid_filename(filename, ext):\n if not isinstance(filename, str):\n return False\n\n if not(ext == filename.split('.')[-1]):\n return False\n fname = os.path.abspath(filename)\n return os.path.isfile(fname)",
"def check_file_name_extensions(self, file_name, input_output):\n file_type = FileTypes ()\n extension_types = file_type.get_extension_types ()\n for extension in extension_types:\n if file_name.endswith (extension):\n if input_output == 'input':\n self._input_file = file_type.get_file_type (extension)\n else:\n self._output_file = file_type.get_file_type (extension)\n return True\n print (\"File name must end with:\")\n for extension in extension_types:\n print (extension)\n return False",
"def test_is_check_filename(self):\n self.assertTrue(check_filename('sample.csv'))",
"def valid_filename(filename):\n if filename in IGNORED_FILES:\n return False\n if not os.path.exists(filename):\n return False\n _, ext = os.path.splitext(filename)\n return ext == '.py'",
"def is_filename_safe(file: Union[str, FileStorage]) -> bool:\n filename = _retrieve_filename(file)\n accepted_formats = '|'.join(IMAGES) #png|jpg|svg|jpeg\n # start with any of alphanumeric, follwed by alhpanumeric and special characters(any amount), \".\", then one of the accepted formats\n regex = f'[a-zA-Z0-9][a-zA-Z0-9_()-\\.]*\\.({accepted_formats})$'\n # if no match is found match() returns None\n return re.match(filename, regex) is not None",
"def verify_filename(filename):\n extension = \".ics\"\n if not filename.endswith(extension):\n filename = filename + extension\n return filename",
"def is_string(document):\r\n return isinstance(document, str)",
"def verify_filename(filename):\n\n if is_fileobj(filename):\n raise ValueError(\"%r not a filename\" % filename)",
"def is_filetype(filename=None, search_str=None):\n if not search_str:\n return False\n results = puremagic.magic_file(filename)\n for result in results:\n if search_str.lower() in result.name.lower():\n return True\n return False"
] |
[
"0.68024284",
"0.6641543",
"0.65921056",
"0.6479503",
"0.63526535",
"0.63460016",
"0.627532",
"0.62090844",
"0.6194135",
"0.6066573",
"0.6058418",
"0.60285604",
"0.59887815",
"0.59805787",
"0.5978015",
"0.5959385",
"0.59314483",
"0.59218484",
"0.58941853",
"0.5881844",
"0.58812815",
"0.5868973",
"0.5866202",
"0.5856762",
"0.58464384",
"0.58158576",
"0.57506084",
"0.57164186",
"0.5702457",
"0.5695369"
] |
0.80211204
|
0
|
Adds subplots to the figure.
|
def add_subplots(fig, els_counted, atts_counted, chart_info, name="", log=False):
if name != "":
if is_filename(name):
# overview of element/attribute usage for a single text
draw_chart(els_counted, fig, chart_info["elements_used_text"], log)
draw_chart(atts_counted, fig, chart_info["attributes_used_text"], log)
elif is_attname(name):
# overview for a specific attribute
draw_chart(atts_counted, fig, chart_info["attribute_used"], log)
else:
# overview for a specific element
draw_chart(els_counted, fig, chart_info["element_used"], log)
else:
# overall overview of element and attribute usage
draw_chart(els_counted, fig, chart_info["elements_used_all"], log)
draw_chart(atts_counted, fig, chart_info["attributes_used_all"], log)
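The dispatcher above only routes to draw_chart with the appropriate chart_info entry; draw_chart and chart_info themselves are not part of this record. Purely as a self-contained sketch of the kind of subplot drawing being delegated to, the snippet below uses draw_counts as a hypothetical stand-in and made-up counts:

import matplotlib.pyplot as plt

# Hypothetical stand-in for draw_chart: one bar chart of counted items per subplot.
def draw_counts(counts, fig, position, title, log=False):
    ax = fig.add_subplot(*position)
    names = list(counts)
    ax.bar(range(len(names)), [counts[n] for n in names], log=log)
    ax.set_xticks(range(len(names)))
    ax.set_xticklabels(names, rotation=90)
    ax.set_title(title)

els_counted = {"p": 120, "head": 14, "div": 9}
atts_counted = {"type": 23, "n": 14}
fig = plt.figure(figsize=(8, 6))
draw_counts(els_counted, fig, (2, 1, 1), "Elements used (all files)")
draw_counts(atts_counted, fig, (2, 1, 2), "Attributes used (all files)", log=True)
fig.tight_layout()
fig.savefig("usage_overview.png")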
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_subplot(gridRows, gridCols, plotNo):\n pl.subplot(gridRows, gridCols, plotNo)",
"def create_four_subplots():\n pass",
"def plot_subplots(self, fig_num: int, title: str, raw: np.ndarray, smoothed: np.ndarray,\n axes_lbl_entries: Sequence[str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n axs = fig.subplots(3, 1, sharex=True)\n raw_lines = marker_graph_init(axs, raw, '', self.frame_nums, color='red')\n for idx, ax in enumerate(axs):\n plot_utils.update_ylabel(ax, axes_lbl_entries[idx], font_size=10)\n smoothed_lines = marker_graph_add(axs, smoothed, self.frame_nums, color='green')\n plt.tight_layout()\n plt.subplots_adjust(top=0.94)\n fig.suptitle(title)\n fig.legend((raw_lines[0], smoothed_lines[0]), ('Raw', 'Smoothed'), ncol=2, handlelength=0.75,\n handletextpad=0.25, columnspacing=0.5, loc='lower left')\n make_interactive()\n return fig",
"def axes_subplots():\n # gerenate data\n x = np.arange(0, 6 * np.pi+0.2, 0.2)\n y_1 = np.cos(x)\n y_2 = np.sin(2*x)\n y_3 = y_1 + y_2\n\n # display multiple\n fig, axs = plt.subplots(3, 1, sharex=True)\n fig.suptitle('Subplots w/ shared axes')\n axs[0].plot(x, y_1)\n axs[1].plot(x, y_2)\n axs[2].plot(x, y_3)\n axs[0].set_ylabel('$y$')\n axs[1].set_ylabel('$y$')\n axs[2].set_ylabel('$y$')\n\n plt.show()\n\n return None",
"def subplots(fig_width=None, fig_height=None, *args, **kwargs):\n fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)\n fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)\n return fig, axes",
"def set_subplots(self, names: [str]) -> None:\n self._plots = list()\n self._logger.debug(\"running\")\n if len(names) < 1:\n return\n r = len(names)\n c = 1\n for i in range(0, r):\n self._plots.append((names[i], (r, c, i + 1), True))\n self._logger.debug(\"done\")",
"def plot(self):\n fig, axes = plt.subplots(math.ceil(len(self.plots) / self.col_wrap), self.col_wrap)\n\n for ps, ax in zip(self.plots, axes.flatten()):\n for p in ps:\n if p.x is not None and p.y is not None:\n p.method(x=p.x, y=p.y, *p.args, ax=ax, **p.kwargs)\n else:\n p.method(*p.args, ax=ax, **p.kwargs)\n\n return fig, axes",
"def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes",
"def add_subplot(self, plot, i=0, j=0, plot_id=None):\n self.plotlayout.addWidget(plot, i, j)\n self.plots.append(plot)\n if plot_id is None:\n plot_id = id(plot)\n self.manager.add_plot(plot, plot_id)",
"def plot_subplots(self, fig_num: int, title: str, y_label: str, labeled: np.ndarray, filled: np.ndarray,\n smoothed: np.ndarray) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n axs = fig.subplots(3, 1, sharex=True)\n labeled_lines = marker_graph_init(axs, labeled, y_label, self.frame_nums, color='blue')\n filled_lines = marker_graph_add(axs, filled, self.frame_nums, color='red')\n smoothed_lines = marker_graph_add(axs, smoothed, self.frame_nums, color='green')\n plt.tight_layout()\n fig.suptitle(title)\n fig.legend((labeled_lines[0], filled_lines[0], smoothed_lines[0]), ('Labeled', 'Filled', 'Smoothed'),\n ncol=3, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='upper left')\n make_interactive()\n return fig",
"def __init__(self, subplot_objects):\n self.subplot_objects = subplot_objects",
"def plot_subplots_vel(self, fig_num: int, title: str, y_label: str, vel: np.ndarray) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n axs = fig.subplots(3, 1, sharex=True)\n marker_graph_init(axs, vel, y_label, self.frame_nums, color='blue')\n plt.tight_layout()\n fig.suptitle(title)\n make_interactive()\n return fig",
"def subplottPNG(self):\n os.chdir(self.mainDir)\n folder = os.listdir(u'.')\n folders = [f for f in folder if f[0] == 'S']\n\n for subject in folders:\n\n try: # go to the 'results' directory\n resultsDir = os.path.join(os.path.join(self.mainDir, subject),'results')\n os.chdir(resultsDir)\n\n # find all files with .png extension\n pngfiles = glob.glob('*.png')\n pngfiles.sort(key = lambda x:x[0])\n pngfiles.sort(key = lambda x:x[1])\n\n fig = plt.figure()\n\n for ii, filename in enumerate(pngfiles):\n f = plt.subplot(4,4,ii+1)\n f.set_axis_off()\n f.set_xlabel('ses:'+str(ii+1))# f.set_figheight(15)\n fig.set_figwidth(30)\n fig.set_figheight(30)\n fig.tight_layout()\n img = matplotlib.image.imread(filename)\n plt.imshow(img)\n\n figname = subject + '_subplot'+ '.png'\n matplotlib.pyplot.savefig(figname)\n\n except Exception as errMessage:\n print(errMessage)",
"def prepare_subplot(fig, point_list, cols, rows, number, lims):\n ax = fig.add_subplot(cols, rows, number)\n ax.scatter(list(map(lambda e: e[0], point_list)), list(map(lambda e: e[1], point_list)), s=9)\n ax.set_xlim(lims[0], lims[1])\n ax.set_ylim(lims[0], lims[1])\n return ax",
"def __init__(self):\n self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')\n self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]\n self.separated = True #if we have a list and need to plot the plots separated",
"def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, apportion = None, debug = 0, *args, **kwargs):\n #Note: we use squeeze = False internally, then return axes according to the keyword\n fig, axes = pylab_subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey, squeeze=False, *args, **kwargs)\n nrows = len(axes[:,0].flatten())\n # start with even allocation of unity\n fracts = np.ones(nrows)\n # if just one arg, that is the first allocation\n if apportion != None:\n if len(np.shape(apportion)) == 0:\n fracts[0]=apportion\n # fill up the rest\n for (i,a) in enumerate(apportion):\n if i<nrows: fracts[i] = a\n # now make into a fractions\n fracts = fracts/np.sum(fracts)\n\n #loop over axes, bottom to top, extract the space below and the height for each (ignore space above\n above = [] ; height = []\n lasty = 1\n for (i,ax) in enumerate(axes[:,0]):\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n height.append(pos[3]-pos[1])\n above.append(lasty - pos[3] )\n lasty = pos[1]\n\n# loop again, building down from top according to new share, keep margins\n yabove_0 = 1 # the norm. y coord of the bottom of the above graph\n print(above, height)\n for col in range(np.shape(axes)[1]):\n for (i,ax) in enumerate(axes[:,col]):\n if (i==0): yabove = yabove_0\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n # convert to x0,y0, dx, dy form by subtracting origin\n newh = height[i]*fracts[i]*nrows\n\n pos[1] = yabove - newh - above[i]\n pos[3] = newh\n pos[2] = pos[2] - pos[0]\n yabove = pos[1]\n if debug>0: print(pos)\n ax.set_position(pos)\n\n if squeeze: \n if len(np.shape(axes[0]))==0: axes = axes.flatten() \n if len(axes) == 1: axes = axes[0]\n return(fig, axes)",
"def subplot_fit(self):\r\n\r\n self.open_subplot_figure(number_subplots=12)\r\n\r\n self.figures_2d(data=True)\r\n\r\n self.set_title(label=\"Data (Source Scale)\")\r\n self.figures_2d(data=True, use_source_vmax=True)\r\n self.set_title(label=None)\r\n\r\n self.figures_2d(signal_to_noise_map=True)\r\n self.figures_2d(model_image=True)\r\n\r\n self.set_title(label=\"Lens Light Model Image\")\r\n self.figures_2d_of_planes(plane_index=0, model_image=True)\r\n\r\n # If the lens light is not included the subplot index does not increase, so we must manually set it to 4\r\n self.mat_plot_2d.subplot_index = 6\r\n\r\n final_plane_index = len(self.fit.tracer.planes) - 1\r\n\r\n self.mat_plot_2d.cmap.kwargs[\"vmin\"] = 0.0\r\n\r\n self.set_title(label=\"Lens Light Subtracted Image\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, subtracted_image=True, use_source_vmax=True)\r\n\r\n self.set_title(label=\"Source Model Image (Image Plane)\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, model_image=True, use_source_vmax=True)\r\n\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmin\")\r\n\r\n self.set_title(label=\"Source Plane (Zoomed)\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, plane_image=True, use_source_vmax=True)\r\n\r\n\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.subplot_index = 9\r\n\r\n self.figures_2d(normalized_residual_map=True)\r\n\r\n self.mat_plot_2d.cmap.kwargs[\"vmin\"] = -1.0\r\n self.mat_plot_2d.cmap.kwargs[\"vmax\"] = 1.0\r\n\r\n self.set_title(label=\"Normalized Residual Map (1 sigma)\")\r\n self.figures_2d(normalized_residual_map=True)\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmin\")\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmax\")\r\n\r\n self.figures_2d(chi_squared_map=True)\r\n\r\n self.set_title(label=\"Source Plane (No Zoom)\")\r\n self.figures_2d_of_planes(\r\n plane_index=final_plane_index,\r\n plane_image=True,\r\n zoom_to_brightest=False,\r\n use_source_vmax=True\r\n )\r\n\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.output.subplot_to_figure(\r\n auto_filename=\"subplot_fit\"\r\n )\r\n self.close_subplot_figure()",
"def plot_subplots(x_list, y_list, z_list):\n # create a line chart with the average rating of the top movies per year\n # min rating = 0 and max = 10\n plot1 = plt.subplot(211)\n plt.plot(x_list, y_list, color = 'lightseagreen')\n plt.axis([START_YEAR, END_YEAR - 1, 0, 10])\n plt.title('Average IMDB Movie Rating per Year', fontsize=12)\n plt.ylabel('Average Rating')\n plt.grid(True)\n\n # make x ticklabels of plot1 invisible\n plt.setp(plot1.get_xticklabels(), visible=False)\n\n # adjust space between subplots\n plt.subplots_adjust(hspace=0.3)\n\n # create a line chart with the average runtime with shared x-axis\n plot2 = plt.subplot(212, sharex=plot1)\n plt.plot(x_list, z_list, color = 'lightseagreen')\n plt.title('Average IMDB Movie Runtime per Year', fontsize=12)\n plt.ylabel('Average Runtime (min)')\n plt.grid(True)\n\n # define axes, with all years (2008 till 2017) on the x-axis\n # min runtime = 0, max runtime = 180\n plt.axis([START_YEAR, END_YEAR - 1, 0, 180])\n plt.xticks(x_list)\n plt.xlabel('Year')\n\n # plot both the subplots\n plt.show()",
"def _make_subplots(n_plots, max_cols=5, row_height=3, sharex=False, sharey=False):\n n_rows, n_cols = find_pretty_grid(n_plots, max_cols=max_cols)\n fig, axes = plt.subplots(n_rows, n_cols,\n figsize=(4 * n_cols, row_height * n_rows),\n constrained_layout=True,\n sharex=sharex, sharey=sharey)\n # we don't want ravel to fail, this is awkward!\n axes = np.atleast_2d(axes)\n return fig, axes",
"def PlotCollectionAddOneTwoThree(title, frit, ulist, addvalues):\n\tfig = pp.figure( figsize=printsize )\n\tnxpoints = 907\n\tfig.suptitle = \"TEST SUP TITLE\" #title\n\tplotspots = [231,232,233,234,235,236]\n\tfor i,(uvalue,ulabel,xrange,yrange) in enumerate(ulist):\n\t print('making sub-plot for u=%g' % (uvalue,)\t\n\t\tPlotOneSubplot( fig.add_subplot(plotspots[i]), frit, xrange, nxpoints, yrange, uvalue,ulabel, addvalues)\n\t#pp.show() \n\tfig.savefig(title+\".eps\", format='eps', pad_inches=0.02)\n\n\naddvalues = [\n # value to \"add\", color\n\t[ -2, \"#6688ee\" ],\n\t[ -1, \"#66aacc\" ],\n\t[ 0, \"#000000\" ],\n\t[ 1, \"#ee6688\" ],\n\t[ 2, \"#dd9966\" ],\n\t[ 3, \"#bbaa66\" ],",
"def makeQuadSubplots(df_rad_obs, \n df_dir_obs, \n df_rad_sen, \n df_dir_sen, \n suptitle='Big title',\n eps=3, \n min_samples=50):\n fig, axs = plt.subplots(2, 2, \n figsize=(10,10)\n )\n\n fig.suptitle('Clustering Output', fontsize=20)\n\n populateSubPlot(df=df_rad_obs,\n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=0, title='Obsever Wards Radiant')\n\n\n populateSubPlot(df=df_dir_obs, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=1, title='Obsever Wards Dire')\n\n\n populateSubPlot(df=df_rad_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=0, title='Sentry Wards Radiant')\n\n populateSubPlot(df=df_dir_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=1, title='Sentry Wards Dire')\n \n \n return fig, axs",
"def draw(subplots, title=''):\n if type(subplots) == SubPlot:\n subplots = [[subplots]]\n font_size = 16\n title_size = 20\n root = tk.Tk()\n root.title(title if title else 'Plot')\n root.geometry(\"1050x700\")\n fig = create_figure(root)\n\n num_rows = len(subplots)\n num_columns = max(len(graphs) for graphs in subplots)\n\n for i in range(num_rows):\n for j in range(num_columns):\n subplot = subplots[i][j]\n if subplot is None:\n continue\n index = (i*num_columns)+j+1\n ax = fig.add_subplot(num_rows, num_columns, index)\n ax.set_ylabel(subplot.y_label, fontsize=font_size)\n ax.set_xlabel(subplot.x_label, fontsize=font_size)\n ax.set_title(subplot.title, fontsize=font_size, fontweight='bold')\n ax.ticklabel_format(axis='y', style='sci')\n ax.ticklabel_format(useOffset=False)\n for graph in subplot.graphs:\n if type(graph) == Histogram:\n _draw_historgram(ax, graph)\n elif type(graph) == Graph:\n _draw_graph(ax, graph)\n '''\n spacing = 2\n if subplot.x_log:\n ax.set_xscale('log')\n x_lim = ax.get_xlim()\n ax.set_xlim(x_lim[0]/spacing, x_lim[1]*spacing)\n ax.grid(which='both')\n if subplot.y_log:\n ax.set_yscale('log')\n y_lim = ax.get_ylim()\n ax.set_ylim(y_lim[0]/spacing, y_lim[1]*spacing*3)\n ax.grid(which='both')\n '''\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n ax.legend(loc='best')\n\n fig.suptitle(title, fontweight='bold', fontsize=title_size)\n fig.subplots_adjust(hspace=.6, wspace=.3)\n root.mainloop()",
"def subplot_to_figure(self):\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")",
"def subplot(\r\n self,\r\n data: bool = False,\r\n noise_map: bool = False,\r\n signal_to_noise_map: bool = False,\r\n model_data: bool = False,\r\n residual_map: bool = False,\r\n normalized_residual_map: bool = False,\r\n chi_squared_map: bool = False,\r\n auto_filename: str = \"subplot_fit\",\r\n ):\r\n self._subplot_custom_plot(\r\n data=data,\r\n noise_map=noise_map,\r\n signal_to_noise_map=signal_to_noise_map,\r\n model_image=model_data,\r\n residual_map=residual_map,\r\n normalized_residual_map=normalized_residual_map,\r\n chi_squared_map=chi_squared_map,\r\n auto_labels=AutoLabels(filename=auto_filename),\r\n )",
"def add_figure1(self,x,y,index=1,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(x,y)",
"def createFigure(self,numSubplots,figWidth,figHeight):\r\n#\t\tif self.makeTB:\r\n#\t\t\tself.createToolbar(self.vbox)\r\n#\t\tself.vbox.pack_start(self.myTB,False,False)\r\n\t\tself.axisList=[]\r\n\t\tself.axis=None\r\n\t\t# define handles to widgets\r\n\r\n\t\t############## FIGURE\r\n\t\tself.figure = Figure(dpi=60)\t\t\r\n\t\tself.figure.set_facecolor(figBgColour)\r\n\t\tself.figure.set_edgecolor(figBgColour)\r\n\r\n\t\t#self.axis.set_title('Graph')\r\n\r\n\t\tself.canvas = FigureCanvas(self.figure)\r\n\t\tself.canvas.set_size_request(figWidth,figHeight)\r\n\t\tself.canvas.show()\r\n\t\tself.buttonCallback=self.canvas.mpl_connect('button_press_event', self.OnPress)\r\n#\t\tself.canvas.mpl_connect('resize_event', onAutoScale, None, self.axis, self.canvas)\r\n\r\n \r\n\t\t############## AXIS\r\n\t\t#self.axis=self.figure.add_axes(plotPosition,axisbg=axisColour)\r\n\t\tsubplotList=[]\r\n\t\tfor m in range(numSubplots[0]*numSubplots[1]):\r\n\t\t\tsubplotList.append(numSubplots[0]*100 + numSubplots[1] * 10 + m+1)\r\n\r\n\t\tif len(subplotList)==1:\r\n\t\t\tself.axisList.append(self.figure.add_subplot(111,axisbg=axisColour,polar=self.plotPolar))\r\n\t\t\tself.axisList[0].set_position(PLOT_POSITION)\r\n\t\telse:\r\n\t\t\tfor x in subplotList:\r\n\t\t\t\tself.axisList.append(self.figure.add_subplot(x,axisbg=axisColour))\r\n\r\n\t\tself.axis=self.axisList[0]\r\n\r\n\t\t# format each axis correctly\r\n\t\tfor axis in self.axisList:\r\n\t#\t\tself.axis.grid(True,which='major')\r\n\t\t\taxis.grid(True)\r\n\t#\t\tself.axis.grid(True,which='minor',color='r', linestyle='-', linewidth=2)\r\n\t#\t\tself.axis.set_position(plotPosition)\r\n\r\n\t\t\txax=axis.get_xticklabels()\r\n\t\t\tyax=axis.get_yticklabels()\r\n\r\n\t\t\tfor tick in xax:\r\n\t\t\t\ttick.set_fontsize(axisTextSize)\r\n\r\n\t\t\tfor tick in yax:\r\n\t\t\t\ttick.set_fontsize(axisTextSize)\t\t\r\n\r\n\t\t\r\n\t\tself.legendStr=[]\r\n\t\tself.gaList=[]\r\n\r\n\t\t## add cursor function to axis when mouse is over it\r\n#\t\tself.cursor = Cursor(self.axis, useblit=True, color='red', linewidth=1)\r\n\r\n\t\tself.canvas.draw()\r\n\r\n\t\t# plot a transparent rectangle just on axis 1\r\n\t\tcurrXlim=self.axis.get_xlim()\r\n\t\tdx=abs(currXlim[1]-currXlim[0])\r\n\t\tx0=currXlim[0]\r\n\t\tcurrYlim=self.axis.get_ylim()\r\n\t\tdy=abs(currYlim[1]-currYlim[0])\r\n\t\ty0=currYlim[0]\r\n\r\n\t\tself.axis.r1=plotRect(self.axis,self.canvas,(x0,y0),dx,dy,showRect=self.showRect)\r\n\r\n\t\t#self.axis.cla()\r\n\r\n\t\t\r\n\t\t############## TOOLBAR\r\n\t\t# use a custom version of the matplotlib toolbar\r\n#\t\ttoolbar = NavigationToolbar2(self.canvas, self.win)\r\n\t\tself.toolbar = PlotToolbar(self.canvas,self.win,self.axis)\r\n\t\tzoomtoolbar = PlotZoomToolbar(self.canvas,self.win,self.axis,)\r\n\r\n\t\t# make a TB menu\r\n\t\tmenuList=['|FFT|','Normalised |FFT|','|FFT| & arg(FFT)','|T| & <T','Re & Im (T)','Re & Im (1/T - 1)','n & alpha']\r\n\t\tmnuBtn = MenuToolButtonWidget(menuList, icon=gtk.STOCK_SELECT_COLOR, label='FFT')\r\n\t\tmnuBtn.btn.connect(\"clicked\",self.newFFTwin2,0)\r\n\t\tfor m in range(len(menuList)):\r\n\t\t\tmnuBtn.menuItems[m].connect(\"activate\",self.newFFTwin,m)\r\n\r\n\t\tmnuBtn.btn.set_tooltip_text('Take windowed FFT of ALL lines.')\r\n\t\tself.toolbar.add(mnuBtn.btn)\r\n\r\n\r\n\r\n\t\tsep=gtk.SeparatorToolItem()\r\n\t\tself.toolbar.insert(sep,1)\r\n\r\n\r\n\t\tbtn6=gtk.ToolButton(gtk.STOCK_CLEAR)\r\n\t\tbtn6.connect(\"clicked\",self.OnClear)\r\n\t\tbtn6.set_label('Clear')\r\n\t\tbtn6.set_tooltip_text('Clear the 
axis.')\r\n\t\tself.toolbar.insert(btn6,1)\r\n\r\n\t\tbtn0=gtk.ToolButton(gtk.STOCK_SAVE_AS)\r\n\t\tbtn0.connect(\"clicked\",self.OnExport)\r\n\t\tbtn0.set_label('Export')\r\n\t\tbtn0.set_tooltip_text('Export data from a curve.')\r\n\t\tself.toolbar.insert(btn0,1)\r\n\r\n\r\n\t\t# make a TB menu\r\n\t\tfitMenuList=['Linear','Polynomial','Exp decay','Subtract exp']\r\n\t\tfitmnuBtn = MenuToolButtonWidget(fitMenuList, icon=gtk.STOCK_ABOUT, label='Fit')\r\n\t\tfitmnuBtn.btn.connect(\"clicked\",self.fitPolynomial,0)\r\n\t\tfor m in range(len(fitMenuList)):\r\n\t\t\tfitmnuBtn.menuItems[m].connect(\"activate\",self.fitPolynomial,m)\r\n\r\n\t\tfitmnuBtn.btn.set_tooltip_text('Fits a polynomial to data (default is a linear fit).')\r\n\t\tself.toolbar.add(fitmnuBtn.btn)\r\n\r\n\r\n\t\tbtn7=gtk.ToolButton(gtk.STOCK_CONVERT)\r\n\t\tbtn7.connect(\"clicked\",self.getBeamWidth)\r\n\t\tbtn7.set_label('Beamwidth')\r\n\t\tbtn7.set_tooltip_text('Get the beamwidth (fits Gaussian to dy/dx).')\r\n\t\tself.toolbar.add(btn7)\r\n\r\n\t\tbtn8=gtk.ToolButton(gtk.STOCK_EDIT)\r\n\t\tbtn8.connect(\"clicked\",self.editPlotParams)\r\n\t\tbtn8.set_label('Axes')\r\n\t\tbtn8.set_tooltip_text('Edit plot parameters.')\r\n\t\tself.toolbar.add(btn8)\r\n\r\n\t\tbtn9=gtk.ToolButton(gtk.STOCK_PROPERTIES)\r\n\t\tbtn9.connect(\"clicked\",self.editLegend)\r\n\t\tbtn9.set_label('Legend')\r\n\t\tbtn9.set_tooltip_text('Edit legend.')\r\n\t\tself.toolbar.add(btn9)\r\n\r\n#\t\tself.toolbar.set_style(gtk.TOOLBAR_BOTH) # make toolbar icons and labels visible\r\n\r\n\t\tif self.makeTB:\r\n\t\t\tself.vbox.pack_start(self.toolbar,False,False)\r\n\r\n\t\tself.vbox.pack_start(self.canvas,True,True)\r\n\t\tself.vbox.pack_start(zoomtoolbar,False,False)\r\n\r\n\t\t####### Line selector/axis alteration toolbar\r\n\t\thbox=gtk.HBox(homogeneous=False, spacing=0)\r\n\r\n\t\tparamNames = ['Line:']\r\n\t\tparamTypes = ['cmb']\r\n\t\tparamDefaultValues = [[]]\r\n\r\n\t\tparamBox = ParamWidget(paramNames,paramTypes,paramDefaultValues)\r\n\t\tself.cmbBox = paramBox.objectList[0]\r\n#\t\tself.cmbBox.connect('changed',self.line_changed)\r\n\r\n\t\tself.hideBtn = gtk.ToggleToolButton(gtk.STOCK_NO)\r\n\t\tself.hideBtn.set_tooltip_text('Hide')\r\n\t\tself.hideBtn.connect('clicked',self.toggle_line)\r\n\t\tparamBox.table.attach(self.hideBtn,0,1,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\t\t\r\n\t\tself.colourBtn = gtk.ToolButton(gtk.STOCK_COLOR_PICKER)\r\n\t\tself.colourBtn.set_tooltip_text('Colour')\r\n\t\tself.colourBtn.connect('clicked',self.change_colour)\r\n\t\tself.color=gtk.gdk.Color(red=0,green=0,blue=1)\r\n\r\n\t\tparamBox.table.attach(self.colourBtn,1,2,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\t\t\r\n\t\tself.cmbStyle = gtk.combo_box_new_text()\r\n\r\n\t\tfor style in STYLES:\r\n\t\t\tself.cmbStyle.append_text(style)\r\n\t\tself.cmbStyle.set_active(0)\r\n#\t\tself.style.set_tooltip_text('Line style')\r\n\t\tself.cmbStyle.connect('changed',self.change_style)\r\n\r\n\t\tparamBox.table.attach(self.cmbStyle,2,3,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\r\n\t\tself.removeBtn = gtk.ToolButton(gtk.STOCK_DELETE)\r\n\t\tself.removeBtn.set_tooltip_text('Remove')\r\n\t\tself.removeBtn.connect('clicked',self.remove_line)\r\n\r\n\t\tparamBox.table.attach(self.removeBtn,3,4,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\r\n\r\n\t\thbox.pack_start(paramBox.frame,False,False)\r\n\r\n\t\tparamNames = ['Axis:','Left-click sets:']\r\n\t\tparamTypes = ['lbl','cmb']\r\n\t\tparamDefaultValues = ['',['Nothing','Window left','Window right','Axis 
left','Axis right','Plots point']]\r\n\r\n\t\tparamBox = ParamWidget(paramNames,paramTypes,paramDefaultValues)\r\n\t\thbox.pack_start(paramBox.frame,False,False)\r\n\t\t\r\n\t\tself.cmbBtn = paramBox.objectList[1]\r\n\t\tself.cmbBtn.set_active(0)\r\n\t\tself.cmbBtn.connect(\"changed\", self.onModeChanged)\r\n\r\n\t\thbox.show_all()\r\n\r\n#\t\tself.canvas.mpl_connect('axes_enter_event', self.enter_axes)\r\n#\t\tself.canvas.mpl_connect('axes_leave_event', self.leave_axes)\r\n\r\n\t\tif self.makeTB:\r\n#\t\t\tself.connectToolbar()\r\n\t\t\tself.vbox.pack_start(hbox,False,False)",
"def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n\n title_prefix = self.trial_name + ' ' + self.segment_name + ' '\n # Figure 1, position in 3 subplots\n pos_fig_sub = self.plot_subplots(self.fig_num_start, title_prefix + 'Position (mm)', self.pos_raw,\n self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_sub)\n\n # Figure 2, orientation in 3 subplots\n eul_fig_sub = self.plot_subplots(self.fig_num_start + 1, title_prefix + 'Euler Angles (deg)', self.eul_raw,\n self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_sub)\n\n # Figure 3, velocity in 3 subplots\n vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 2, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel)\n figs.append(vel_fig_sub)\n\n # Figure 4, angular velocity in 3 subplots\n ang_vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 3, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel)\n figs.append(ang_vel_fig_sub)\n\n # Figure 5, position in one axes\n pos_fig_one = self.plot_one_axes(self.fig_num_start + 4, title_prefix + 'Position', 'Position (mm)',\n self.pos_raw, self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_one)\n\n # Figure 6, orientation in one axes\n eul_fig_one = self.plot_one_axes(self.fig_num_start + 5, title_prefix + 'Euler Angles', 'Angle (deg)',\n self.eul_raw, self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_one)\n\n # Figure 7, velocity in one axes\n vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 6, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel, self.pos_legend)\n figs.append(vel_fig_one)\n\n # Figure 8, angular velocity in one axes\n ang_vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 7, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel, self.pos_legend)\n figs.append(ang_vel_fig_one)\n\n return figs",
"def plot_separate(self):\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n\n self.decomposition_plot(ax=axes[0, 0], text='Lens light', lens_light_add=True, unconvolved=True)\n self.decomposition_plot(ax=axes[1, 0], text='Lens light convolved', lens_light_add=True)\n self.decomposition_plot(ax=axes[0, 1], text='Source light', source_add=True, unconvolved=True)\n self.decomposition_plot(ax=axes[1, 1], text='Source light convolved', source_add=True)\n self.decomposition_plot(ax=axes[0, 2], text='All components', source_add=True, lens_light_add=True,\n unconvolved=True)\n self.decomposition_plot(ax=axes[1, 2], text='All components convolved', source_add=True,\n lens_light_add=True, point_source_add=True)\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes",
"def __init__(self, subplot_class, *args, **kwargs):\n import pylab\n self.fig = pylab.figure(*args, **kwargs)\n self.subplot_class = subplot_class",
"def one_data_figure_sep(obs, fig, subplot_spec=None, **kwargs):\n if subplot_spec is None:\n gs = gridspec.GridSpec(2,1,height_ratios = [3,1], hspace=0)\n else:\n gs = gridspec.GridSpecFromSubplotSpec(2, 1, hspace=0,\n subplot_spec=subplot_spec,\n height_ratios = [3,1])\n \n \n spec = pl.Subplot(fig, gs[0,0])\n spec.plot(obs['wavelength'], obs['spectrum'], **kwargs)\n spec.set_ylabel(r'$f_\\lambda \\times \\, C$')\n pl.setp(spec.get_xticklabels(), visible = False)\n fig.add_subplot(spec)\n unc = pl.Subplot(fig, gs[1,0])\n unc.plot(obs['wavelength'], obs['unc'], **kwargs)\n unc.set_ylabel(r'$\\sigma f_\\lambda$')\n unc.set_xlabel(r'$\\lambda (\\AA)$')\n fig.add_subplot(unc)\n return fig, gs"
] |
[
"0.7376317",
"0.72689253",
"0.70899445",
"0.70780927",
"0.6994428",
"0.6944281",
"0.6904434",
"0.6834838",
"0.678537",
"0.673969",
"0.6703993",
"0.661437",
"0.65972716",
"0.6589826",
"0.65496194",
"0.65350604",
"0.6439696",
"0.6395684",
"0.6373495",
"0.6358416",
"0.63542396",
"0.6331364",
"0.6301813",
"0.6300426",
"0.6243113",
"0.62374157",
"0.6233243",
"0.6216213",
"0.6179898",
"0.61696494"
] |
0.7276523
|
1
|
Count elements and attributes and draw figure
|
def count_and_draw(fileinfos, args, name=""):
els_counted = count_items(fileinfos,"el",name)
atts_counted = count_items(fileinfos,"att",name)
draw_figure(els_counted, atts_counted, args, name)
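count_items and draw_figure are defined elsewhere in the source this snippet was mined from (draw_figure and process_all appear among the related snippets further down). As a hedged illustration of the counting half only, here is a hypothetical aggregator over per-file usage dictionaries; the usage_el / usage_att field names follow the process_all snippet below and everything else is assumed:

from collections import Counter

def count_items_sketch(fileinfos, kind):
    # Sum the per-file usage dicts into one overall tally.
    key = "usage_el" if kind == "el" else "usage_att"
    totals = Counter()
    for info in fileinfos.values():
        totals.update(info.get(key, {}))
    return dict(totals)

fileinfos = {
    "text1.xml": {"usage_el": {"p": 40, "div": 3}, "usage_att": {"n": 12}},
    "text2.xml": {"usage_el": {"p": 55, "head": 6}, "usage_att": {"type": 4}},
}
print(count_items_sketch(fileinfos, "el"))   # {'p': 95, 'div': 3, 'head': 6}
print(count_items_sketch(fileinfos, "att"))  # {'n': 12, 'type': 4}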
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)",
"def draw_elem_numbers(n):\n return drawNumbers(named(n))",
"def draw_figure(els_counted, atts_counted, arg, name=\"\"):\n \n stdout.write(\"... drawing chart\\n\") \n fig_width = max(len(els_counted),len(atts_counted),10)\n fig_height = 6\n if is_filename(name) or name == \"\":\n fig_height *= 2\n fig = plt.figure(figsize=(fig_width,fig_height))\n \n chart_info = get_chart_info(arg.coll_name, name) \n add_subplots(fig, els_counted, atts_counted, chart_info, name, arg.log)\n \n plt.tight_layout() \n save_figure(fig, arg.out, name)\n plt.close()",
"def XPCountChildWidgets(inWidget):\n pass",
"def plot_insertsize():",
"def display_number_of_items(self):\n\n items = [item for item in self.graphicsView.scene().items() if type(item) is QEngineeringUnknownItem]\n if len(items) > 0:\n self.labelStatus.setText(\n \"<font color='red'>\" + self.tr('Unrecognition') + \" : {}</font>\".format(len(items)))\n else:\n self.labelStatus.setText(\n \"<font color='black'>\" + self.tr('Unrecognition') + \" : {}</font>\".format(len(items)))\n\n items = [item for item in self.graphicsView.scene().items() if\n issubclass(type(item), SymbolSvgItem) and type(item) is not QEngineeringErrorItem]\n self.labelSymbolStatus.setText(\"<font color='blue'>\" + self.tr('Symbol') + \" : {}</font>\".format(len(items)))\n\n items = [item for item in self.graphicsView.scene().items() if type(item) is QEngineeringLineItem]\n self.labelLineStatus.setText(\"<font color='blue'>\" + self.tr('Line') + \" : {}</font>\".format(len(items)))\n\n items = [item for item in self.graphicsView.scene().items() if issubclass(type(item), QEngineeringTextItem)]\n self.labelTextStatus.setText(\"<font color='blue'>\" + self.tr('Text') + \" : {}</font>\".format(len(items)))\n\n self.itemTreeWidget.sceneChanged(self.graphicsView.scene().items())",
"def count():",
"def part1(mem):\n return len(paint_panels(mem, 0))",
"def render(self, n: int):\n if int(n) == 1:\n self.layout.children = [self.figures[0]]\n elif int(n) == 2:\n self.layout.children = [self.figures[0], self.figures[1]]\n elif int(n) == 3:\n self.layout.children = [\n self.figures[0],\n self.figures[1],\n self.figures[2],\n ]",
"def visualise_count_history(self, figsize=(5, 2)):\n plt.figure(figsize=figsize)\n data = []\n labels = []\n for v in self.V:\n print(v)\n labels.append(v)\n data.append(self.count_history[v])\n pal = plt.get_cmap('Dark2').colors\n plt.stackplot(\n np.arange(1, self.total_interactions + 1),\n np.array(data),\n labels=labels,\n colors=pal)\n plt.ylim(top=1, bottom=0)\n plt.xlabel('Interactions')\n plt.ylabel('Proportions')\n\n # Shrink current axis\n ax = plt.gca()\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.67, box.height])\n plt.xlim(1, self.total_interactions)\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1)",
"def size(self):",
"def DrawInfo(self, count):\n offset = 200\n shift = self.incr * 9\n for i in self.price.keys():\n self.canvas.create_text(self.incr / 2, shift, fill = self.color[i], anchor = NW,\n font=\"Times 20\", text = i + (\":\\t\" if len(i) > 9 else \":\\t\\t\") + str(self.share[i]))\n self.canvas.create_text(offset, 20, fill = self.color[i], font=\"Times 20\", text = i)\n (self.canvas.create_text(offset, 65, fill = \"#00FF00\", font=\"Times 30\",\n text = \"\\u25B2\" + str(self.price[i][0])) if self.price[i][0] >= self.old[i]\n else self.canvas.create_text(offset, 65, fill = \"red\", font=\"Times 30\",\n text = \"\\u25BC\" + str(self.price[i][0])))\n self.canvas.create_text(self.width / 2, 10, fill = \"black\",\n anchor = NW, text = str(count), font = \"Times 20\")\n self.canvas.create_rectangle(self.incr * 7, self.incr * 8, self.incr * 12,\n self.incr * 9, fill = \"white\", width = 3)\n self.canvas.create_text(self.width / 2, self.incr * 8 + self.incr / 3,\n font = \"Times 30\", fill = \"black\", text = f\"{self.money:,.2f}$\")\n self.canvas.create_text(self.width / 2, self.incr * 8 + self.incr / 1.3,\n font = \"Times 20\", fill = \"black\", text = f\"{self.GetSpeculated():,.2f}$\")\n self.old[i] = self.price[i][0]\n offset += 425\n shift += self.incr / 2",
"def test_count_elements(self):\n from pykml.util import count_elements\n\n test_datafile = path.join(\n path.dirname(__file__),\n 'testfiles',\n 'google_kml_developers_guide/complete_tour_example.kml'\n )\n with open(test_datafile) as f:\n doc = parse(f, schema=Schema('kml22gx.xsd'))\n summary = count_elements(doc)\n\n self.assertTrue('http://www.opengis.net/kml/2.2' in summary)\n self.assertEqual(4,\n summary['http://www.opengis.net/kml/2.2']['Placemark']\n )\n self.assertTrue('http://www.google.com/kml/ext/2.2' in summary)\n self.assertEqual(5,\n summary['http://www.google.com/kml/ext/2.2']['FlyTo']\n )\n self.assertEqual(2,\n summary['http://www.google.com/kml/ext/2.2']['Wait']\n )",
"def generate(self, diagram):",
"def info(self):\n print(\n \"\"\"\n Factory holds {0} unique plots\n \"\"\".format(\n len(self.plots)\n )\n )\n for i, plot in enumerate(self.plots):\n print(\"\\t\\tPlot {0} holds {1} unique datasets\".format(i, len(plot)))\n for j, dataset in enumerate(plot):\n print(\n \"\\t\\t\\tDataset {0} holds {1} datapoints\".format(\n j, len(dataset[\"x\"])\n )\n )\n\n print()\n return",
"def compute_visuals(self):\n pass",
"def showAnns(self, anns):\n if len(anns) == 0:\n return 0\n ax = plt.gca()\n ax.set_autoscale_on(False)\n for ann in anns:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n if ann['category_id'] == 1:\n [x, y, width, height] = ann['bbox']\n rect = patches.Rectangle((x, y), width, height, edgecolor=c, facecolor=c, linewidth=2, alpha=0.4)\n ax.add_patch(rect)",
"def draw(self):",
"def num_object_attributes(self):\n return len(self._idx_to_shapes_and_colors) + self._n_sizes",
"def __repr__(self):\r\n return \"Colored Set of {0}\".format(self.count)",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def draw(self): \n [component.draw() for component in self.components]",
"def add_subplots(fig, els_counted, atts_counted, chart_info, name=\"\", log=False):\n if name != \"\":\n if is_filename(name):\n # overview of element/attribute usage for a single text\n draw_chart(els_counted, fig, chart_info[\"elements_used_text\"], log)\n draw_chart(atts_counted, fig, chart_info[\"attributes_used_text\"], log)\n elif is_attname(name):\n # overview for a specific attribute\n draw_chart(atts_counted, fig, chart_info[\"attribute_used\"], log)\n else:\n # overview for a specific element\n draw_chart(els_counted, fig, chart_info[\"element_used\"], log)\n else:\n # overall overview of element and attribute usage\n draw_chart(els_counted, fig, chart_info[\"elements_used_all\"], log)\n draw_chart(atts_counted, fig, chart_info[\"attributes_used_all\"], log)",
"def get_number_of_elements(self):\n if self.page.paginator.count < int(self.page.number) * self.page_size:\n show = self.get_shows()\n\n return \"{} - {}\".format(show, self.page.paginator.count)\n else:\n show = self.get_shows()\n return \"{} - {}\".format(show, self.get_page_range())"
] |
[
"0.59811217",
"0.58999234",
"0.5895706",
"0.58333105",
"0.5640615",
"0.5606671",
"0.5598163",
"0.55730134",
"0.55256116",
"0.5464455",
"0.54509664",
"0.5447591",
"0.5437948",
"0.5432597",
"0.54283845",
"0.5419368",
"0.54186237",
"0.5411935",
"0.54011625",
"0.5399584",
"0.5393961",
"0.5393961",
"0.5393961",
"0.5393961",
"0.5393961",
"0.5393961",
"0.5393961",
"0.5393961",
"0.53856814",
"0.53822446"
] |
0.7792259
|
0
|
Reduce the size of an image to the indicated maximum dimensions. This function takes a PIL.Image object and integer values for the maximum allowed width and height (a zero value means no maximum constraint), calculates the size that meets those constraints, and resizes the image. The resize is done in place, changing the original object. Returns the image together with a boolean indicating whether it was changed.
|
def downsize_img(img: Image.Image,
max_width: int,
max_height: int) -> Tuple[Image.Image, bool]:
width, height = img.size
# A max dimension of 0 means "no constraint", so fall back to the current size
if not max_width:
max_width = width
if not max_height:
max_height = height
if (max_width, max_height) == (width, height): # If no changes, do nothing
return img, False
img.thumbnail((max_width, max_height), resample=Image.LANCZOS)
return img, True
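A quick usage sketch, assuming Pillow is installed and that the usual from PIL import Image / from typing import Tuple imports accompany the function above; the image is built in memory so nothing needs to exist on disk:

from PIL import Image

original = Image.new("RGB", (4000, 3000), color="white")
resized, was_changed = downsize_img(original, max_width=1600, max_height=0)
# thumbnail() keeps the aspect ratio, so the result fits within the 1600px width limit
print(resized.size, was_changed)   # (1600, 1200) True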
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def resizeImage(image, maxW, maxH):\n\timageW, imageH = image.size\n\tif imageW == maxW and imageH == maxH:\n\t\treturn image\n\t# find which axis requires the biggest zoom (smallest relative max dimension)\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\tzoom = max(zoomW, zoomH)\n\tif zoomW >= zoomH:\t# size is defined by width\n\t\tmaxH = int(imageH//zoom)\t# calculate the new height\n\telse:\n\t\tmaxW = int(imageW//zoom)\n\treturn image.resize((maxW, maxH))",
"def preprocess_image(image: Image.Image, max_size: int = 1200) -> Image.Image:\n width_0, height_0 = image.size\n\n if max((width_0, height_0)) <= max_size:\n return image\n\n if width_0 > height_0:\n aspect_ratio = max_size / float(width_0)\n new_height = int(float(height_0) * float(aspect_ratio))\n image = image.resize((max_size, new_height), Image.ANTIALIAS)\n return image\n else:\n aspect_ratio = max_size / float(height_0)\n new_width = int(float(width_0) * float(aspect_ratio))\n image = image.resize((max_size, new_width), Image.ANTIALIAS)\n return image",
"def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n raise NotImplementedError",
"def resize_image(self, image, min_dim=None, max_dim=None, min_scale=None, resize_mode=\"square\"):\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0), (0, 0)]\n crop = None\n\n if resize_mode == \"none\":\n return image, window, scale, padding, crop\n pass\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n pass\n if min_scale and scale < min_scale:\n scale = min_scale\n pass\n\n # Does it exceed max dim?\n if max_dim and resize_mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n pass\n pass\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = self.resize(image, (round(h * scale), round(w * scale)), preserve_range=True)\n pass\n\n # Need padding or cropping?\n if resize_mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n pass\n\n elif resize_mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n pass\n\n elif resize_mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = np.random.randint(0, (h - min_dim))\n x = np.random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y:y + min_dim, x:x + min_dim]\n window = (0, 0, min_dim, min_dim)\n pass\n\n else:\n raise Exception(\"Mode {} not supported\".format(resize_mode))\n pass\n\n return image.astype(image_dtype), window, scale, padding, crop\n\n pass",
"def resize_image(image, max_dimension):\n max_size = (max_dimension, max_dimension)\n return image.thumbnail(max_size, Image.ANTIALIAS)",
"def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode=\"square\"):\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0), (0, 0)]\n crop = None\n\n if mode == \"none\":\n return image, window, scale, padding, crop\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n if min_scale and scale < min_scale:\n scale = min_scale\n\n # Does it exceed max dim?\n if max_dim and mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = resize(image, (round(h * scale), round(w * scale)),\n preserve_range=True)\n\n # Need padding or cropping?\n if mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = random.randint(0, (h - min_dim))\n x = random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y:y + min_dim, x:x + min_dim]\n window = (0, 0, min_dim, min_dim)\n else:\n raise Exception(\"Mode {} not supported\".format(mode))\n return image.astype(image_dtype), window, scale, padding, crop",
"def image_resize(img, min_size=600, max_size=1000):\n C, H, W = img.shape\n scale1 = min_size / min(H, W)\n scale2 = max_size / max(H, W)\n scale = min(scale1, scale2)\n img = img / 255\n img = transform.resize(img, (C, H * scale, W * scale),\n mode='reflect', anti_aliasing=False)\n # img = pytorch_normalize(img)\n # img = caffe_normalize(img)\n return img",
"def autoscale(self, img: Image, max_height: int, max_width: int):\n height = img.bgr.shape[0]\n width = img.bgr.shape[1]\n\n diff_height = max_height / height\n diff_width = max_width / width\n\n diff = min(diff_height, diff_width)\n\n height = int(height * diff)\n width = int(width * diff)\n\n return img.rescale_image(height, width)",
"def maxSize(image, maxSize, method=3):\n imAspect = float(image.size[0]) / float(image.size[1])\n outAspect = float(maxSize[0] / float(maxSize[1]))\n\n if imAspect >= outAspect:\n return image.resize(\n (maxSize[0], int((float(maxSize[0]) / imAspect) + 0.5)), method\n )\n else:\n return image.resize(\n (int((float(maxSize[1]) * imAspect) + 0.5), maxSize[1]), method\n )",
"def resizeImage(image, newDimension):\r\n image = validateImage(image)\r\n if image is None:\r\n print(\"ERROR - resizeImage: Image is missing.\")\r\n return None\r\n\r\n if not isinstance(newDimension, tuple) or len(newDimension) != image.ndim:\r\n print(\"ERROR - resizeImage: Specified dimension is illegal. Dimension=\", len(newDimension), \", ImageDimension=\",\r\n image.ndim)\r\n return None\r\n\r\n return cv2.resize(image, newDimension)",
"def resize_img(self, filename: str, size: Tuple[int, int] = (299, 299)):\n img = Image.open(join(self.source_dir, filename))\n width, height = img.size\n orig_shape = np.array(img.size)\n wanted_shape = np.array(size)\n ratios = wanted_shape / orig_shape\n wanted_width, wanted_height = size\n ratio_w, ratio_h = wanted_width / width, wanted_height / height\n\n if np.alltrue(ratios > 1):\n # Both sides of the image are shorter than the desired dimension,\n # so take the side that's closer in size and enlarge the image\n # in both directions to make that one fit\n factor = min(ratio_h, ratio_w)\n img = img.resize((int(width * factor), int(height * factor)))\n\n # Now we have an image that's either larger than the desired shape\n # or at least one side matches the desired shape and we can resize\n # with contain\n cover = resizeimage.resize_contain(img, size)\n cover.save(join(self.dest_dir, filename), 'JPEG')",
"def resize_image(image, min_dim=None, max_dim=None, padding=False):\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n # Does it exceed max dim?\n if max_dim:\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n # Resize image and mask\n if scale != 1:\n image = scipy.misc.imresize(\n image, (round(h * scale), round(w * scale)))\n # Need padding?\n if padding:\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n return image, window, scale, padding",
"def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)",
"def resizeImg(img, maxXSize=720):\r\n if img.size[1] > maxXSize:\r\n downRat = maxXSize / img.size[1]\r\n outImg = img.resize((int(img.size[0] * downRat),maxXSize))\r\n else:\r\n outImg = img\r\n return outImg",
"def resize_img(img, max_side=1000):\n height, width = img.shape\n # if the image is small enough as is, return it unchanged\n if height <= 1000 and width <= 1000:\n return img\n dim = tuple()\n if height > width:\n scale_ratio = max_side / height\n dim = (int(width * scale_ratio), max_side)\n else:\n scale_ratio = max_side / width\n dim = (max_side, int(height * scale_ratio))\n return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)",
"def resize(im, target_size, max_size):\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(target_size) / float(im_size_min)\n # prevent bigger axis from being more than max_size:\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)\n return im, im_scale",
"def downscale_image(im, max_dim=2048):\n a, b = im.size\n if max(a, b) <= max_dim:\n return 1.0, im\n\n scale = 1.0 * max_dim / max(a, b)\n new_im = im.resize((int(a * scale), int(b * scale)), Image.ANTIALIAS)\n return scale, new_im",
"def resize_image(img: torch.Tensor, new_size: Union[int, Tuple[int, int]], resize_method: str, crop_or_pad_constant: str=CROP_OR_PAD, interpolate_constant: str=INTERPOLATE) ->torch.Tensor:\n new_size = to_tuple(new_size)\n if list(img.shape[-2:]) != list(new_size):\n if resize_method == crop_or_pad_constant:\n return crop_or_pad(img, new_size)\n elif resize_method == interpolate_constant:\n return F.resize(img, new_size)\n raise ValueError(f'Invalid image resize method: {resize_method}')\n return img",
"def resizeImg(img, new_width, new_height):\n w, h = img.size\n width = new_width\n pwidth = new_width / float(w)\n height = int((float(h) * float(pwidth)))\n if height > new_height:\n height = new_height\n pheight = height / float(h)\n width = int((float(w) * float(pheight)))\n img = img.resize((width, height), Image.ANTIALIAS)\n return img",
"def resize(im, fit=None, crop=None, fill=None, smart_crop=False, upscale=False,\n zoom=None, target=None, HIGHRES=None, **kwargs):\n size = crop or fit or fill\n if not size:\n return im\n\n source_x, source_y = [float(v) for v in im.size]\n target_x, target_y = [int(v) for v in size]\n if HIGHRES:\n target_x = int(target_x * HIGHRES)\n target_y = int(target_y * HIGHRES)\n\n if crop or fill or not target_x or not target_y:\n scale = max(target_x / source_x, target_y / source_y)\n else:\n scale = min(target_x / source_x, target_y / source_y)\n\n # Handle one-dimensional targets.\n if not target_x:\n target_x = source_x * scale\n if not target_y:\n target_y = source_y * scale\n\n if zoom:\n if not crop:\n target_x = source_x * scale\n target_y = source_y * scale\n crop = True\n scale *= (100 + int(zoom)) / 100.0\n\n target_x = int(round(target_x))\n target_y = int(round(target_y))\n\n if scale < 1.0 or (scale > 1.0 and upscale):\n # Resize the image to the target size boundary. Round the scaled\n # boundary sizes to avoid floating point errors.\n im = im.resize(\n (int(round(source_x * scale)), int(round(source_y * scale))),\n antialias=True)\n\n if crop:\n # Use integer values now.\n source_x, source_y = im.size\n # Difference between new image size and requested size.\n diff_x = int(source_x - min(source_x, target_x))\n diff_y = int(source_y - min(source_y, target_y))\n cropped_image = smart_crop and im.smart_crop((target_x, target_y))\n if cropped_image and cropped_image is not im:\n im = cropped_image\n elif diff_x or diff_y:\n if isinstance(target, six.string_types):\n target = re.match(r'(\\d+)?,(\\d+)?$', target)\n if target:\n target = target.groups()\n if target:\n focal_point = [int(n) if (n or n == 0) else 50 for n in target]\n else:\n focal_point = 50, 50\n # Crop around the focal point\n halftarget_x, halftarget_y = int(target_x / 2), int(target_y / 2)\n focal_point_x = int(source_x * focal_point[0] / 100)\n focal_point_y = int(source_y * focal_point[1] / 100)\n box = [\n max(0, min(source_x - target_x, focal_point_x - halftarget_x)),\n max(0, min(source_y - target_y, focal_point_y - halftarget_y)),\n ]\n box.append(min(source_x, int(box[0]) + target_x))\n box.append(min(source_y, int(box[1]) + target_y))\n # Finally, crop the image!\n im = im.crop(box)\n return im",
"def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n inter = cv2.INTER_AREA\n height, width = image.shape[:2]\n\n source_aspect_ratio = float(width) / height\n target_aspect_ratio = float(max_w) / max_h\n\n if crop:\n if source_aspect_ratio <= target_aspect_ratio:\n h_cropped = int(width / target_aspect_ratio)\n x_offset = 0\n y_offset = int((float(height) - h_cropped) / 2)\n cropped = image[y_offset:(y_offset + h_cropped), x_offset:width]\n else:\n w_cropped = int(height * target_aspect_ratio)\n x_offset = int((float(width) - w_cropped) / 2)\n y_offset = 0\n cropped = image[y_offset:height, x_offset:(x_offset + w_cropped)]\n image = cv2.resize(cropped, (max_w, max_h), interpolation=inter)\n else:\n width, height = sizing.new_size_keep_aspect_ratio((width, height), (max_w, max_h), 'inner')\n image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)\n return image, image.shape[1], image.shape[0]",
"def resize(image_path, target_dimensions, image_format):\n with Image.open(image_path) as img:\n img = img.resize(target_dimensions, resample=Image.LANCZOS)\n if image_format == 'PNG':\n img = img.convert('RGBA')\n else:\n img = img.convert('RGB')\n img.save(image_path, format=image_format, quality=95)",
"def rescale_image(image, rescale_width):\r\n\r\n # image size\r\n image_height = pdb.gimp_image_height(image)\r\n image_width = pdb.gimp_image_width(image)\r\n\r\n # new image height\r\n rescale_height = round(image_height * (rescale_width * 1.0 / image_width))\r\n\r\n pdb.gimp_image_scale(image, rescale_width, rescale_height)\r\n gimp.message('Rescaled image')",
"def resize(img, height=800):\n if img.shape[0] > height:\n ratio = height / img.shape[0]\n return cv2.resize(img, (int(ratio * img.shape[1]), height))",
"def resize_aspect(image_path, original_dimensions, largest_size_target, image_format):\n img_x, img_y = original_dimensions\n if img_x >= img_y: # e.g. 1024x768\n target_dims = max(int(largest_size_target), 1), max(int(img_y * (largest_size_target / (img_x * 1.0))), 1)\n else: # e.g. 768x1024\n target_dims = max(int(img_x * (largest_size_target / (img_y * 1.0))), 1), max(int(largest_size_target), 1)\n resize(image_path, target_dims, image_format)",
"def _resize_image(filename, size):\n width, height = 0, 1\n\n try:\n import Image, ImageOps\n except ImportError:\n from PIL import Image, ImageOps\n\n if not size['resample']:\n resample = Image.ANTIALIAS\n\n img = Image.open(filename)\n if (img.size[width] > size['width'] or\n img.size[height] > size['height']):\n\n #If the image is big resize it with the cheapest resize algorithm\n factor = 1\n while (img.size[0] / factor > 2 * size['width'] and\n img.size[1] * 2 / factor > 2 * size['height']):\n factor *= 2\n if factor > 1:\n img.thumbnail((int(img.size[0] / factor),\n int(img.size[1] / factor)), resample=resample)\n\n if size['crop']:\n img = ImageOps.fit(img, (size['width'], size['height']), method=resample)\n else:\n img.thumbnail((size['width'], size['height']), resample=resample)\n\n try:\n img.save(filename, optimize=1)\n except IOError:\n img.save(filename)",
"def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n if crop:\n width, height = sizing.new_size_keep_aspect_ratio(image.size, (max_w, max_h), 'outer')\n image = image.resize((width, height), Image.ANTIALIAS)\n image = image.crop(sizing.new_size_by_croping(image.size, (max_w, max_h)))\n else:\n width, height = sizing.new_size_keep_aspect_ratio(image.size, (max_w, max_h), 'inner')\n image = image.resize((width, height), Image.ANTIALIAS)\n return image, image.size[0], image.size[1]",
"def resize_image(image, new_size):\n height, width = image.shape[:2]\n quad_size = min(height, width)\n\n if len(image.shape) == 3:\n cropped_image = image[:quad_size, :quad_size, :]\n else:\n cropped_image = image[:quad_size, :quad_size]\n\n resized_image = cv2.resize(cropped_image, (new_size, new_size))\n\n return resized_image",
"def imresize(img, size):\n if hasattr(size, \"__len__\"):\n num_rows, num_cols = size\n assert (num_rows > 0) or (num_cols > 0)\n if num_rows < 0:\n num_rows = num_cols * img.shape[0] / img.shape[1]\n if num_cols < 0:\n num_cols = num_rows * img.shape[1] / img.shape[0]\n else:\n num_rows = int(round(img.shape[0] * size))\n num_cols = int(round(img.shape[1] * size))\n return skimage.transform.resize(img, (num_rows, num_cols))",
"def _image_is_large_enough(im):\n return (im.shape[0] >= MIN_DIM) and (im.shape[1] >= MIN_DIM)"
] |
[
"0.7313931",
"0.6797074",
"0.66853905",
"0.6638203",
"0.6576893",
"0.64611125",
"0.6412122",
"0.6409634",
"0.63759714",
"0.6345033",
"0.62863255",
"0.62354404",
"0.62176794",
"0.62083846",
"0.6204622",
"0.61793035",
"0.615424",
"0.612027",
"0.61074275",
"0.6103727",
"0.60951966",
"0.6089458",
"0.6081684",
"0.6059987",
"0.6059119",
"0.6054333",
"0.602478",
"0.5946398",
"0.5914088",
"0.59109455"
] |
0.7365748
|
0
|
Reduce the number of colors of an Image object. It takes a PIL image object and tries to reduce the total number of colors, converting it to an indexed color (mode P) image. If the input image is in mode 1, it cannot be further reduced, so it's returned unchanged.
|
def do_reduce_colors(img: Image.Image,
max_colors: int) -> Tuple[Image.Image, int, int]:
orig_mode = img.mode
if orig_mode == "1":
return img, 2, 2
colors = img.getcolors()
if colors:
orig_colors = len(colors)
else:
orig_colors = 0
# Intermediate conversion steps when needed
if orig_mode in ["CMYK", "YCbCr", "LAB", "HSV"]:
img = img.convert("RGB")
elif orig_mode == "LA":
img = img.convert("RGBA")
# Actual color reduction happening here
if orig_mode in ["RGB", "L"]:
palette = Image.ADAPTIVE
elif orig_mode == "RGBA":
palette = Image.ADAPTIVE
transparent = Image.new("RGBA", img.size, (0, 0, 0, 0))
# blend with transparent image using own alpha
img = Image.composite(img, transparent, img)
elif orig_mode == "P":
palette = img.getpalette()
img = img.convert("RGBA")
width, height = img.size
alpha_layer = Image.new("L", img.size)
for x in range(width):
for y in range(height):
_, _, _, alpha = img.getpixel((x, y))
alpha_layer.putpixel((x, y), alpha)
img.putalpha(alpha_layer)
else:
return img, 0, 0
img = img.convert("P", palette=palette, colors=max_colors)
return img, orig_colors, len(img.getcolors())
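A minimal usage sketch for the function above, assuming it is defined in a module where PIL's Image and typing's Tuple are imported; the file name and color budget below are illustrative placeholders, not part of the original entry.

from PIL import Image  # the caller needs Pillow just like do_reduce_colors does

# Hypothetical example: open an RGB photo and cap it at 64 palette entries.
with Image.open("photo.png") as src:
    reduced, before, after = do_reduce_colors(src, max_colors=64)
    print(f"colors: {before} -> {after}, mode: {reduced.mode}")
    reduced.save("photo_reduced.png")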
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reduce_color(image):\n\n # http://stackoverflow.com/questions/5906693/how-to-reduce-the-number-of-colors-in-an-image-with-opencv-in-python\n w, h, _ = image.shape\n for row in xrange(h-1):\n for col in xrange(w-1):\n #pi = row * w * 3 + col * 3\n pixel = image[col][row]\n pixel[0] = __reduceColorValue(pixel[0])\n pixel[1] = __reduceColorValue(pixel[1])\n pixel[2] = __reduceColorValue(pixel[2])\n return image",
"def colorImgPreProcess(self, image):\n #do processing on the image while it's still in color\n image = cv2.medianBlur(image, 7) #kernal size must be odd\n #image = cv2.bilateralFilter(image, 9, 75, 75) #TODO: uncomment when it won't cause C++ errors with ROS\n #self.closeImages() #uncomment if showing output image\n return image",
"def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pg.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + (0,), None, pg.BLEND_RGBA_ADD)\n\n return image",
"def post_process_image(self, image):\r\n A = np.min(image)\r\n B = np.max(image)\r\n k = 255\r\n R, C = image.shape\r\n for i in range(R):\r\n for j in range(C):\r\n image[i][j] = (k / (B-A)) * (image[i][j] - A)\r\n average_image = np.average(image)\r\n if average_image > 50:\r\n return image.astype(dtype='uint8')\r\n else:\r\n return (255-image).astype(dtype='uint8')",
"def color_reduction(image, n_colors, method='kmeans', palette=None):\n method = method.lower()\n if method not in ('kmeans', 'linear', 'max', 'median', 'octree'):\n method = 'kmeans'\n if n_colors < 2:\n n_colors = 2\n elif n_colors > 128:\n n_colors = 128\n if method == 'kmeans':\n n_clusters = n_colors\n h, w = image.shape[:2]\n img = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)\n img = img.reshape((-1, 3)) # -1 -> img.shape[0] * img.shape[1]\n centers, labels = kmeans(img, n_clusters)\n if palette is not None:\n # palette comes in RGB\n centers = cv2.cvtColor(np.array([palette]), cv2.COLOR_RGB2LAB)[0]\n quant = centers[labels].reshape((h, w, 3))\n output = cv2.cvtColor(quant, cv2.COLOR_LAB2BGR)\n else:\n img = PIL.Image.fromarray(image[:, :, ::-1], mode='RGB')\n quant = img.quantize(colors=n_colors,\n method=get_quantize_method(method))\n if palette is not None:\n palette = np.array(palette, dtype=np.uint8)\n quant.putpalette(palette.flatten())\n output = np.array(quant.convert('RGB'), dtype=np.uint8)[:, :, ::-1]\n return output",
"def _adjust_color_img(self, results, factor=1.0):\n for key in results.get('img_fields', ['image']):\n # NOTE defaultly the image should be BGR format\n img = results[key]\n results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)",
"def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pygame.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + [0,], None, pygame.BLEND_RGBA_ADD)\n\n return image",
"def reduce_image(img, n_colors):\n # d (depth) will always be 3 due to RGB values\n w, h, d = img.shape\n\n # convert the image into a 2D array, where pixels[0] gets [r,g,b]\n # for the top-left and pixels[w*h] gets [r,g,b] for bottom-right\n pixels = np.float32(img.reshape((-1, d)))\n\n # perform k-means clustering on all pixels\n centroids, labels, deltas = k_means(data=pixels, K=n_colors)\n\n # update each pixel in the original image with its new classification\n pixels = np.array([centroids[i] for i in labels])\n\n # convert the 2D array back to 3D so it can be understood by skimage/plt\n # pixels[0][0] gets [r,g,b] at top-left, pixels[w][h] gets [r,g,b] at bottom-right\n pixels = np.int32(pixels.reshape((w, h, d)))\n\n return pixels, deltas, labels",
"def reduceImage(img,N,M,n,m):\n scaleN = int(n/(2*N))\n scaleM = int(m/(2*M))\n imgR = np.zeros((2*N+1,2*M+1))\n for i in range(2*N+1):\n for j in range(2*M+1):\n if img[i*scaleN+2,j*scaleM+2,3] != 255:\n imgR[i,j] = 0.\n else: \n imgR[i,j] = 1.\n return imgR",
"def rescale_image_0255(image):\n # scale image to from [0.0, 1.0] to [0, 255]\n image *= 255\n return image.astype(np.uint8)",
"def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_path=args.output_path, method=args.method, size=args.size, p=args.p, gpu_id=args.gpu_id, plot=args.plot)\n\n ec.encode(input_image_path)\n img_gray_name = ar_utils.gen_new_gray_filename(input_image_path)\n img_gray_path = os.path.join(args.intermediate_representation, img_gray_name)\n dc.decode(img_gray_path)\n\n if args.delete_gray and os.path.exists(img_gray_path):\n os.remove(img_gray_path)",
"def image_local_enhance_contrast(image: np.ndarray):\n \n # Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n image = filters.rank.enhance_contrast(image, morphology.disk(2))\n\n # Resize the iamge back to a shape of (2304, )\n return image_as_array(image)",
"def preprocess(image):\n return (image / 255) * 2 - 1",
"def reduceSize(image_object, mask_object):\n mask_np = sitk.GetArrayFromImage(mask_object)\n image_np = sitk.GetArrayFromImage(image_object)\n d,l,c = mask_np.shape\n dim = [[],[],[]]\n for k in range(d):\n if mask_np[k,:,:].max() == 0:\n continue\n else:\n dim[0].append(k)\n for i in range(l):\n if mask_np[k,i,:].max() == 0:\n continue\n else:\n dim[1].append(i)\n for j in range(c):\n if mask_np[k,i,j] == 1:\n dim[2].append(j)\n \n mask = mask_np[min(dim[0]):max(dim[0])+1, min(dim[1]):max(dim[1])+1, min(dim[2]):max(dim[2])+1]\n image = image_np[min(dim[0]):max(dim[0])+1, min(dim[1]):max(dim[1])+1, min(dim[2]):max(dim[2])+1]\n\n red_image_object = sitk.GetImageFromArray(image)\n red_mask_object = sitk.GetImageFromArray(mask)\n\n return red_image_object, red_mask_object",
"def preprocessing(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I = scipy.misc.imresize(I,size=(img_dim,img_dim))\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n #I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n I = I/255\n return I.astype(np.float).ravel() #flattens",
"def get_modified_median_cut_palette(image, colorCount=10, quality=1):\n\n w, h, _ = image.shape\n colors = []\n for row in xrange(0, h):\n for col in xrange(0, w, quality):\n pixel = image[col][row]\n b = pixel[0]\n g = pixel[1]\n r = pixel[2]\n if r < 250 and g < 250 and b < 250:\n colors.append((r, g, b)) \n if len(colors) == 0:\n return [(255,255,255)]\n c_map = mmcq.mmcq(colors, colorCount) \n return c_map.palette",
"def saturate(image_path, factor=4):\n BasicTransform.convert_image(image_path)\n\n with Image.open(image_path) as img:\n filter = ImageEnhance.Color(img)\n new_image = filter.enhance(factor)\n new_image.save(image_path)",
"def post_process_image(image):\n\n image = (image - np.min(image)) * (255 / (np.max(image) - np.min(image)))\n\n return image",
"def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img",
"def _preprocess(self, image):\n\n # Scale from [0, 255] to [0, 1] and BGR to RGB \n return (image / 255.0)[:, :, ::-1]",
"def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg",
"def obscure_image(image):\n size = image.size\n pixel_size = 9\n if size[0] < 9 or size[1] < 9:\n return image\n image_f = image.filter(ImageFilter.MaxFilter)\n image_s = image_f.resize((size[0] / pixel_size, size[1] / pixel_size), Image.NEAREST)\n image_l = image_s.resize((size[0], size[1]), Image.NEAREST)\n\n return image_l",
"def quantizeColor(bilateralFilter_img, a, ksize):\n medianBlur_img = cv2.medianBlur(bilateralFilter_img,ksize)\n [rows,cols,c] = medianBlur_img.shape\n quantizeColor_img = medianBlur_img\n for i in xrange(0,rows):\n for j in xrange(0,cols):\n pixel_b = medianBlur_img.item(i,j,0)\n pixel_g = medianBlur_img.item(i,j,1)\n pixel_r = medianBlur_img.item(i,j,2) \n pixel_b = math.floor(pixel_b/a)*a \n pixel_g = math.floor(pixel_g/a)*a\n pixel_r = math.floor(pixel_r/a)*a\n quantizeColor_img.itemset((i,j,0),pixel_b)\n quantizeColor_img.itemset((i,j,1),pixel_g)\n quantizeColor_img.itemset((i,j,2),pixel_r)\n\n return quantizeColor_img",
"def post_process_image(self, image):\n \n from numpy import max, min\n \n max_value, min_value = max(image), min(image)\n # print(min_value, max_value)\n # print(image)\n min_value = 0 if min_value < 0 else min_value \n _image = 255 * ( ( image - min_value )/ (max_value - min_value) )\n # print('next')\n # print(min(_image), max(_image))\n # print(_image)\n return 255 - _image.astype('uint8')",
"def grayscale(image: Image) -> Image:\r\n new_image = copy(image)\r\n for x,y,(r,g,b) in image:\r\n pix_bright = (r+g+b)//3\r\n Gray = create_color(pix_bright,pix_bright,pix_bright)\r\n set_color(new_image,x,y,Gray) \r\n return new_image",
"def rescale_image_01(image):\n # scale image to from [0, 255] to [0.0, 1.0]\n image = image.astype(np.float32)\n return image / 255",
"def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()",
"def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()",
"def process_image(image):\r\n image = random_brightness(image)\r\n image = crop_image(image)\r\n image = resize(image)\r\n return image",
"def adjust_saturation(img, saturation_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img"
] |
[
"0.7646288",
"0.6199736",
"0.6088131",
"0.6087488",
"0.60827786",
"0.6039727",
"0.58463526",
"0.58313364",
"0.5830371",
"0.5783675",
"0.5783629",
"0.57750285",
"0.57423425",
"0.5721086",
"0.5709663",
"0.57042706",
"0.56973356",
"0.5687904",
"0.56869465",
"0.56319326",
"0.56056535",
"0.5570622",
"0.555778",
"0.5551497",
"0.5551315",
"0.5536911",
"0.5532883",
"0.5532883",
"0.552239",
"0.5478538"
] |
0.71109265
|
1
|
Create and return a sample quizz
|
def sample_quizz(**params):
defaults = {
'title': 'Boomer WW II Quizz',
'description': 'Are You A World War II Whiz?'
}
defaults.update(params)
return Quizz.objects.create(**defaults)
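A hedged usage sketch for the helper above, as it might appear in a Django test case; the override value is an assumed example and the call requires Django's test database to be set up.

# Hypothetical test usage: only the title is overridden, the description keeps its default.
quizz = sample_quizz(title='Cold War Quizz')
assert quizz.title == 'Cold War Quizz'
assert quizz.description == 'Are You A World War II Whiz?'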
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_create_quizes(self):\n driver = self.driver\n wait = self.wait\n\n create_quizz_name(driver, wait, quiz_name)\n\n create_textual_question(driver, wait, textual_question_1)\n create_textual_question(driver, wait, textual_question_2)\n create_textual_question(driver, wait, textual_question_3)\n\n create_single_choice_question(driver, wait, single_choice_1, single_choice_1_opt_1,single_choice_1_opt_1, single_choice_1_opt_2)\n create_single_choice_question(driver, wait, single_choice_2, single_choice_2_opt_1,single_choice_2_opt_1, single_choice_2_opt_2)\n create_single_choice_question(driver, wait, single_choice_3, single_choice_3_opt_1,single_choice_3_opt_1, single_choice_3_opt_2)\n\n create_multiple_choice_question(driver, wait, multiple_choice_1, multiple_choice_1_opt_1, multiple_choice_1_opt_1, multiple_choice_1_opt_2)\n create_multiple_choice_question(driver, wait, multiple_choice_2, multiple_choice_2_opt_2, multiple_choice_2_opt_1, multiple_choice_2_opt_2)\n create_multiple_choice_question(driver, wait, multiple_choice_3, multiple_choice_3_opt_2, multiple_choice_3_opt_1, multiple_choice_3_opt_2)\n\n save_quiz(driver, wait)\n self.quiz_name = quiz_name\n self.email_teacher = email_teacher\n self.password_teacher = password_teacher\n\n find_quizz(driver, wait, quiz_name)\n driver.get_screenshot_as_file('{} created.png'.format(quiz_name))",
"def create_sample(question: Union[str, List[str]], context: Union[str, List[str]]) -> Union[SquadExample, List[SquadExample]]:\n if isinstance(question, list):\n return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]\n else:\n return SquadExample(None, question, context, None, None, None)",
"def play_quizz():\n body = request.get_json()\n\n previous_questions = body.get('previous_questions', [])\n quiz_category = body.get('quiz_category', None)\n\n try:\n quiz_category['id'] = int(quiz_category['id'])\n except:\n abort(405)\n\n selection = []\n ALL_CATEGORIES = 0\n\n if quiz_category['id'] == ALL_CATEGORIES:\n selection = Question.query.filter(~Question.id.in_(\n previous_questions)).order_by(Question.id).all()\n else:\n selection = Question.query.filter(~Question.id.in_(previous_questions)).filter(\n Question.category == quiz_category['id']).all()\n\n if len(selection) == 0:\n abort(404)\n\n STEP = 1\n index = randrange(0, len(selection), STEP)\n\n return jsonify({\n 'success': True,\n 'question': selection[index].format()\n })",
"def gen_questions(self, number_of_questions):",
"def _generate_qubits(self):\n return cq.LineQubit.range(4)",
"def sample_qubo(self, Q, **parameters):\n return self.child.sample_qubo(Q, **parameters)",
"def createQuiz(repo_url):\n CourseSet.quizcontent()\n ExtractChoices()\n # pass",
"def get_quits(self, uuid):\n\n return self.template(uuid, \"quits\")",
"def quick_quiz(character_set):",
"def create_quizz_question(classe, lecon):\n path_to_classe = \"..//data//questions//\" + classe\n path_to_questions = \"..//data//questions//\" + classe + \"//\" + lecon\n empty_questions_list = []\n # Enregistrement du fichier\n with open(path_to_questions, \"wb\") as file:\n writer = pickle.Pickler(file)\n writer.dump(empty_questions_list)\n\n # Enregistrement sur la liste\n with open(path_to_classe + '//liste_lecons', 'rb') as file:\n lecteur = pickle.Unpickler(file)\n liste_lecons = lecteur.load()\n liste_lecons.append(lecon)\n\n with open(path_to_classe + '//liste_lecons', 'wb') as file:\n writer = pickle.Pickler(file)\n writer.dump(liste_lecons)",
"def populate_game_questions():\n indices = random.sample(range(0, len(quizquestion.questions_all)), 5) # If user doesn't specify, choose 5 random questions\n return quizquestion.QuizQuestion.get_game_questions(indices)",
"def quasi_rand(values, feature, parent):\r\n seed = values[0]\r\n base = values[1]\r\n min = values[2]\r\n max = values[3]\r\n \r\n return math.floor(halton(seed, base) * (max - min + 1) + min)",
"def make_samples(\n context_qas: List[EncodedContextQuestionAnswer]\n ) -> List[EncodedSample]:\n return [\n EncodedSample(ctx.word_encoding, ctx.char_encoding, qa)\n for ctx in context_qas\n for qa in ctx.qas\n ]",
"def generate_question(self, num_question = 10):\n\t\t\n\t\tquestions = []\n\t\tfor q in range(num_question):\n\t\t\tfor r in range(2):\n\t\t\t\tquestion = np.zeros(self.question_dim, dtype = np.float32)\n\t\t\t\tcolor = np.random.randint(len(COLOR))\n\t\t\t\tquestion[color] = 1.0\n\t\t\t\tquestion[6 + r] = 1.0\n\t\t\t\tquestion_label = np.random.randint(3)\n\t\t\t\tquestion[8 + question_label] = 1.0\n\t\t\t\tquestions.append(question)\n\t\treturn questions",
"def set_qs():\n\n print \"Hi there! We're going to give you a fun grammar quiz.\"\n\n user_name = raw_input(\"To start, please enter your name: \")\n\n print \"Thanks, {}!\".format(user_name)\n\n user_num = int(raw_input(\"How many questions would you like us to generate for you? Enter a number: \"))\n\n num_qs = validate_num(user_num)\n\n print \"Ok, we'll make you a quiz with {} questions!\".format(num_qs)\n\n return num_qs",
"def _samples(self):\n finite_types = \\\n [QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],\n ['C', 3], ['C', 5], ['D', 2], ['D', 5],\n [\"E\", 6], [\"E\", 7], [\"E\", 8], [\"F\", 4],\n [\"G\", 2]]]\n affine_types = \\\n [QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]\n elliptic_types = \\\n [QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]\n mutation_finite_types = \\\n [QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]\n mutation_infinite_types = \\\n [QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]\n\n return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types",
"def test_quick_answer(self):\n pass",
"async def start(self, ctx, t=1, qnum=None):\n # Verify this channel has a quiz assigned to it\n serv = ctx.message.guild\n chan = ctx.message.channel\n try:\n quiz = self.QUIZZES[f\"{serv}:{chan}\"]\n except KeyError as e:\n title = \"Quiz not found\"\n msg = {\n \"Error:\": e\n }\n embed = make_embed(title,msg)\n await ctx.message.channel.send(embed=embed)\n return\n\n # Iterate over a randomized list\n questions = list(quiz['quiz'].keys())\n random.shuffle(questions)\n\n # Fix our params\n if qnum is None:\n qnum = len(questions)\n else:\n qnum = int(qnum)\n t = float(t)\n\n # Iterate over all the questions\n for idx in range(qnum):\n # Grab question\n question = quiz['quiz'][questions[idx]]\n\n # Get question info\n answers = question['answers']\n correct = question['correct']\n\n # Create embed\n title = f\"Quiz - Question {idx + 1}\"\n choices = f\"\"\":regional_indicator_a: : {answers['a']}\n:regional_indicator_b: : {answers['b']}\n:regional_indicator_c: : {answers['c']}\n:regional_indicator_d: : {answers['d']}\n\"\"\"\n \n try:\n code = f\"\"\"```\n{question['code']}\n```\n\"\"\"\n msg = {\n \"Question:\": questions[idx],\n \"Cmd/Code:\": code,\n \"Choices:\": choices,\n }\n except:\n msg = {\n \"Question:\": questions[idx],\n \"Choices:\": choices,\n }\n\n # Create embed and send\n embed = make_embed(title, msg)\n msg = await ctx.message.channel.send(embed=embed)\n\n # Add reactions\n a = \"🇦\"\n b = \"🇧\"\n c = \"🇨\"\n d = \"🇩\"\n reactions = [a, b, c ,d]\n for r in reactions:\n await msg.add_reaction(r)\n\n whodid = {\n a: [],\n b: [],\n c: [],\n d: []\n }\n\n # Sleep sometime before revealing answer\n time.sleep(t)\n\n # Calculate the answers\n msg = await msg.channel.fetch_message(msg.id)\n reactions: List[discord.Reaction] = msg.reactions\n for react in reactions:\n users = await react.users().flatten()\n for u in users:\n if self.bot.user.name in u.name:\n continue\n # Make sure first the user existss in this game\n try:\n quiz['players'][u]\n except:\n # User does not exist, lets add them\n quiz['players'][u] = 0 # 0 points to start\n whodid[react.emoji].append(u)\n\n # Who got it right?\n crct = []\n if correct == \"a\":\n # Give points to those who deserve it\n for user in whodid[a]:\n if user not in whodid[b] and user not in whodid[c] and user not in whodid[d]:\n quiz['players'][user] += 1\n crct.append(user.name)\n elif correct == \"b\":\n # Give points to those who deserve it\n for user in whodid[b]:\n if user not in whodid[a] and user not in whodid[c] and user not in whodid[d]:\n quiz['players'][user] += 1\n crct.append(user.name)\n elif correct == \"c\":\n # Give points to those who deserve it\n for user in whodid[c]:\n if user not in whodid[b] and user not in whodid[a] and user not in whodid[d]:\n quiz['players'][user] += 1\n crct.append(user.name)\n elif correct == \"d\":\n # Give points to those who deserve it\n for user in whodid[d]:\n if user not in whodid[b] and user not in whodid[c] and user not in whodid[a]:\n quiz['players'][user] += 1\n crct.append(user.name)\n\n # Print out correct answer\n title = \"Correct Answer\"\n # if no one, then no one\n if len(crct) == 0:\n crct.append(\"No one lol\")\n msg = {\n \"Correct Answer:\": f\"{correct}. {answers[correct]}\",\n \"Who Got it?\": '\\n'.join(crct)\n }\n embed = make_embed(title, msg)\n await ctx.message.channel.send(embed=embed)\n\n # Print out the scoreboard\n scrbrd = make_scoreboard(quiz['players'])\n await ctx.message.channel.send(embed=scrbrd)\n time.sleep(5)",
"def get_questions():\n fields_dt = ['name', 'category', 'key', 'text']\n questions = frappe.db.get_list('Big Five Factor Model',\n fields=fields_dt)\n\n # Ordenamiendo random: se aplica sobre el objeto original\n suffle_data = random.shuffle(questions)\n\n return questions",
"async def generate_question(self) -> None:\n topic = choice(list(self._topics.keys()))\n arrayList = await self.session.get(\"https://restcountries.eu/rest/v2\")\n arrayList = await arrayList.json() # get request to the country API\n countries = []\n \n for _ in range(4):\n country = choice(arrayList)\n del arrayList[arrayList.index(country)]\n countries.append(country)\n del arrayList\n \n country = choice(countries)\n del countries[countries.index(country)]\n self.question = f\"What is the {self._topics[topic]} of {country['name']}?\"\n self.correct_order = randint(0, 3)\n self.choices = [i[topic] for i in countries]\n self.choices.insert(self.correct_order, country[topic])\n del countries, topic",
"def generate_quest(self):\n\n if odds(3):\n\n quest_items = add_dicts_together(items[\"master\"], items[self.p.square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n\n quantity = {'super rare': '1',\n 'rare': '2',\n 'uncommon': '3',\n 'common': '6',\n 'super common': '15'}\n q = quantity[i.rarity]\n\n self.quest = i, int(q), f\"{self.p.name}, if you bring \" \\\n f\"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, \" \\\n f\"I will teach you a valuable skill.\"\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = f\"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}\"\n print(f\"Well, we'll keep this off the record, but I can arrange for some money to find its way \"\n f\"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\")\n self.p.hit_list.append(target)\n return False\n\n else:\n return None",
"def test_QSe_Run(self):\n fit_group, result = BayesQuasi(Program='QSe',\n SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._res_ws,\n MinRange=-0.547607,\n MaxRange=0.543216,\n SampleBins=1,\n ResolutionBins=1,\n Elastic=False,\n Background='Sloping',\n FixedWidth=False,\n UseResNorm=False,\n WidthFile='',\n Loop=True,\n Save=False,\n Plot='None')\n self._validate_QSe_shape(result, fit_group)\n self._validate_QSe_value(result, fit_group)",
"def test_retrieve_recipes(self):\n sample_quizz()\n sample_quizz()\n\n res = self.client.get(QUIZZES_URL)\n\n quizzes = Quizz.objects.all()\n serializer = QuizzSerializer(quizzes, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def sample(self):",
"def create_quiz():\n try:\n\n quiz_category_id = request.json.get(\"quiz_category_id\")\n previous_question_ids = request.json.get(\"previous_question_ids\")\n questions = Question.query.filter(\n ~Question.id.in_(previous_question_ids)\n )\n\n if quiz_category_id != 0:\n questions = questions.filter(\n Question.category_id == quiz_category_id\n )\n\n questions = questions.all()\n\n if len(questions) > 0:\n question = random.choice(questions).format()\n else:\n question = None\n\n response = jsonify({\"success\": True, \"question\": question})\n\n except AttributeError:\n abort(400)\n\n return response",
"def tst_random_set():\n final_wave, final_spec, final_z = desi_qso_templates(\n outfil='test_random_set.fits', N_perz=100, seed=12345)",
"def make_music_rand():\n pass",
"def generate_question_and_answer(): # noqa: WPS210\n start_number = random.randint(1, 100)\n progression_step = random.randint(1, 10)\n progression_length = random.randint(5, 10)\n progression = generate_progression(\n start_number, progression_step, progression_length,\n )\n return hide_number(progression)",
"def quartic(indiv):\n y = sum([ (i+1) * x**4 for i,x in enumerate(indiv)]) + random.uniform(0,1)\n return y",
"def create_sample(i):\n return Sample(**{\n 'name': f'Sample{i}',\n 'metadata': {'foobar': f'baz{i}'},\n KrakenResultModule.name(): create_kraken(),\n KrakenHLLResultModule.name(): create_krakenhll(),\n Metaphlan2ResultModule.name(): create_metaphlan2(),\n }).save()"
] |
[
"0.6743306",
"0.62160754",
"0.62013143",
"0.6199276",
"0.6093944",
"0.5910831",
"0.58823454",
"0.58408946",
"0.5759428",
"0.5724299",
"0.566125",
"0.56514454",
"0.5644561",
"0.5635653",
"0.56318986",
"0.562786",
"0.56228137",
"0.5605605",
"0.558572",
"0.5567439",
"0.5556626",
"0.5497341",
"0.54885405",
"0.5481957",
"0.54741925",
"0.545805",
"0.54301417",
"0.54115903",
"0.5393793",
"0.53880775"
] |
0.7778706
|
0
|
Retrieves specific league matchday fixtures from the API
|
async def _get_league_fixtures_matchday(self, server_id: str, league_id: str, matchday: str):
params = {'matchday': matchday}
url = self.api_url + 'competitions/{}/fixtures'.format(league_id)
return await self._make_request(url, params, server_id)
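A hedged call-site sketch for the coroutine above, assumed to run inside another async method of the same class; the server id, league id and matchday values are placeholders, and the response shape ('fixtures' entries with homeTeamName/awayTeamName keys) follows the football-data.org payloads shown elsewhere in this dataset's entries.

# Hypothetical caller; ids and matchday are illustrative only.
data = await self._get_league_fixtures_matchday(server_id='1234', league_id='445', matchday='12')
for fixture in data.get('fixtures', []):
    print(fixture['homeTeamName'], '-', fixture['awayTeamName'])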
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def _matchdayfixtures(self, ctx: commands.Context, league_id: str, matchday: str='1'):\n headers = ['ID', 'Home', ' ', ' ', 'Away']\n data = await self._get_league_fixtures_matchday(ctx.message.server.id, league_id, matchday)\n\n await self.bot.say('```diff\\n+ Matchday ' + matchday + ' fixtures```')\n pretty_data = []\n for fixture in data['fixtures']:\n pretty_data.append([\n fixture['id'],\n '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),\n fixture['result']['goalsHomeTeam'],\n fixture['result']['goalsAwayTeam'],\n '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])\n ])\n\n await self.bot.say(box(tabulate(pretty_data, headers=headers)))",
"async def _get_league_fixtures_timeframe(self, server_id: str, league_id: str, timeframe: str):\n params = {'timeFrame': timeframe}\n url = self.api_url + 'competitions/{}/fixtures'.format(league_id)\n\n return await self._make_request(url, params, server_id)",
"def test_get_league_leaders___daily(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___daily(self.year1, self.nhl_season, self.year2, self.month, self.day)\n self.assertEqual(response.status_code, 200, msg)",
"def get_fixtures(team, dateFrom=None, dateTo=None, status=None, venue=None, limit=None):\r\n query_params = {}\r\n if dateFrom:\r\n query_params['dateFrom'] = dateFrom\r\n if dateTo:\r\n query_params['dateTo'] = dateTo\r\n if status:\r\n query_params['status'] = status\r\n else:\r\n query_params['status'] = 'SCHEDULED' \r\n if venue:\r\n query_params['venue'] = venue\r\n if limit :\r\n query_params['limit'] = limit \r\n \r\n url = _generate_url(f\"teams/{team}/matches\", query_params)\r\n fixtures = requests.get(url, headers=headers).json()\r\n \r\n return fixtures",
"def test_time_league(self):\n result = self.test_client.league\n\n assert isinstance(result, dict)",
"async def _get_league_leaderboard(self, server_id: str, league_id: str, matchday: str):\n if matchday is None:\n matchday = ''\n params = {'matchday': matchday}\n url = self.api_url + 'competitions/{}/leagueTable'.format(league_id)\n\n return await self._make_request(url, params, server_id)",
"def get_match_data(event: str, match: int, team: int = -1):\n \n if team < 0:\n return requests.get(f'https://us-central1-pearadox-2020.cloudfunctions.net/GetMatchData/{event}/{match:03d}-').json()\n else:\n return requests.get(f'https://us-central1-pearadox-2020.cloudfunctions.net/GetMatchData/{event}/{match:03d}-').json()[f'{match:03}-{team:4}']",
"async def _nextfixtures(self, ctx: commands.Context, league_id: str):\n headers = ['ID', 'Home', ' ', 'Away', 'Date']\n data = await self._get_league_fixtures_timeframe(ctx.message.server.id, league_id, 'n7')\n\n await self.bot.say('```diff\\n+ Next fixtures```')\n pretty_data = []\n for fixture in data['fixtures']:\n pretty_data.append([\n fixture['id'],\n '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),\n ' - ',\n '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName']),\n fixture['date']\n ])\n\n await self.bot.say(box(tabulate(pretty_data, headers=headers)))",
"def test_deaths_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/deaths',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_teams_get_team_v1(self):\n pass",
"def construct_fixtures_tweets():\n\n # Declares today's date\n today = str(datetime.date.today())\n\n # Gets today's fixtures data from football-data.org API\n connection = http.client.HTTPConnection('api.football-data.org')\n headers = {'X-Auth-Token': ''}\n connection.request('GET', '/v2/competitions/PL/matches?dateFrom='+today+'&dateTo='+today, None, headers)\n response = json.loads(connection.getresponse().read().decode())\n\n # Initialises fixtures tweet\n tweet1 = \"Today's #PremierLeague matches:\\n\"\n tweet2 = \"\"\n tweet3 = \"\"\n\n # Checks if any fixtures on today\n if response['matches']:\n # For each fixture obtained, appends line to tweet with information\n for i in range(len(response['matches'])):\n time = response['matches'][i]['utcDate']\n utc = datetime.datetime.strptime(time, '%Y-%m-%dT%H:%M:%SZ')\n gmt = pytz.timezone(\"Europe/London\").fromutc(utc)\n ko_time = gmt.strftime(\"%H:%M\")\n tweet_line = response['matches'][i]['homeTeam']['name'] + ' vs ' + response['matches'][i]['awayTeam'][\n 'name'] + ' (' + ko_time + ')' + '\\n'\n # Checks that tweet will not be too long (~ >280 chars), by splitting into separate tweets\n if len(tweet1) >= 220:\n tweet2 += tweet_line\n elif len(tweet2) >= 220:\n tweet3 += tweet_line\n else:\n tweet1 += tweet_line\n return send_fixtures_tweets(tweet1, tweet2, tweet3)\n else:\n return print('No PL fixtures today')",
"def get_game(self, game_id):\n \n session = requests.session()\n response = session.get(self.baseURL + str(game_id), headers=self.headers)\n soup = BeautifulSoup(response.text)\n \n #get teams\n defeated_by = False \n game_header = soup.find_all(text=re.compile('defeats'))\n \n if len(game_header) == 0:\n game_header = soup.find_all(text=re.compile('defeated by'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('defeat'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('drew'))\n defeated_by = True \n else:\n defeated_by = True \n\n if defeated_by: \n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[3]\n else:\n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[2]\n \n date_string = game_header[0].split(' ')\n date_string_find = [date.lower() for date in date_string]\n \n venue = date_string[date_string_find.index('at') + 1]\n \n #get round\n round_num = None\n \n try:\n date_string_find.remove('')\n except:\n pass\n \n try:\n round_num = int(date_string[date_string_find.index('round') + 1])\n except:\n try:\n round_num = date_string_find[date_string_find.index('final') - 1] + ' final'\n except:\n round_num = date_string_find[date_string_find.index('semi-final')]\n \n date = date_string[-3:]\n date = ' '.join(date) \n date = parser.parse(date)\n \n #get attendance\n attend = soup.find_all(text=re.compile('Attendance'))\n attendance = 0\n \n if (len(attend) > 3):\n attendance = int(attend[1].split(' ')[-1])\n \n #get stats \n away_stats = {}\n home_stats = {}\n \n for stat in stats:\n stat_row = soup.find_all('td', text=stat)[0].find_parent('tr')\n elements = stat_row.find_all('td')\n \n if elements[0].text == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = elements[0].text\n \n if elements[0].text == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = elements[2].text\n \n return Game(game_id, home_team, away_team, venue, round_num, date, attendance, home_stats, away_stats)",
"def test_retrieve_team(self):\n pass",
"def get_league_listing(self):\n url = self.__build_url(urls.GET_LEAGUE_LISTING)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)",
"def get_teams():",
"def test_teams_get_teams_v1(self):\n pass",
"def test_single_match(self):\n match = Match.select().first()\n with self.client:\n response = self.client.get(f'/get-match/{match.id}')\n data = json.loads(response.data.decode())\n date = datetime.strptime(data['data']['start_date_time'], \"%Y-%m-%dT%H:%M:%S\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(match.start_date_time.day, date.day)",
"async def _lastfixtures(self, ctx: commands.Context, league_id: str):\n headers = ['ID', 'Home', 'G', ' ', 'G', 'Away']\n data = await self._get_league_fixtures_timeframe(ctx.message.server.id, league_id, 'p7')\n\n await self.bot.say('```diff\\n+ Last fixtures```')\n pretty_data = []\n for fixture in data['fixtures']:\n pretty_data.append([\n fixture['id'],\n '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),\n fixture['goalsHomeTeam'],\n ' - ',\n fixture['goalsAwayTeam'],\n '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])\n ])\n\n await self.bot.say(box(tabulate(pretty_data, headers=headers)))",
"def test_load_draft(league):\n draft = league.draft_results()\n assert(len(draft) == 144)\n #mcdavid 1st\n assert(draft[0]['player_key'] == '396.p.6743')\n # carter hart 67th\n assert(draft[66]['player_key'] == '396.p.7156')\n # zadorov last\n assert(draft[-1]['player_key'] == '396.p.5995')",
"def test_get_league_leaders___goaltending(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___goaltending(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)",
"def test_get_waivers(league):\n pass",
"def test_get_teams(self):\n pass",
"def test_get_teams(self):\n pass",
"def get_live_league_games(self):\n url = self.__build_url(urls.GET_LIVE_LEAGUE_GAMES)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)",
"def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]",
"def fixture_stats_singel(self, fixture):\n ds = load_match_data(f'https://footballapi.pulselive.com/football/stats/match/{fixture}')\n return ds",
"def fetch_fixture_data(\n start_date: str, end_date: str, data_import=match_data, verbose: int = 1\n) -> ApiResponse:\n return _api_response(\n pd.DataFrame(\n data_import.fetch_fixture_data(\n start_date=start_date, end_date=end_date, verbose=verbose\n )\n ).pipe(match.clean_fixture_data)\n )",
"def fixture_info_singel(self, fixture_id):\n ds = load_match_data(f'https://footballapi.pulselive.com/football/fixtures/{fixture_id}')\n return ds",
"def test_get_ny_daily_data(self):\n dag = self.dagbag.get_dag(self.dag_id)\n extract_task = dag.get_task('extract')\n resp = self.extract.getDailyNyDataFromAPI()\n self.assertIsNotNone(resp)\n self.assertEqual(type(resp), list)",
"def _load(self):\n if not self._loaded:\n url = f\"https://api.opendota.com/api/matches/{self.id}\"\n logger.info(\"Loading match details for match id: %s from url %s\",\n self._id, url)\n self.data = requests.get(url).json()\n self._duration = self.data.get('duration')\n self._chat = self.data.get('chat')\n self._cluster = self.data.get('cluster')\n self._engine = self.data.get('engine')\n self._first_blood_time = self.data.get('first_blood_time')\n self._game_mode = self.data.get('game_mode')\n self._human_players = self.data.get('human_players')\n self._league_id = self.data.get('league_id')\n self._lobby_type = self.data.get('lobby_type')\n self._match_seq_num = self.data.get('match_seq_num')\n self._negative_votes = self.data.get('negative_votes')\n self._positive_votes = self.data.get('positive_votes')\n self._objectives = self.data.get('objectives')\n self._picks_bans = self.data.get('picks_bans')\n self._barracks_status_dire = self.data.get('barracks_status_dire')\n self._dire_score = self.data.get('dire_score')\n self._dire_team = self.data.get('dire_team')\n self._tower_status_dire = self.data.get('tower_status_dire')\n self._barracks_status_radiant = self.data.get('barracks_status_radiant')\n self._radiant_gold_adv = self.data.get('radiant_gold_adv')\n self._radiant_xp_adv = self.data.get('radiant_xp_adv')\n self._radiant_score = self.data.get('radiant_score')\n self._radiant_team = self.data.get('radiant_team')\n self._radiant_win = self.data.get('radiant_win')\n self._tower_status_radiant = self.data.get('tower_status_radiant')\n self._start_time = self.data.get('start_time')\n self._teamfights = self.data.get('teamfights')\n self._version = self.data.get('version')\n self._replay_salt = self.data.get('replay_salt')\n self._series_id = self.data.get('series_id')\n self._series_type = self.data.get('series_type')\n self._league = self.data.get('league')\n self._skill = self.data.get('skill')\n self._players = self.data.get('players')\n self._patch = self.data.get('patch')\n self._region = self.data.get('region')\n self._all_word_counts = self.data.get('all_word_counts')\n self._version = self.data.get('version')\n self._throw = self.data.get('throw')\n self._comeback = self.data.get('comeback')\n self._cosmetics = self.data.get('cosmetics')\n self._draft_timings = self.data.get('draft_timings')\n self._loss = self.data.get('loss')\n self._win = self.data.get('win')\n self._replay_url = self.data.get('replay_url')\n self._loaded = True"
] |
[
"0.7193875",
"0.67219526",
"0.6705892",
"0.643531",
"0.62030923",
"0.61631644",
"0.6108169",
"0.5966481",
"0.5817939",
"0.5803004",
"0.57856566",
"0.57549417",
"0.57185924",
"0.5710034",
"0.5686895",
"0.5681784",
"0.5653688",
"0.5642157",
"0.56397754",
"0.5622974",
"0.5606544",
"0.5587761",
"0.5587761",
"0.5565093",
"0.55535126",
"0.5503531",
"0.5498468",
"0.54703087",
"0.5455255",
"0.5445729"
] |
0.7777739
|
0
|
Retrieves specific league leaderboard from the API
|
async def _get_league_leaderboard(self, server_id: str, league_id: str, matchday: str):
if matchday is None:
matchday = ''
params = {'matchday': matchday}
url = self.api_url + 'competitions/{}/leagueTable'.format(league_id)
return await self._make_request(url, params, server_id)
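A brief hedged sketch of calling the coroutine above; passing matchday=None exercises the default branch that requests the current table. The ids are placeholders, and the raw response is printed rather than parsed because its shape is not shown in this entry.

# Hypothetical caller; None falls back to an empty matchday parameter.
table = await self._get_league_leaderboard(server_id='1234', league_id='445', matchday=None)
print(table)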
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def leaderboard(self):\n\n url = API_PATH[\"leaderboard\"].format(region_url=self.region_url)\n\n response = requests.get(url, headers=self.headers)\n\n return response.json()",
"def get_leaderboard(request):\n\n includedUsers = User.objects.filter(hide_leaderboard=False, is_staff=False)\n\n # ordered list of points, index denoting leaderboard position (rank)\n # distinct values means that everyone with the same points has the same rank\n rankings = []\n for item in includedUsers.values(\"points\").distinct().order_by(\"-points\"):\n rankings.append(item[\"points\"])\n\n includedUsers = includedUsers.order_by(\"-points\")\n\n paginationData = []\n for user in includedUsers:\n # rank is the index of the users points +1 (converting from 0-indexing)\n data = {\"user\": user, \"rank\": rankings.index(user.points) + 1}\n paginationData.append(data)\n\n return JsonResponse(\n json_paginator(request, paginationData, lb_serializer),\n status=200,\n )",
"def get_current_leader(api_url : str, session : Optional[requests.Session] = None) -> GetLeaderResponse:\n return getLeader(api_url, session)",
"async def from_url(cls) -> \"AocGlobalLeaderboard\":\n aoc_url = f\"https://adventofcode.com/{AocConfig.year}/leaderboard\"\n\n async with aiohttp.ClientSession(headers=AOC_REQUEST_HEADER) as session:\n async with session.get(aoc_url) as resp:\n if resp.status == 200:\n raw_html = await resp.text()\n else:\n log.warning(f\"Bad response received from AoC ({resp.status}), check session cookie\")\n resp.raise_for_status()\n\n soup = BeautifulSoup(raw_html, \"html.parser\")\n ele = soup.find_all(\"div\", class_=\"leaderboard-entry\")\n\n exp = r\"(?:[ ]{,2}(\\d+)\\))?[ ]+(\\d+)\\s+([\\w\\(\\)\\#\\@\\-\\d ]+)\"\n\n lb_list = []\n for entry in ele:\n # Strip off the AoC++ decorator\n raw_str = entry.text.replace(\"(AoC++)\", \"\").rstrip()\n\n # Use a regex to extract the info from the string to unify formatting\n # Group 1: Rank\n # Group 2: Global Score\n # Group 3: Member string\n r = re.match(exp, raw_str)\n\n rank = int(r.group(1)) if r.group(1) else None\n global_score = int(r.group(2))\n\n member = r.group(3)\n if member.lower().startswith(\"(anonymous\"):\n # Normalize anonymous user string by stripping () and title casing\n member = re.sub(r\"[\\(\\)]\", \"\", member).title()\n\n lb_list.append((rank, global_score, member))\n\n return cls(lb_list)",
"async def get_one_leaderboard(self, variant: 'VariantTypes', limit: int = 10) -> 'Response':\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/vnd.lichess.v3+json'\n }\n response = await self._client.request(method=RequestMethods.GET,\n url=USERS_PLAYER_TOP_URL.format(nb=limit, perfType=variant),\n headers=headers)\n return response",
"def get_league_listing(self):\n url = self.__build_url(urls.GET_LEAGUE_LISTING)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)",
"def list(self, request):\n team_leaders = self.controller.retrieve_all_teams_leaders()\n serializer = data_serializers.TeamLeaderPresenterSerializer(team_leaders, many=True)\n return Response(serializer.data)",
"def get_live_league_games(self):\n url = self.__build_url(urls.GET_LIVE_LEAGUE_GAMES)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)",
"def retrieve(self, request, pk=None):\n team_leader = self.get_team_leader_object(pk)\n serializer = data_serializers.TeamLeaderPresenterSerializer(team_leader)\n return Response(serializer.data, status=status.HTTP_201_CREATED)",
"def getLeaderboard(self, **kwargs):\n board = []\n scores = sorted(self._players, key=lambda score: score.dct_net['total'])\n pos = 1\n prev_total = None\n for sc in scores:\n score_dct = {\n 'player': sc.doc,\n 'total' : sc.dct_net['total'],\n }\n if prev_total != None and score_dct['total'] > prev_total:\n pos += 1\n prev_total = score_dct['total']\n score_dct['pos'] = pos\n for n,net in enumerate(sc.dct_net['holes']):\n if net == None:\n break\n else:\n n += 1\n score_dct['thru'] = n\n score_dct['line'] = '{:<3} {:<6} {:>5} {:>4}'.format(\n score_dct['pos'], score_dct['player'].nick_name, score_dct['total'], score_dct['thru'])\n board.append(score_dct)\n self.dctLeaderboard['leaderboard'] = board\n return self.dctLeaderboard",
"def leaderboard():\n # Get leaderboard and user information\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n # Get top gainer leaderboards\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('leaderboard.html',\n leaderboard=leaderboard,\n current_user_info=current_user_info,\n weektopgainers=weektopgainers,\n monthtopgainers=monthtopgainers,\n userbalance=current_user.balance)",
"def get_info_from_api(team_name):\n if \"-\" in team_name:\n team_name = team_name.replace(\"-\", \"+\")\n if \"brighton\" in team_name: # some teams has different names than in sofa-score\n team_name = \"brighton\"\n if \"leicester\" in team_name:\n team_name = \"leicester\"\n if \"norwich\" in team_name:\n team_name = \"norwich\"\n if \"mallorca\" in team_name:\n team_name = \"mallorca\"\n if \"parma\" in team_name:\n team_name = \"parma+calcio\"\n if \"bayern\" in team_name:\n team_name = \"bayern\"\n if \"koln\" in team_name:\n team_name = \"fc+koln\"\n if \"union+berlin\" in team_name:\n team_name = \"union+berlin\"\n if \"fsv+mainz\" in team_name:\n team_name = \"mainz\"\n if \"hoffenheim\" in team_name:\n team_name = \"hoffenheim\"\n if \"mgladbach\" in team_name:\n team_name = \"borussia+monchengladbach\"\n if \"schalke\" in team_name:\n team_name = \"schalke\"\n if \"leverkusen\" in team_name:\n team_name = \"leverkusen\"\n if \"paderborn\" in team_name:\n team_name = \"paderborn\"\n print(team_name)\n response = requests.get(cfg.API_URL + team_name)\n team_data = json.loads(response.text)\n return team_data['teams'][0]",
"async def json_from_url(\n leaderboard_id: int = AocConfig.leaderboard_id, year: int = AocConfig.year\n ) -> \"AocPrivateLeaderboard\":\n api_url = f\"https://adventofcode.com/{year}/leaderboard/private/view/{leaderboard_id}.json\"\n\n log.debug(\"Querying Advent of Code Private Leaderboard API\")\n async with aiohttp.ClientSession(cookies=AOC_SESSION_COOKIE, headers=AOC_REQUEST_HEADER) as session:\n async with session.get(api_url) as resp:\n if resp.status == 200:\n raw_dict = await resp.json()\n else:\n log.warning(f\"Bad response received from AoC ({resp.status}), check session cookie\")\n resp.raise_for_status()\n\n return raw_dict",
"def get_team_roster(league):\n pass",
"def leaderboard(self, game: str = 'aoe2de', leaderboard_id: int = 3, start: int = 1, count: int = 1,\n search: str = None, steam_id: int = None, profile_id: str = None) -> Response:\n\n endpoint = '/api/leaderboard'\n query = f'?game={game}&leaderboard_id={leaderboard_id}&start={start}&count={count}'\n\n if not (search is None):\n query += f'&search={search}'\n if not (steam_id is None):\n query += f'&steam_id={steam_id}'\n if not (profile_id is None):\n query += f'&profile_id={profile_id}'\n\n return self.fetch(endpoint, query)",
"async def leaderboard(self, ctx: commands.Context):\r\n async with ctx.typing():\r\n user_info_unsorted = {}\r\n user_info_sorted = {}\r\n async with self.bot.database() as db:\r\n user_info_rows = await db(\"\"\"SELECT * FROM user_balance\"\"\")\r\n for user_info in user_info_rows:\r\n user_info_unsorted[user_info['balance']] = user_info['user_id']\r\n user_info_unsorted_items = user_info_unsorted.items()\r\n user_id_sorted = sorted(user_info_unsorted_items, reverse=True)\r\n page = 0\r\n place = 0\r\n set = 0\r\n fields = []\r\n field_info = []\r\n for user_id_sorted_single in user_id_sorted:\r\n user = await self.bot.fetch_user(user_id_sorted_single[1])\r\n place += 1\r\n field_info.append(f\"#{place:,}. {user.name} ({user_id_sorted_single[0]:,})\")\r\n set += 1\r\n if set == 10:\r\n page += 1\r\n fields.append((f\"Page {page:,}\", \"\\n\".join(field_info)))\r\n field_info = []\r\n set = 0\r\n if set != 0:\r\n page += 1\r\n fields.append((f\"Page {page:,}\", \"\\n\".join(field_info)))\r\n field_info = []\r\n set = 0\r\n return await utils.paginate(ctx, fields, ctx.author.id, \"Global Balance Leaderboard\")",
"def leaderboard(request, when):\n limit = _clean_int(request.GET.get('limit'), 300, 1, 1000)\n data = leaderboard_impl(when, limit)\n if data is None:\n return HttpResponseNotFound()\n tops = []\n shame = []\n for i in data:\n if i.score == models.AccountStatsBase.NULL_SCORE:\n shame.append(i)\n else:\n tops.append(i)\n return respond(\n request, 'leaderboard.html', {'tops': tops, 'shame': shame, 'when': when})",
"def get_leaderboards_get(self, destinyMembershipId, maxtop, membershipType, modes, statid):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/{membershipType}/Account/{destinyMembershipId}/Stats/Leaderboards/\"))",
"async def from_url(cls) -> \"AocPrivateLeaderboard\":\n api_json = await cls.json_from_url()\n return cls.from_json(api_json)",
"def get_teams():",
"def get(self, request):\n\n queries = request.GET.dict()\n user = UserValidator.validate_user(request.user.id)\n\n if user is None:\n return JsonResponse({\n \"message\": \"Invalid credentials.\",\n }, status=400)\n\n if user is None:\n return JsonResponse({\n \"message\": \"Invalid credentials.\",\n }, status=400)\n\n try:\n game = Game.value_of(queries[\"game\"].lower())\n\n except (KeyError, ValueError, Exception):\n game = None\n\n try:\n sort = queries[\"sort\"].lower()\n\n if sort not in [\"wins\", \"total\",]:\n raise ValueError(\"invalid key value\")\n\n except (KeyError, ValueError, Exception):\n sort = \"wins\"\n\n entries = GameModel.objects.values(\"player\").filter(is_deleted=False)\n\n if game is not None:\n entries = entries.filter(game_played=game)\n game = game.value\n else:\n game = \"All\"\n\n entries = entries.annotate(\n wins=(Count(\"player\", filter=Q(did_win=True))),\n total=(Count(\"player\"))\n )\n\n if sort == \"wins\":\n entries = entries.order_by(\"-wins\")\n elif sort == \"total\":\n entries = entries.order_by(\"-total\")\n\n board = ScoreboardView.get_board_from_db_rows(entries)\n\n token = Token.get_tokens_for_user(user)\n\n return JsonResponse({\n \"game\": game,\n \"board\": board,\n \"access\": token[\"access\"],\n \"refresh\": token[\"refresh\"],\n })",
"async def message_leaderboard(self, ctx, boardType):\n\n\t\tglobal embeds\n\t\tguild = ctx.message.guild\n\n\t\tif boardType == \"quotes\":\n\t\t\tleaderboardType = \"quoteLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"quoteLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telif boardType == \"reactions\":\n\t\t\tleaderboardType = \"reactionLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"reactionLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telif boardType == \"emojis\":\n\t\t\tleaderboardType = \"emojiLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"emojiLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telse:\n\t\t\tleaderboardType = \"messageLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"messageLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\n\t\tleaderboardEmbed.clear_fields()\n\n\t\tleaderboard = {k: v for k, v in sorted(leaderboard.items(), key=lambda a: a[1], reverse=True)}\n\n\t\tpastScore = 0\n\t\toffset = 0\n\t\tposition = 0\n\t\tuserValues = \"\"\n\n\t\tfor participant in leaderboard:\n\t\t\tscore = leaderboard[participant]\n\n\t\t\tif score == pastScore:\n\t\t\t\toffset += 1\n\t\t\telse:\n\t\t\t\tposition += offset + 1\n\t\t\t\toffset = 0\n\t\t\t\tpastScore = score\n\n\t\t\tif leaderboardType == \"reactionLeaderboard\":\n\t\t\t\tname = str(participant)\n\t\t\telif leaderboardType == \"emojiLeaderboard\":\n\t\t\t\tfor emoji in guild.emojis:\n\t\t\t\t\tif int(participant) == emoji.id:\n\t\t\t\t\t\tname = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif int(participant) == 456226577798135808:\n\t\t\t\t\t# Skip deleted users\n\t\t\t\t\tTrue\n\t\t\t\telif guild.get_member(int(participant)) is None:\n\t\t\t\t\tname = str(await self.bot.fetch_user(int(participant)))\n\t\t\t\telse:\n\t\t\t\t\tname = str(guild.get_member(int(participant)).display_name)\n\n\t\t\tuserValues += \"**\" + str(position) + \". \" + name + \"** - \" + str(score) + \"\\n\\n\\t\"\n\n\t\tif userValues == \"\":\n\t\t\tuserValues = \"None\"\n\n\t\tleaderboardEmbed.add_field(name=\"User\", value=\"\".join(userValues.split(\"\\t\")[0:10]), inline=True)\n\n\t\tmessage = await ctx.send(embed=leaderboardEmbed)\n\t\tself.cachedMessages[message.id] = {\"type\": leaderboardType, \"page\": 1}\n\t\tawait message.add_reaction(\"⬅️\")\n\t\tawait message.add_reaction(\"➡️\")",
"def mlbleagueleaders(self, irc, msg, args, optleague, optcategory):\n\n league = {'mlb': '9', 'al':'7', 'nl':'8'} # do our own translation here for league/category.\n category = {'avg':'avg', 'hr':'homeRuns', 'rbi':'RBIs', 'ra':'runs', 'sb':'stolenBases', 'era':'ERA', 'whip':'whip', 'k':'strikeoutsPerNineInnings'}\n\n optleague = optleague.lower()\n optcategory = optcategory.lower()\n\n if optleague not in league:\n irc.reply(\"League must be one of: %s\" % league.keys())\n return\n\n if optcategory not in category:\n irc.reply(\"Category must be one of: %s\" % category.keys())\n return\n\n url = self._b64decode('aHR0cDovL20uZXNwbi5nby5jb20vbWxiL2FnZ3JlZ2F0ZXM=') + '?category=%s&groupId=%s&y=1&wjb=' % (category[optcategory], league[optleague])\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n \n html = html.replace('class=\"ind alt nw\"', 'class=\"ind nw\"')\n\n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'class':'table'})\n rows = table.findAll('tr')\n \n append_list = []\n \n for row in rows[1:6]:\n rank = row.find('td', attrs={'class':'ind nw', 'nowrap':'nowrap', 'width':'10%'}).renderContents()\n team = row.find('td', attrs={'class':'ind nw', 'nowrap':'nowrap', 'width':'70%'}).find('a').text\n num = row.find('td', attrs={'class':'ind nw', 'nowrap':'nowrap', 'width':'20%'}).renderContents()\n append_list.append(rank + \". \" + team + \" \" + num)\n\n thelist = string.join([item for item in append_list], \" | \")\n\n irc.reply(\"Leaders in %s for %s: %s\" % (ircutils.bold(optleague.upper()), ircutils.bold(optcategory.upper()), thelist))",
"def leaderboard(request):\r\n\tMEDIA_URL = '/media/'\r\n\tgames = Game.objects.all()\r\n\tuser_high_scores = []\r\n\tgame_high_scores = []\r\n\tnew = {}\r\n\t# Get global scores\r\n\tfor game in games:\r\n\t\tgame_intermediate_high = Score.objects.filter(game = game.id).order_by('-current_score').values('game__name', 'player__user__username', 'current_score')[:1]\r\n\t\tif (game_intermediate_high.count() > 0):\r\n\t\t\tgame_high_scores.append(game_intermediate_high)\r\n\t# Check if user is authenticated and get user's scores\r\n\tif (request.user.is_authenticated):\r\n\t\tfor game in games:\r\n\t\t\t\tuser_intermediate_high = Score.objects.filter(game=game.id, player = request.user.profile).order_by('-current_score').values('player__user__username','game__name', 'current_score').distinct()[:1]\r\n\t\t\t\tif (user_intermediate_high.count() > 0):\r\n\t\t\t\t\tuser_high_scores.append(user_intermediate_high)\r\n\r\n\treturn render(request, 'leaderboard.html',{'MEDIA_URL' : MEDIA_URL,'games': games, 'user_high_scores': user_high_scores, 'game_high_scores': game_high_scores})",
"def perform_get_league_points(responder, options):\n tla = options['<tla>']\n league_points = scores.get_league_points(tla)\n\n if league_points is None:\n if options.get(yaml_opt, False):\n responder(yaml.dump({'points': None}))\n else:\n responder('No scores available for team {0}'.format(tla))\n return\n\n if options.get(yaml_opt, False):\n responder(yaml.dump({'points': league_points}))\n else:\n responder('Team {0} have {1} league points'.format(tla, league_points))",
"def golden_boot_leaders(league=EPL_LEAGUE_ID) -> List[Tuple[int, str]]:\n try:\n top_scorers = []\n season = get_season_year()\n params = {\"season\": season, \"league\": league}\n req = requests.get(\n FOOTY_TOPSCORERS_ENDPOINT,\n headers=FOOTY_HTTP_HEADERS,\n params=params,\n )\n players = req.json().get(\"response\")\n if players:\n for i, player in enumerate(players):\n name = player[\"player\"][\"name\"]\n team = player[\"statistics\"][0][\"team\"][\"name\"]\n goals = player[\"statistics\"][0][\"goals\"][\"total\"]\n assists = player[\"statistics\"][0][\"goals\"].get(\"assists\", 0)\n shots_on = player[\"statistics\"][0][\"shots\"].get(\"on\", 0)\n shots_total = player[\"statistics\"][0][\"shots\"].get(\"total\", 0)\n if assists is None:\n assists = 0\n top_scorers.append(\n (\n goals,\n f\"{goals} - {name}, {team} ({assists} assists, {shots_on}/{shots_total} SOG)\",\n )\n )\n if i > 9:\n break\n return top_scorers\n except HTTPError as e:\n LOGGER.error(f\"HTTPError while fetching golden boot leaders: {e.response.content}\")\n except KeyError as e:\n LOGGER.error(f\"KeyError while fetching golden boot leaders: {e}\")\n except Exception as e:\n LOGGER.error(f\"Unexpected error when fetching golden boot leaders: {e}\")",
"def get_pvp_leaderboard(self, region, namespace, season_id, bracket, **filters):\n filters['namespace'] = namespace\n resource = 'data/wow/pvp-season/{0}/pvp-leaderboard/{1}'\n return self.get_resource(resource, region, *[season_id, bracket], **filters)",
"def test_lacrosseplayers_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/lacrosseplayers',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)",
"def lobbies(self, game: str = None) -> Response:\n\n endpoint = '/api/lobbies'\n if not (game is None):\n query = f'?game={game}'\n else:\n query = None\n\n return self.fetch(endpoint, query)"
] |
[
"0.68470746",
"0.6579917",
"0.65558994",
"0.6431325",
"0.6389054",
"0.6388643",
"0.6316726",
"0.6310277",
"0.6274791",
"0.61995524",
"0.61978364",
"0.61695975",
"0.6142409",
"0.61303324",
"0.60644513",
"0.6037634",
"0.60006803",
"0.5971666",
"0.5949463",
"0.59340775",
"0.592282",
"0.58695364",
"0.58582264",
"0.58551395",
"0.58196676",
"0.5808363",
"0.57984483",
"0.5795781",
"0.57564807",
"0.5748794"
] |
0.6942586
|
0
|
Retrieves specific team info
|
async def _get_team_info(self, server_id: str, team_id: str):
params = {}
url = self.api_url + 'teams/{}'.format(team_id)
return await self._make_request(url, params, server_id)
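The command above hands the actual HTTP work to a _make_request helper whose body is not part of this record. A minimal standalone sketch of what such a helper could do, assuming aiohttp and the X-Auth-Token header that football-data.org expects (the function name and signature here are illustrative, not the cog's actual code):

import aiohttp

async def make_request(url, params, token):
    # football-data.org reads the API token from the X-Auth-Token header.
    headers = {'X-Auth-Token': token}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params, headers=headers) as resp:
            return await resp.json()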
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_team_info(self, id):\n params = {'key': self.key, 'start_at_team_id': id,\n 'teams_requested': 1}\n r = requests.get(self.TEAM_URL, params=params)\n return TeamResponse(r.json()['result']['teams'][0])",
"def get_team(self):\n try:\n team_id = self.request.GET.get('team')\n if team_id is not None:\n team_id = int(team_id)\n return self.get_available_teams().get(pk=team_id)\n return self.get_available_teams().latest()\n except (Team.DoesNotExist, ValueError):\n return None",
"def get_teams():",
"def get_people(team):",
"def get_team(team_id):\n team = TeamController.get(filters={\"Team\": {\"id\": team_id}})\n return jsonify(format_team(team)), 200",
"def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)",
"def test_retrieve_team(self):\n pass",
"def retrieve(self, request, pk=None):\n team = self.get_team_object(pk)\n serializer = data_serializers.PresentTeamSerializer(team)\n return Response(serializer.data)",
"def find_by_id(self, team, params={}, **options):\n path = \"/teams/%s\" % (team)\n return self.client.get(path, params, **options)",
"def info():\n print 'Loading info page'\n\n team_list = datastore.get_all_teams(engine)\n\n return render_template('info.html', rows=team_list)",
"def get_team(self, team_reference, include_users=False):\n url = 'teams/{0}'.format(team_reference)\n result = self.get(url, {'include_users': include_users})\n #TODO: check how included users returned\n return result.get('team', result)",
"def get_team(uid=None):\n user = get_user(uid=uid)\n return api.team.get_team(tid=user[\"tid\"])",
"def get_info_from_api(team_name):\n if \"-\" in team_name:\n team_name = team_name.replace(\"-\", \"+\")\n if \"brighton\" in team_name: # some teams has different names than in sofa-score\n team_name = \"brighton\"\n if \"leicester\" in team_name:\n team_name = \"leicester\"\n if \"norwich\" in team_name:\n team_name = \"norwich\"\n if \"mallorca\" in team_name:\n team_name = \"mallorca\"\n if \"parma\" in team_name:\n team_name = \"parma+calcio\"\n if \"bayern\" in team_name:\n team_name = \"bayern\"\n if \"koln\" in team_name:\n team_name = \"fc+koln\"\n if \"union+berlin\" in team_name:\n team_name = \"union+berlin\"\n if \"fsv+mainz\" in team_name:\n team_name = \"mainz\"\n if \"hoffenheim\" in team_name:\n team_name = \"hoffenheim\"\n if \"mgladbach\" in team_name:\n team_name = \"borussia+monchengladbach\"\n if \"schalke\" in team_name:\n team_name = \"schalke\"\n if \"leverkusen\" in team_name:\n team_name = \"leverkusen\"\n if \"paderborn\" in team_name:\n team_name = \"paderborn\"\n print(team_name)\n response = requests.get(cfg.API_URL + team_name)\n team_data = json.loads(response.text)\n return team_data['teams'][0]",
"def getTeam(self):\n return self.team",
"def get_team(self, team_id):\n try:\n return CourseTeam.objects.get(team_id=team_id)\n except CourseTeam.DoesNotExist:\n raise Http404 # lint-amnesty, pylint: disable=raise-missing-from",
"def find_team(self):\n if self.team_id is not None:\n return ItopapiPrototype.get_itop_class('Team').find(self.team_id)\n return None",
"def get_all_team_info():\n # hit this url in browser or postman like http://127.0.0.1:5000/getAllTeamInfo and it will return json data\n final_team_list = []\n if request.method == 'GET':\n teams = Team.query.all()\n for rec in range(len(teams)):\n final_team = {}\n final_team['Team_name'] = teams[rec].team_name\n final_team['Team_ID'] = teams[rec].team_id\n final_team_list.append(final_team)\n return json.dumps({\"TeamInformation\": final_team_list})",
"def getTeamInfo(team):\r\n results = \"\"\r\n with sqlite3.connect(database_file) as conn:\r\n cursor = conn.cursor()\r\n print(\"SELECT * FROM scores WHERE teamname = '{0}';\".format(team))\r\n team_info = cursor.execute(\"SELECT * FROM scores WHERE teamname = '{0}';\".format(team))\r\n print(team_info.fetchall())\r\n for row in team_info.fetchall():\r\n teamname, auto, rc, spirit, video = row\r\n results += result_string.format(teamname, auto, rc, spirit, video) + \"\\n\"\r\n return results",
"def team_read(token_user, team_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n return json.dumps(team.as_dict(for_user=token_user))",
"def test_teams_get_team_v1(self):\n pass",
"def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)",
"def team_details(request, id):\n template = loader.get_template('team/details.html')\n\n try:\n team = Team.objects.get(pk=id)\n team_members = User.objects.filter(profile__team=team)\n\n context = {\n 'team_name': team.name,\n 'team_info': team.information,\n 'team_logo': team.logo,\n 'team_members': team_members,\n 'days': Information.getDaysToContest()\n }\n\n except Team.DoesNotExist:\n context = None\n\n return CustomHttpResponse.send(template, context, request)",
"def get_team_info_by_team_id(self, start_at_team_id=None, **kwargs):\n if 'start_at_team_id' not in kwargs:\n kwargs['start_at_team_id'] = start_at_team_id\n url = self.__build_url(urls.GET_TEAM_INFO_BY_TEAM_ID, **kwargs)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)",
"def get_team(self):\n if self.team:\n return self.team\n return None",
"def view_team_page(request, team_pk):\n\t\n\tselected_team = ChallengeTeam.objects.get(pk = team_pk)\n\t\n\tusers = selected_team.team_members.all()\n\t\n\tteam_name = selected_team.team_name\n\t\n\tall_results = get_team_results(users, selected_team.challenge.schedule)\n\tteam_consistency = all_results[\"consistency\"]\n\tteam_completion = all_results[\"completion\"]\n\t\n\tmember_names = []\n\tfor usr in users:\n\t\tprint usr.first_name + \" \" + usr.last_name\n\t\tmember_names.append(usr.first_name + \" \" + usr.last_name)\n\t\t\n\tjoin_control = \"join\"\n\tif(request.user in selected_team.team_members.all()):\n\t\tjoin_control = \"leave\"\n\telif(selected_team.challenge.invite_only and not request.user in selected_team.invited.all()):\n\t\tjoin_control = \"invite\"\n\t\n\tcontext = RequestContext(request, { \"team_pk\" : team_pk, \"name\" : team_name, \"members\" : member_names, \"consistency\" : team_consistency, \"completion\" : team_completion, \"join_control\" : join_control, \"messages\" : messages })\n\treturn render_to_response(\"encourage/view_team.html\", context)",
"def display_selected_team(team_id):\n if request.method == 'GET':\n result_dict = {}\n teams = get_team()\n players = Player.query.join(Team, Player.team_id==team_id).\\\n add_columns(Player.player_fname,Player.player_lname,Team.team_name,Player.player_id)\n result_dict['teams'] = teams\n result_dict['players']= players\n return render_template('viewplayers.html', result=result_dict)",
"def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)",
"async def team_show(self, ctx: commands.Context, team_id: int):\n try:\n if team_id not in self.teams:\n self.teams[team_id] = await self._get_team_data(team_id)\n team = self.teams[team_id]\n except KeyError:\n await ctx.send(f'Unrecognized team ID {team_id}. If you think this is a '\n 'valid team ID, perhaps no one from that team has '\n 'registered a Discord account yet.')\n return\n\n if ctx.guild:\n members, users = self._get_members_if_possible(\n [user.id for user in team.users], ctx.guild)\n else:\n members, users = [], team.users\n\n pages = paginate_team_data(members, users,\n [channel for channel in team.channels\n if channel and channel.guild == ctx.guild])\n\n embeds = [\n discord.Embed(title=f'**{team.display_name} (ID: {team.team_id})**',\n color=discord.Color(0x22aaff),\n description=content)\n for content in pages]\n if len(embeds) == 1:\n await ctx.send(embed=embeds[0])\n else:\n await menu(ctx, embeds, DEFAULT_CONTROLS, timeout=120)",
"def team(request,league_id = 1,team_id = -1):\n\n\t#define variables\n\targs = {}\n\n\t#getting data form models\n\tthis_team = get_object_or_404(Team, pk = team_id)\n\tnews = this_team.news_set.all().order_by('date')\n\tthis_league = get_object_or_404(League, pk = league_id)\n\tteams = this_league.team_set.all().exclude(pk = team_id).order_by('name')\n\tleagues = League.objects.all().exclude(pk = league_id).order_by('name')\n\n\t#context data initialization into dictionary 'args'\n\targs['this_team'] = this_team\n\targs['this_league'] = this_league\n\targs['teams'] = teams\n\targs['leagues'] = leagues\n\targs['news'] = news\n\t\n\treturn render_to_response('news/team.html',args)",
"async def getch_team(self, id: str):\n return self.get_team(id) or await self.fetch_team(id)"
] |
[
"0.7538589",
"0.73244494",
"0.7296382",
"0.7267556",
"0.71839696",
"0.71808416",
"0.70882523",
"0.7044311",
"0.70201695",
"0.70070887",
"0.6972436",
"0.6941021",
"0.6889169",
"0.6869604",
"0.6866879",
"0.6845668",
"0.68446237",
"0.6834434",
"0.6823779",
"0.6790941",
"0.679071",
"0.67847836",
"0.67779666",
"0.67705554",
"0.6667011",
"0.6650933",
"0.66300595",
"0.66121846",
"0.6606158",
"0.6598623"
] |
0.7572377
|
0
|
Sets the token for the football-data.org API
|
async def _tokenset(self, ctx: commands.Context, token: str):
self.config[ctx.message.server.id] = token
dataIO.save_json('data/football/config.json', self.config)
await self.bot.say('football-data API token set')
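dataIO.save_json above is a helper from the older Red-DiscordBot utilities that serialises the config dict ({server_id: token}) to disk. A rough, illustrative stand-in, not the library's actual implementation:

import json

def save_json(path, data):
    # Write the config dict ({server_id: token}) to disk as pretty-printed JSON.
    with open(path, 'w') as fp:
        json.dump(data, fp, indent=4)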
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_token(self, token):\n # type: (Token) -> None\n self.token = token\n self._token_header = \"Bearer \" + token[\"access_token\"]",
"def api_token(self, api_token):\n\n self._api_token = api_token",
"def api_token(self, api_token):\n\n self._api_token = api_token",
"def __init__(self, token):\n self.token = token\n self.session = requests.Session()\n self.session.headers.update({\"Authorization\": \"Bearer {token}\".format(token=self.token)})",
"def bearer_authentication(self, token: str) -> None:\n self.api_session.headers.update({'Authorization': f'Bearer {token}'})",
"def __init__(self, api_token):\n self.api_token = api_token",
"def set_maptoken(self, token):\n self._data['maptoken'] = token",
"def set_access_token(self, token):\n\n self.__current_request_mock.headers['Authorization'] = token",
"def set_token(self, token: AccessToken):\n self.access_token = token.access_token or \"\"\n if isinstance(token, AccessToken):\n self.refresh_token = token.refresh_token or \"\"\n self.token_type = token.token_type or \"\"\n self.expires_in = token.expires_in or 0\n\n lag = datetime.timedelta(seconds=-self.lag_time)\n if token.access_token and token.expires_in:\n lag = datetime.timedelta(seconds=token.expires_in - self.lag_time)\n self.expires_at = datetime.datetime.now() + lag",
"def __init__(self, token, api_key):\n self.token = token\n self.api_key = api_key",
"def login_token(self, token):\n self.token = token # this will also set the refresh_token to None",
"def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)",
"def get_token(self):\n url = '/auth-token/'\n data = self._http_post(url, self.credentials)\n token = data['token']\n assert len(token) == 40, 'The length of seahub api auth token should be 40'\n self.token = 'Token ' + token",
"def request(self, token):\n pass",
"def __init__(self, token):\r\n self.apiroot = 'https://api-ssl.bitly.com/v3'\r\n\r\n self.access_token = token\r\n self.add_filter(self.add_authorization)",
"def __set_authentication_token(self, token):\n cache = {\"authentication_token\": token}\n save_json(self._tokenPath, cache)",
"async def token(self, token):\n # [p]set token <token>\n\n if len(token) < 50:\n await self.bot.say(\"Invalid token.\")\n else:\n CacheAPI.set(key='dwarf_token', value=token, timeout=None)\n await self.bot.say(\"Token set. Restart me.\")\n log.debug(\"Token changed.\")",
"def token(self, token):\n\n self._token = token",
"def token(self, token):\n\n self._token = token",
"def __init__(self, access_token, base_url=\"https://api.crowdstrike.com\"):\n self.headers = {'Authorization': 'Bearer {}'.format(access_token)}\n self.base_url = base_url",
"def __init__(self, api_token, api_server_url='https://api.github.com/graphql'):\n self._api_token = api_token\n self._client = GraphQLClient(api_server_url)\n if api_token:\n self._client.inject_token('bearer ' + api_token)",
"def __init__(self, api_token):\r\n self.apiroot = 'https://api.pipedrive.com/v1'\r\n self.api_token = api_token\r\n self.add_filter(self.add_auth)",
"def __init__(self, access_token, base_url='https://api.crowdstrike.com'):\n self.headers = { 'Authorization': 'Bearer {}'.format(access_token) }\n self.base_url = base_url",
"def _update_token(token):\n session.token = token",
"def __init__(self, key, token=None):\r\n self.apiroot = 'https://api.trello.com/1'\r\n self.key = key\r\n self.token = token\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.use_json)",
"def auth_token(self):",
"def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers",
"def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}",
"def __init__(self, token):\n\n response = requests.get(self.BASE_URL + '/user?access_token=' + token)\n\n if response.status_code != 200:\n raise GistException(Gist.__get_response_error('The entered token is invalid', response))\n else:\n self.__user_data = response.json()\n self.__headers = {\n 'X-Github-Username': self.__user_data.get('login'),\n 'Content-Type': 'application/json',\n 'Authorization': 'token %s' %token\n }\n self.__defaults = {\n 'public': False,\n 'files': {}\n }",
"def token_data(self, token_data: TokenData):\n\n self._token_data = token_data"
] |
[
"0.72034425",
"0.6961557",
"0.6961557",
"0.6853091",
"0.6750332",
"0.6673931",
"0.66676825",
"0.6644839",
"0.66060215",
"0.65552807",
"0.652376",
"0.6491826",
"0.6440416",
"0.63904065",
"0.6361036",
"0.6346903",
"0.63006663",
"0.6299534",
"0.6299534",
"0.62760955",
"0.6268818",
"0.62540483",
"0.6206737",
"0.6192468",
"0.6159316",
"0.615049",
"0.61486626",
"0.60476726",
"0.6036454",
"0.60307485"
] |
0.752868
|
0
|
Gets last matchday fixtures
|
async def _lastfixtures(self, ctx: commands.Context, league_id: str):
headers = ['ID', 'Home', 'G', ' ', 'G', 'Away']
data = await self._get_league_fixtures_timeframe(ctx.message.server.id, league_id, 'p7')
await self.bot.say('```diff\n+ Last fixtures```')
pretty_data = []
for fixture in data['fixtures']:
pretty_data.append([
fixture['id'],
'[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),
fixture['goalsHomeTeam'],
' - ',
fixture['goalsAwayTeam'],
'[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])
])
await self.bot.say(box(tabulate(pretty_data, headers=headers)))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def _get_league_fixtures_matchday(self, server_id: str, league_id: str, matchday: str):\n params = {'matchday': matchday}\n url = self.api_url + 'competitions/{}/fixtures'.format(league_id)\n\n return await self._make_request(url, params, server_id)",
"async def _matchdayfixtures(self, ctx: commands.Context, league_id: str, matchday: str='1'):\n headers = ['ID', 'Home', ' ', ' ', 'Away']\n data = await self._get_league_fixtures_matchday(ctx.message.server.id, league_id, matchday)\n\n await self.bot.say('```diff\\n+ Matchday ' + matchday + ' fixtures```')\n pretty_data = []\n for fixture in data['fixtures']:\n pretty_data.append([\n fixture['id'],\n '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),\n fixture['result']['goalsHomeTeam'],\n fixture['result']['goalsAwayTeam'],\n '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])\n ])\n\n await self.bot.say(box(tabulate(pretty_data, headers=headers)))",
"def test_last_tracking():\n yoga_last_day = analytics.last_day(yoga_habits_trackings())\n run_last_day = analytics.last_day(run_habits_trackings())\n read_last_day = analytics.last_day(read_habits_trackings())\n meditation_last_day = analytics.last_day(meditation_habits_trackings())\n french_last_day = analytics.last_day(french_habits_trackings(), 5)\n\n assert yoga_last_day == date(2021, 7, 23)\n assert run_last_day == date(2021, 7, 12)\n assert read_last_day == date(2021, 7, 22)\n assert meditation_last_day == date(2021, 7, 26)\n assert french_last_day == date(2021, 7, 26)",
"def test_get_latest_dweet_for(self):\n dweepy.dweet_for(self.my_thing_id, test_data)\n\n dweets = dweepy.get_latest_dweet_for(self.my_thing_id)\n check_valid_get_response(self, dweets)",
"def get_last_reverse_competing_matches(matches, date, home_team, away_team, x=2):\n away_matches = matches[(matches['home_team_api_id'] == away_team) & (matches['away_team_api_id'] == home_team)]\n last_matches = get_most_recent_matches_helper(away_matches, date, x)\n return last_matches",
"async def _get_league_fixtures_timeframe(self, server_id: str, league_id: str, timeframe: str):\n params = {'timeFrame': timeframe}\n url = self.api_url + 'competitions/{}/fixtures'.format(league_id)\n\n return await self._make_request(url, params, server_id)",
"def retrieve_fixture():\n j = json.load(open(\"./tests/fixtures/crond_event.json\"))\n return j",
"def test_get_last_tracking_insertation_():\n insert_habit()\n insert_day()\n trackings_table = analytics.trackings_table()\n assert trackings_table[60][:-1] == (6, str(date.today()))\n delete_habit()",
"def last_date_method(self) -> datetime.datetime:\n\n try:\n with open(os.path.join(THIS_DIR, \"data_file.json\"), mode='r') as json_file:\n try:\n data = datetime.datetime.strptime(json.load(json_file)[self.site],\n '%Y-%m-%d %H:%M:%S')\n return data\n\n except:\n return datetime.datetime(1970, 1, 1)\n except:\n with open(os.path.join(THIS_DIR, \"data_file.json\"), mode='a') as json_file:\n json.dump({}, json_file)\n json_file.close()\n\n return datetime.datetime(1970, 1, 1)",
"def get_last_competing_matches(matches, date, home_team, away_team, x=2):\n\n # Find matches of both teams\n home_matches = matches[(matches['home_team_api_id'] == home_team) & (matches['away_team_api_id'] == away_team)]\n last_matches = get_most_recent_matches_helper(home_matches, date, x)\n return last_matches",
"def get_past_matches_data(team):\n matches = team.get_past_matches()\n match_list = []\n for match in matches:\n match_dict = {}\n match_dict['match_date'] = match.match_date\n match_dict['match_name'] = match.__str__()\n match_dict['id'] = match.id\n innings = match.get_innings()\n if len(innings):\n if innings[0].runs > innings[1].runs:\n match_dict['winner_team'] = innings[0].bat_team\n match_dict['win_margin'] = innings[0].runs - innings[1].runs\n match_dict['win_type'] = 'Runs'\n match_dict['winner_score'] = str(innings[0].runs) + '/' + str(innings[0].wickets)\n else:\n match_dict['winner_team'] = innings[1].bat_team\n match_dict['win_margin'] = 10 - innings[1].wickets\n match_dict['win_type'] = 'Wickets'\n match_dict['winner_score'] = str(innings[1].runs) + '/' + str(innings[1].wickets)\n match_list.append(match_dict)\n return match_list",
"def get_last_asaway_matches(matches, date, season, team, x=5):\n\n # Filter team matches from matches\n total_matches = matches[matches['away_team_api_id'] == team]\n\n last_matches = get_most_recent_same_season_matches_helper(total_matches, date, season, x)\n\n # Return last matches\n return last_matches",
"def get_last_ashome_matches(matches, date, season, team, x=5):\n\n # Filter team matches from matches\n total_matches = matches[matches['home_team_api_id'] == team]\n\n # print(total_matches)\n\n last_matches = get_most_recent_same_season_matches_helper(total_matches, date, season, x)\n\n # omitting all with less than 5 recent matches\n # if last_matches.shape[0] != x:\n # return last_matches.iloc[0:0, :]\n\n # print(last_matches)\n # Return last matches\n return last_matches",
"def get_latest_league_data(self, df):\n max_date = pd.to_datetime(df[\"Date\"]).max()\n df = df[df[\"Date\"] == max_date]\n [latest_league_file_dir] = df[\"File\"].values\n df = self.extract_df(latest_league_file_dir)\n return df",
"def latest(cls, team):\n return cls.query(\n cls.team == team.lower()\n ).order(-cls.date).get()",
"def get_last_match_time():\n\n query = '''SELECT max(reported_at) FROM matches'''\n\n record = db.read(query)\n last_match_time = record[0][0]\n\n return last_match_time",
"def get_recent_matches(self, limit=None):\n return (Match.objects\n .filter(company=self)\n .order_by('-played_time')[:limit or MATCH_RESULT_LIMIT]\n )",
"def completedPrecoveryMaxDate(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Fetch the maximum MJD precovery has processed.\n sql = 'select max(epoch_mjd) from detections d, tracklet_attrib ta '\n sql += 'where d.det_id = ta.det_id and ta.tracklet_id in '\n sql += '(select tracklet_id from history_precoveries)'\n nRes = cursor.execute(sql)\n return(cursor.fetchone()[0])",
"def test_ordinal_last(self):\n with open(\"tests/data_files/memorial_day_dates.txt\", \"r\") as dates_file:\n dates_strings_list = dates_file.read().splitlines()\n\n for date_string in dates_strings_list:\n test_date = date(*[int(p) for p in date_string.split(\"-\")])\n memorial_day = get_by_values(Ordinal.last, Weekday.Monday, Month.May, test_date.year)\n\n self.assertEquals(test_date, memorial_day)",
"def last(self):\n rows = sorted(self, key=lambda x: x.date)\n return rows[-1]",
"def last_days_results(self, days):\n return self.security['Date', 'Close', 'FinalDecision'][-days:]",
"def latestGamePack(team):\n lgr= get('schedule', {'ver':'v1', 'sportId':1, 'date':today, 'teamId':team, 'fields':['dates','games','gamePk'] })\n return lgr['dates'][0]['games'][0]['gamePk']",
"def fixture_stats_singel(self, fixture):\n ds = load_match_data(f'https://footballapi.pulselive.com/football/stats/match/{fixture}')\n return ds",
"def construct_fixtures_tweets():\n\n # Declares today's date\n today = str(datetime.date.today())\n\n # Gets today's fixtures data from football-data.org API\n connection = http.client.HTTPConnection('api.football-data.org')\n headers = {'X-Auth-Token': ''}\n connection.request('GET', '/v2/competitions/PL/matches?dateFrom='+today+'&dateTo='+today, None, headers)\n response = json.loads(connection.getresponse().read().decode())\n\n # Initialises fixtures tweet\n tweet1 = \"Today's #PremierLeague matches:\\n\"\n tweet2 = \"\"\n tweet3 = \"\"\n\n # Checks if any fixtures on today\n if response['matches']:\n # For each fixture obtained, appends line to tweet with information\n for i in range(len(response['matches'])):\n time = response['matches'][i]['utcDate']\n utc = datetime.datetime.strptime(time, '%Y-%m-%dT%H:%M:%SZ')\n gmt = pytz.timezone(\"Europe/London\").fromutc(utc)\n ko_time = gmt.strftime(\"%H:%M\")\n tweet_line = response['matches'][i]['homeTeam']['name'] + ' vs ' + response['matches'][i]['awayTeam'][\n 'name'] + ' (' + ko_time + ')' + '\\n'\n # Checks that tweet will not be too long (~ >280 chars), by splitting into separate tweets\n if len(tweet1) >= 220:\n tweet2 += tweet_line\n elif len(tweet2) >= 220:\n tweet3 += tweet_line\n else:\n tweet1 += tweet_line\n return send_fixtures_tweets(tweet1, tweet2, tweet3)\n else:\n return print('No PL fixtures today')",
"def get_last_player_attributes(self):\n max_date = \"0000-00-00\"\n last_player_attributes = None\n\n for player_attributes in self.get_player_attributes():\n if player_attributes.date > max_date:\n max_date = player_attributes.date\n last_player_attributes = player_attributes\n\n return last_player_attributes",
"def testLastBillable(self):\n months = range(1, 13)\n first_days = [utils.add_timezone(datetime.datetime(2011, month, 1))\n for month in months]\n last_billable = [utils.get_last_billable_day(day).day \\\n for day in first_days]\n #should equal the last saturday of every month in 2011\n self.assertEqual(last_billable,\n [30, 27, 27, 24, 29, 26, 31, 28, 25, 30, 27, 25])",
"def test_last_max_default(client):\n client.get(\"/weather/viena\")\n client.get(\"/weather/sorocaba\")\n client.get(\"/weather/barcelona\")\n client.get(\"/weather/belo horizonte\")\n client.get(\"/weather/rio de janeiro\")\n client.get(\"/weather/recife\")\n\n\n # Validate weither return two lasts cities in cache or not\n response = client.get(\"/weather/6\")\n print(response.data)\n assert (b\"Barcelona\" in response.data) & (b\"Recife\" in response.data) & (b\"Rio de Janeiro\" in response.data) & (b\"Belo Horizonte\" in response.data) & (b\"Sorocaba\" in response.data)",
"def get_last_week_game_data(match_history):\n\n # Get the of exactly 1 week ago, this will be used to find all the matches\n # that were played within the last week.\n last_week_time = get_time_last_week()\n\n # This will store the total duration of all matches played within the last\n # week.\n total_duration = datetime.timedelta()\n\n num_matches = 0 # The number of matches played in the last week\n\n # Loop through all of the matches in the given match history\n for match in match_history:\n # Check if the match occurred within the last week. If it's creation time\n # is greater than the time of exactly 7 days ago, we know that it occurred\n # within this last week.\n if match.creation >= last_week_time:\n total_duration += match.duration # Increment the total time\n num_matches += 1 #Increment the number of matches played in the last\n\n else:\n # Matches in the match history are organized chronologically, so we can\n # break from the for loop the instant a match is found that did not\n # occur within the last week.\n break\n\n return [total_duration, num_matches]",
"def get_dates(db):\n return db.meta.find_one({'name':\"dates\"})['dates']",
"def getLastData(self) -> ghidra.program.model.listing.Data:\n ..."
] |
[
"0.664862",
"0.61880636",
"0.61498684",
"0.57026047",
"0.55819744",
"0.5568221",
"0.55650574",
"0.55338144",
"0.5467822",
"0.543978",
"0.54388535",
"0.5433871",
"0.53994286",
"0.53809017",
"0.53425187",
"0.533992",
"0.533346",
"0.5309541",
"0.52856404",
"0.5279819",
"0.5279419",
"0.5279199",
"0.527915",
"0.5230126",
"0.52205974",
"0.5202811",
"0.5196544",
"0.5184317",
"0.5169785",
"0.5164204"
] |
0.6616225
|
1
|
Gets specific matchday fixtures. Defaults to matchday 1.
|
async def _matchdayfixtures(self, ctx: commands.Context, league_id: str, matchday: str='1'):
headers = ['ID', 'Home', ' ', ' ', 'Away']
data = await self._get_league_fixtures_matchday(ctx.message.server.id, league_id, matchday)
await self.bot.say('```diff\n+ Matchday ' + matchday + ' fixtures```')
pretty_data = []
for fixture in data['fixtures']:
pretty_data.append([
fixture['id'],
'[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),
fixture['result']['goalsHomeTeam'],
fixture['result']['goalsAwayTeam'],
'[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])
])
await self.bot.say(box(tabulate(pretty_data, headers=headers)))
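Both fixture commands above format their rows with tabulate before box() wraps the result in a Discord code block. A standalone sketch of that formatting step, with made-up row values purely for illustration:

from tabulate import tabulate

headers = ['ID', 'Home', ' ', ' ', 'Away']
rows = [[159031, '[57] Team A', 2, 1, '[61] Team B']]
# Default "simple" format: a header line, a dashed rule, then one line per row.
print(tabulate(rows, headers=headers))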
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def _get_league_fixtures_matchday(self, server_id: str, league_id: str, matchday: str):\n params = {'matchday': matchday}\n url = self.api_url + 'competitions/{}/fixtures'.format(league_id)\n\n return await self._make_request(url, params, server_id)",
"def test_single_match(self):\n match = Match.select().first()\n with self.client:\n response = self.client.get(f'/get-match/{match.id}')\n data = json.loads(response.data.decode())\n date = datetime.strptime(data['data']['start_date_time'], \"%Y-%m-%dT%H:%M:%S\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(match.start_date_time.day, date.day)",
"def get_day():\n return handle_invalid_inputs(question_4, days)",
"def _test_find_day(self, days):\n msg = \"Find day in list of %d elements\" % len(days)\n for d in range(0, len(days)):\n self._test_giod(days, days[d], 0,\n d, msg)\n self._test_giod(days, days[d], 1,\n d, msg + \" (next = 1)\")\n self._test_giod(days, days[d], \"next = -1\",\n d, msg)",
"def get_match_data(event: str, match: int, team: int = -1):\n \n if team < 0:\n return requests.get(f'https://us-central1-pearadox-2020.cloudfunctions.net/GetMatchData/{event}/{match:03d}-').json()\n else:\n return requests.get(f'https://us-central1-pearadox-2020.cloudfunctions.net/GetMatchData/{event}/{match:03d}-').json()[f'{match:03}-{team:4}']",
"def get_match_week(match_id):\n a = service_request(\"GetMatchInfo\", {\"matchId\": match_id})\n\n try:\n return a[0][0]\n except: # match not found\n return None",
"def test_return_day(self):\n self.assertEqual(functions.return_day(1), \"Sunday\")\n self.assertEqual(functions.return_day(2), \"Monday\")\n self.assertEqual(functions.return_day(3), \"Tuesday\")\n self.assertEqual(functions.return_day(4), \"Wednesday\")\n self.assertEqual(functions.return_day(5), \"Thursday\")\n self.assertEqual(functions.return_day(6), \"Friday\")\n self.assertEqual(functions.return_day(41), None)",
"def day(self):\n data = await self.get_data(LIGHT)\n return data['day']",
"def _get_date_find(self, request):\n date_find = '{} 00:00:00'.format(dict(request.query_params)['day'][0])\n return date_find",
"def get_fixtures(team, dateFrom=None, dateTo=None, status=None, venue=None, limit=None):\r\n query_params = {}\r\n if dateFrom:\r\n query_params['dateFrom'] = dateFrom\r\n if dateTo:\r\n query_params['dateTo'] = dateTo\r\n if status:\r\n query_params['status'] = status\r\n else:\r\n query_params['status'] = 'SCHEDULED' \r\n if venue:\r\n query_params['venue'] = venue\r\n if limit :\r\n query_params['limit'] = limit \r\n \r\n url = _generate_url(f\"teams/{team}/matches\", query_params)\r\n fixtures = requests.get(url, headers=headers).json()\r\n \r\n return fixtures",
"def _parse_fixture(self, simulate, fixture_season, fixture_date, club_team, fields):\n # Slight hack to decide whether the fixture is specified or not\n if fields[OFFSET['fixture_type']] == \"\":\n print \"No match for {} on {}\".format(club_team['name'], fixture_date)\n return\n\n team_name = club_team['name']\n our_team = club_team['team']\n fixture_type = fields[OFFSET['fixture_type']]\n opp_team_name = fields[OFFSET['opposition']]\n venue_name = fields[OFFSET['venue']]\n fixture_time_str = fields[OFFSET['time']]\n home_away = fields[OFFSET['home_away']]\n\n try:\n opp = Team.objects.get(name=opp_team_name)\n except Team.DoesNotExist:\n print \"ERROR: Could not find opposition team with name '{}'\".format(opp_team_name)\n return\n\n # Create or retrieve Match model (based on opposition team, our team and date)\n if not simulate:\n match, created = Match.objects.get_or_create(season=fixture_season, opp_team=opp, our_team=our_team, date=fixture_date)\n else:\n created = not Match.objects.filter(season=fixture_season, opp_team=opp, our_team=our_team, date=fixture_date).exists()\n match = Match(season=fixture_season, opp_team=opp, our_team=our_team, date=fixture_date)\n\n # Match home/away designation\n try:\n match.home_away = HOME_AWAY[home_away.lower()]\n except:\n print \"ERROR: Invalid Home/Away designation '{}' for {} on {}\".format(home_away, team_name, fixture_date)\n return\n\n # Match fixture type (League/Cup/Friendly)\n try:\n match.fixture_type = MATCH_TYPE[fixture_type.lower()]\n except:\n print \"ERROR: Invalid fixture type designation '{}' for {} on {}\".format(fixture_type, team_name, fixture_date)\n return\n\n # Match time (can be null)\n if fixture_time_str:\n try:\n match.time = datetime.strptime(fixture_time_str, \"%H:%M\").time()\n except:\n print \"ERROR: Could not parse fixture time '{}' for {} on {}\".format(fixture_time_str, team_name, fixture_date)\n return\n else:\n match.time = None\n\n\n # Match Venue (can be null)\n if venue_name.lower() == 'away' or (home_away.lower() == 'a' and venue_name.lower() == ''):\n match.venue = None\n else:\n try:\n name_q = Q(name=venue_name) | Q(short_name=venue_name)\n match.venue = Venue.objects.get(name_q)\n except Venue.DoesNotExist:\n print \"ERROR: Could not find venue '{}' for {} on {}\".format(venue_name, team_name, fixture_date)\n return\n\n if not simulate:\n match.save()\n\n print \"{} {}\".format(\"Created\" if created else \"Updated\", match)",
"def lookup_daily(self, **kwargs):\n return self.lookup(period=self.PERIOD_DAILY, **kwargs)",
"def test_get_index_of_day_one_day_list(self):\n days = [\"15.07.2013\"]\n self._test_find_day(days)\n self._test_giod(days, \"16.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"16.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"16.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")\n self._test_giod(days, \"10.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"10.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"10.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")",
"def findWetWeatherDays(self, dbsession, today):\n wetDays = dbsession.query(self.dt).filter(or_(self.weather_description == \"light rain\", self.weather_description == \"moderate rain\")).all()\n # if one of those days is today return it.\n # else just return a wet day.\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]",
"def fetch_next_match() -> Optional[MatchDict]:\n future_matches = Match.objects.filter(start_date_time__gt=timezone.now())\n\n if not any(future_matches):\n return None\n\n next_match = min(future_matches, key=lambda match: match.start_date_time)\n\n return {\n \"round_number\": next_match.round_number,\n \"season\": next_match.start_date_time.year,\n }",
"def for_day(self, day_date, partner_name):\n\n plant_for_day = None\n taxon = None\n\n while not taxon:\n candidate_plant = self._pick_candidate_plant(\n day_date, partner_name)\n if candidate_plant:\n # Make sure this plant still exists in the main database.\n try:\n taxon = Taxon.objects.get(\n scientific_name=candidate_plant.scientific_name)\n plant_for_day = candidate_plant\n except ObjectDoesNotExist:\n # Disable this plant in the Plant of the Day list,\n # so it cannot be picked again.\n candidate_plant.include = False\n candidate_plant.save()\n else:\n break\n\n if plant_for_day:\n plant_for_day.last_seen = day_date\n plant_for_day.save()\n\n return plant_for_day",
"def fixture_stats_singel(self, fixture):\n ds = load_match_data(f'https://footballapi.pulselive.com/football/stats/match/{fixture}')\n return ds",
"def festival(month, day):\n return data.FESTIVALS.get((month, day))",
"def getOneDay(self,day_number=0):\n return self.event_time_sequence[day_number]",
"def get_fixtures(directory): \n fixtures_home = {}\n fixtures_away = {}\n fin = open(directory + \"/fixtures.csv\", 'rU')\n reader = csv.DictReader(fin)\n for row in reader:\n fixtures_home[int(row['id'])] = int(row['team_h'])\n fixtures_away[int(row['id'])] = int(row['team_a'])\n return fixtures_home, fixtures_away",
"def get_day(x):\n return x[\"SALE DATE\"].day",
"def test_get_index_of_day(self):\n days = [\"01.07.2013\",\n \"05.07.2013\",\n \"09.07.2013\",\n \"14.07.2013\",\n \"19.07.2013\"]\n # Find the days\n self._test_find_day(days)\n # Search for a day that is not part of the list\n # 1. A day before the first entry\n self._test_giod(days, \"01.01.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"01.01.2013\", 1,\n 0, \"Find a date before days withe next = 1\")\n self._test_giod(days, \"01.01.2013\", 1,\n 0, \"Find a date before days withe next = -1\")\n # 2. A day after the last entry\n self._test_giod(days, \"01.12.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"01.12.2013\", 1,\n 4, \"Find a date after days with next = 1\")\n self._test_giod(days, \"01.12.2013\", -1,\n 4, \"Find a date after days with next = -1\")\n # 3. A day in the middle\n self._test_giod(days, \"06.07.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"06.07.2013\", 1,\n 2, \"Find a date after days with next = 1\")\n self._test_giod(days, \"06.07.2013\", -1,\n 1, \"Find a date after days with next = -1\")",
"def find_day(self, observance_id: str) -> Union[None, Tuple[date, Day]]:\n for date_, day in self._container.items():\n if observance_id in [ii.id for ii in day.all]:\n return date_, day",
"def day(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"day\")",
"def load_default_for_date(date):\n weekday = datetime.datetime.strptime(date, '%y%m%d').strftime('%a').lower()\n try:\n with open(DEFAULT_FNAME, 'r') as f:\n defaults = json.load(f)\n for ctrl_type in switch.TARGETS.keys():\n for key in defaults[ctrl_type].keys():\n if weekday in key:\n defaults[ctrl_type] = defaults[ctrl_type][key]\n break\n # by the end of the inner loop, today's settings for ctrl_type\n # should have been brought up by one level, directly under ctrl_type\n # now validate it as below\n first_key = next(iter(defaults[ctrl_type]))\n print type(defaults[ctrl_type][first_key])\n if type(defaults[ctrl_type][first_key]) is dict:\n # today's weekday is not found in default weekday settings\n logging.error(\"Today's (%s) setting not found in default settings.\", weekday)\n logging.warn(\"Turning CH and HW off for today due to lack of settings.\")\n switch.ch_off()\n switch.hw_off()\n beep()\n sys.exit()\n logging.info(\"Loaded default settings for %s\", date)\n return defaults\n except:\n logging.exception(\"Unable to load default settings: %s\", DEFAULT_FNAME)\n raise",
"def day(self):\n try:\n return self.schedule.day\n except Schedule.DoesNotExist:\n return None",
"def _first_good_date(self, day):\n count = 0\n while True:\n try:\n self.data.loc[day - timedelta(count)]\n return day - timedelta(count)\n except KeyError:\n count += 1",
"def get_birthdays(self, date=None, chat=None):\n # kind of hacky and not really universal at the moment\n if date:\n self.cursor.execute(\"\"\"\n SELECT * from birthday\n WHERE substr(date,1,5)=?\n \"\"\", (date,))\n elif chat:\n self.cursor.execute(\"\"\"\n SELECT * from birthday\n WHERE chat_id=?\n \"\"\", (chat.id,))\n return self.cursor.fetchall()",
"def select_weekdays(self, ldays):\n sel = []\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n if stime[6] in ldays:\n sel.append(i)\n data = STData(self.wpath, self.city, self.application)\n data.dataset = self.dataset[sel]\n return data",
"def _day_rule_matches(self, rule, dt):\n if dt.weekday() == 4:\n sat = dt + datetime.timedelta(days=1)\n if super(SiteHolidays, self)._day_rule_matches(rule, sat):\n return True\n elif dt.weekday() == 0:\n sun = dt - datetime.timedelta(days=1)\n if super(SiteHolidays, self)._day_rule_matches(rule, sun):\n return True\n return super(SiteHolidays, self)._day_rule_matches(rule, dt)"
] |
[
"0.75872165",
"0.6141927",
"0.5587973",
"0.5584712",
"0.55028814",
"0.5417704",
"0.5377775",
"0.5377625",
"0.53597903",
"0.53553355",
"0.533468",
"0.52482533",
"0.524783",
"0.5247689",
"0.5236725",
"0.51911557",
"0.5182286",
"0.5163468",
"0.51237655",
"0.50984573",
"0.50659573",
"0.50621843",
"0.5045259",
"0.50325465",
"0.5026883",
"0.5026526",
"0.50039744",
"0.50003487",
"0.5000259",
"0.49581832"
] |
0.7391474
|
1
|
Generator for each file and directory
|
def files_and_folders(self):
yield from self._root.files_and_folders(0)
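The generator above simply delegates to the root node with yield from; each node then walks its children recursively and yields (depth, entry) pairs (a per-node version appears among the negatives below). A self-contained sketch of the same pattern on a simplified node type, not the RARC classes themselves:

class Node:
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []

    def files_and_folders(self, depth):
        # Yield each child at the current depth, then recurse one level deeper.
        for child in self.children:
            yield depth, child
            yield from child.files_and_folders(depth + 1)

root = Node('root', [Node('a', [Node('a1')]), Node('b')])
for depth, node in root.files_and_folders(0):
    print('  ' * depth + node.name)
# a
#   a1
# b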
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def filelist_generator(self):\n for filename in self.filenames:\n yield filename",
"def directory_walker(start_dir):\n\n for root, dirs, files in os.walk(os.path.expanduser(start_dir)):\n for f in files:\n filename = os.path.join(root, f)\n # Only process if its a type of image\n file_type = mimetypes.guess_type(filename.lower())[0]\n if file_type is not None and file_type.startswith('image/'):\n yield filename",
"def data_iter(img_dirname):\n img_dir = Path(img_dirname)\n if not img_dir.exists():\n raise ValueError(f'{img_dirname} not exist!')\n if img_dir.is_file():\n yield img_dir\n else:\n for filename in img_dir.iter():\n yield filename",
"def _get_all_files(dir_path):\n for root, _, filenames in os.walk(dir_path):\n for name in filenames:\n target = os.path.join(root, name)\n yield target",
"def open_dir(input_path, patterns):\r\n for ext in patterns:\r\n for file in Path(input_path).glob('**/*.' + ext):\r\n yield file",
"def get_all_files(directory):\r\n for dirpath, _dirnames, filenames in os.walk(directory):\r\n for filename in filenames:\r\n yield (filename, dirpath)",
"def _iter_files_in_dir(directory):\n for filename in os.listdir(directory):\n filepath = os.path.join(directory, filename)\n if os.path.isfile(filepath):\n yield filepath",
"def walk(self):\n for _root, _dirs, files in os.walk(self.root):\n for filename in files:\n if self.is_key(filename):\n yield filename",
"def FileIter(func_name):\n \n if func_name == 'convert_pmids_to_pmcs':\n sdir = partial(os.path.join,'Data', 'SearchResults')\n pmc_file = os.path.join('Data', 'PMC-ids.csv')\n files = [x for x in os.listdir(sdir('')) if x.endswith('.res')]\n for f in files:\n yield (sdir(f), pmc_file), sdir(f+'.conv')\n\n elif func_name == 'search_pubmed':\n sdir = partial(os.path.join,'Data', 'SearchResults')\n queryfile = os.path.join('Data', 'QueryList.txt')\n with open(queryfile) as handle:\n for row in csv.DictReader(handle):\n fname = '%s--%s.res' % (GeneralUtils.slugify(row['org']), \n GeneralUtils.slugify(row['search']))\n ofile = sdir(fname)\n yield queryfile, ofile, row['search']\n\n elif func_name == 'download_pmids':\n \n sdir = partial(os.path.join,'Data', 'SearchResults')\n odir = os.path.join('Data', 'RawXML')\n files = [x for x in os.listdir(sdir('')) if x.endswith('.conv')]\n \n for f in files:\n yield sdir(f), sdir(f+'.dl'), odir\n\n elif func_name == 'extract_text':\n \n sdir = partial(os.path.join, 'Data', 'RawXML')\n odir = partial(os.path.join, 'Data', 'SentenceFiles')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.xml')])\n for f in files:\n name = f.split('.')[0]\n if f.startswith('PMC'):\n typ = 'pmc'\n else:\n typ = 'pubmed'\n\n yield sdir(f), odir(name+'.sent'), typ\n\n elif func_name == 'get_mutations':\n \n sdir = partial(os.path.join, 'Data', 'SentenceFiles')\n odir = partial(os.path.join, 'Data', 'MutFiles')\n finder = None#mutfinder_gen('regex.txt')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.sent')])\n\n for f in files:\n name = f.split('.')[0]\n yield sdir(f), odir(name + '.mut')\n \n elif func_name == 'process_mut_file':\n \n sdir = partial(os.path.join, 'Data', 'MutFiles')\n odir = partial(os.path.join, 'Data', 'ProteinFiles')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.mut')])\n\n for f in files:\n name = f.split('.')[0]\n yield sdir(f), (odir(name + '.prot'), odir(name + '.sen'))\n elif func_name == 'mapping_files':\n path = 'Data/Mapping/'\n items = (('ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/idmapping.dat.gz', 'idmapping.dat.sort'),\n ('ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene_info.gz', 'gene_info'),\n ('ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz', 'PMC-ids.csv'),\n ('ftp://nlmpubs.nlm.nih.gov/online/mesh/.asciimesh/d2011.bin', 'd2011.bin'))\n for url, ofile in items:\n yield None, os.path.join(path, ofile), url, path",
"def files_and_folders(self, depth):\n for directory in self.rarc._directories[self.directory_index:][:self.directory_count]:\n yield depth, directory\n if isinstance(directory, Folder):\n if directory.data_offset < len(self.rarc._nodes):\n node = self.rarc._nodes[directory.data_offset]\n if directory.name == \".\" or directory.name == \"..\":\n continue\n yield from node.files_and_folders(depth + 1)",
"def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index",
"def scandir(path='.'):\r\n for name in os.listdir(path):\r\n yield GenericDirEntry(path, name)",
"def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)",
"def walkdir(self, folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))",
"def iter_files(path):\n if os.path.isfile(path):\n yield path\n elif os.path.isdir(path):\n for dirpath, _, filenames in os.walk(path):\n for f in filenames:\n yield os.path.join(dirpath, f)\n else:\n raise RuntimeError('Path %s is invalid' % path)",
"def __iter__(self):\n for p in self.paths:\n yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)",
"def ReadFilesGenerator(self):\n\n for file in self._file_names:\n file_list = []\n\n # TODO see further into yielding one line at a time\n with open(file, 'r', encoding='mbcs') as sped:\n file_list = sped.read().splitlines()\n\n if not self.isSigned(file_list):\n file_list = self.stripSignature(file_list)\n\n yield file, file_list",
"def get_object(directory):\r\n path = join(self.base_path, directory)\r\n for f in listdir(path):\r\n if isfile(join(path, f)):\r\n yield \"file\", f\r\n else:\r\n yield \"subdir\", join(directory, f)",
"def iterfiles(self, include_dirs: bool = False) -> Iterator[P]:\n dirs = deque([self.filetree])\n while dirs:\n for p in dirs.popleft().iterdir():\n if p.is_dir():\n dirs.append(p)\n if include_dirs:\n yield p\n else:\n yield p",
"def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)",
"def gather_test_files(file_args):\n for dir_or_file in file_args:\n if path.isdir(dir_or_file):\n for current_dir, _, files in os.walk(dir_or_file):\n for file in filter(lambda f: f.endswith(\".09\"), files):\n yield TestFile(dir_or_file, path.join(current_dir, file))\n else:\n yield TestFile(path.dirname(dir_or_file), dir_or_file)",
"def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)",
"def open_dir(\n self, path, mode=\"r\", match=\".*\", force=False, compression=\"autodetect\"\n ):\n matcher = re.compile(match)\n for filename in os.listdir(path):\n # ignore non-matching filenames\n if not matcher.search(filename):\n continue\n fileobj = self.open_file(\n os.path.join(path, filename),\n mode=mode,\n force=force,\n compression=compression,\n )\n yield fileobj",
"def generateFileList(args):\n\textensions = (\"jpg\", \"gif\", \"png\", \"bmp\", \"xpm\", \"ico\")\n\n\tfor arg in args:\n\t\tif not os.access(arg, os.R_OK):\n\t\t\tprint >> sys.stderr, \"File not found: %s\" % arg \n\t\t\tcontinue\n\t\tif os.path.isdir(arg):\n\t\t\tfor (root, dirs, files) in os.walk(arg):\n\t\t\t\tfor file in files:\n\t\t\t\t\tif os.path.splitext(file)[1][1:].lower() in extensions:\n\t\t\t\t\t\tyield os.path.join(root, file)\n\t\telse:\n\t\t\tyield arg",
"def walk(self): # DirObj.walk\n for name, subdir in self.subdirs.iteritems():\n for e in subdir.walk():\n yield e\n for name, fileEntry in self.files.iteritems():\n yield fileEntry\n yield self",
"def walk(self):\n if os.path.exists(self.folder):\n for root_path, _, f_files in os.walk(self.folder):\n yield root_path, f_files\n if not self.recursive:\n break\n else:\n print(f\"[!e] Passed folder doesn't exist. Path: {self.folder}\",\n file=sys.stdout)\n exit(0)",
"def iterdir(path, guarantee_model_names=True):\n # Fix path\n path = os.path.expanduser(os.path.abspath(path))\n\n if not os.path.isdir(path):\n raise ValueError('Given path is not a directory.')\n\n # Scan files\n for fname in sorted(os.listdir(path)):\n fpath = os.path.join(path, fname)\n\n # Check if it's a model file\n if not os.path.isfile(fpath):\n continue\n base, ext = os.path.splitext(fname)\n if ext != '.mmt':\n continue\n\n # Read model & protocol\n model, protocol, x = myokit.load(fpath)\n\n # Skip files without model\n if model is None:\n continue\n\n # Set name attribute\n if guarantee_model_names:\n if not model.name():\n model.meta['name'] = base\n\n # Yield\n yield model, protocol",
"def files(self, glob='*', limit=0):\n for a in self.filenames(glob, limit=limit):\n yield Path(a)",
"def __iter__(self):\n for f in self.path.glob('**/*'):\n if f.is_file() and not os.stat(str(f.resolve())).st_size == 0:\n yield Resource(str(f.resolve()), DiskCrawler.compute_digest)",
"def generate_files(self):\n import re\n for year, url in self.metadata.build.sources.items():\n zf = self.filesystem.download(url)\n for fn in self.filesystem.unzip_dir(zf, re.compile(r'.*all.*', re.IGNORECASE)):\n yield year, fn"
] |
[
"0.7190483",
"0.7147999",
"0.70710903",
"0.70637256",
"0.7036718",
"0.70238703",
"0.70016503",
"0.6983245",
"0.6972447",
"0.6955986",
"0.6939163",
"0.693766",
"0.6891783",
"0.6865853",
"0.6865214",
"0.6835685",
"0.6812363",
"0.68081945",
"0.6771897",
"0.6769362",
"0.6762657",
"0.6733893",
"0.6729575",
"0.67271644",
"0.6720109",
"0.6714755",
"0.6695095",
"0.66622394",
"0.6649828",
"0.66466606"
] |
0.7265181
|
0
|
Read and parse RARC from buffer.
|
def read(buffer) -> RARC:
# TODO: Add error checking
header = struct.unpack('>IIIIIIII', buffer[:32])
info = struct.unpack('>IIIIIIHHI', buffer[32:][:32])
rarc = RARC(*header, *info)
data = buffer[32:]
file_data = data[rarc.file_offset:][:rarc.file_length]
read_string_table(rarc, data)
read_nodes(rarc, data)
read_directories(rarc, data, file_data)
return rarc
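# Editorial note (not part of the original sample): the two format strings above
# cover the fixed-size preamble. '>IIIIIIII' unpacks eight big-endian uint32
# values (the 32-byte header) and '>IIIIIIHHI' unpacks six uint32 + two uint16 +
# one uint32 (another 32 bytes of info). Slicing `data = buffer[32:]` suggests
# the offsets held in the info block are taken relative to the end of the
# 32-byte header. The helpers read_string_table/read_nodes/read_directories are
# assumed to be defined elsewhere in the same module.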
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_from_bytes(self, raw_buffer):\n\n try:\n (cpu_svn,\n self.misc_select,\n _,\n attributes,\n mr_enclave,\n _,\n mr_signer,\n _,\n self.isv_prod_id,\n self.isv_svn,\n _,\n report_data) = \\\n struct.unpack(self._format, raw_buffer)\n\n # Further parse embedded structures\n self.cpu_svn.parse_from_bytes(cpu_svn)\n self.attributes.parse_from_bytes(attributes)\n self.mr_enclave.parse_from_bytes(mr_enclave)\n self.mr_signer.parse_from_bytes(mr_signer)\n self.report_data.parse_from_bytes(report_data)\n except struct.error as se:\n raise ValueError('Unable to parse: {}'.format(se))",
"def parse_rmc(self, pkt):\n fields = pkt.split(',')\n\n if fields[2] != 'A':\n self.clear_fix(\"rmc\")\n return\n\n try:\n fix_ll = self.parse_ll_fix(fields[3:7])\n ddmmyy = fields[9]\n dd = int(ddmmyy[0:2])\n mm = int(ddmmyy[2:4])\n yy = int(ddmmyy[4:])\n if yy < 100:\n yy += 2000\n fix_time = self.parse_gps_utc(fields[1])\n self.set_gps_time((yy, mm, dd) + fix_time, \"rmc\")\n self.set_fix(fix_time, fix_ll, \"rmc\")\n except:\n self.errlog(\"parse_rmc_fail\", \"Failed to parse: \" + pkt)\n\n return",
"def parse(buffer):\n pca, means, components = Parser.__get_pca(buffer, 1)\n return pca, means, components",
"def readline(self) -> bytes | None:",
"def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None",
"def _parse_ach_file(self, contents):\n file_length = len(contents)\n\n for index in range(0, file_length, self.LINE_LENGTH):\n line = contents[index:index + self.LINE_LENGTH]\n\n if line.startswith('1'):\n self._read_header(line)\n elif line.startswith('5'):\n self._read_batch_header(line)\n elif line.startswith('6'):\n self._read_entry_detail(line)\n elif line.startswith('7'):\n self._read_addenda_record(line)\n elif line.startswith('8'):\n self._read_batch_control_record(line)\n elif line.startswith('9'):\n if line == '9' * 94:\n continue\n self._read_file_control_record(line)",
"def __reader(self):\n empty = bytes()\n\n try:\n while not self._wantExit:\n # logging.debug(\"reading character\")\n b = self._readBytes(1)\n # logging.debug(\"In reader loop\")\n if len(b) > 0:\n # logging.debug(f\"read returned {b}\")\n c = b[0]\n ptr = len(self._rxBuf)\n\n # Assume we want to append this byte, fixme use bytearray instead\n self._rxBuf = self._rxBuf + b\n\n if ptr == 0: # looking for START1\n if c != START1:\n self._rxBuf = empty # failed to find start\n if self.debugOut != None:\n try:\n self.debugOut.write(b.decode(\"utf-8\"))\n except:\n self.debugOut.write('?')\n\n elif ptr == 1: # looking for START2\n if c != START2:\n self._rxBuf = empty # failed to find start2\n elif ptr >= HEADER_LEN: # we've at least got a header\n # big endian length follos header\n packetlen = (self._rxBuf[2] << 8) + self._rxBuf[3]\n\n if ptr == HEADER_LEN: # we _just_ finished reading the header, validate length\n if packetlen > MAX_TO_FROM_RADIO_SIZE:\n self._rxBuf = empty # length ws out out bounds, restart\n\n if len(self._rxBuf) != 0 and ptr + 1 == packetlen + HEADER_LEN:\n try:\n self._handleFromRadio(self._rxBuf[HEADER_LEN:])\n except Exception as ex:\n logging.error(\n f\"Error while handling message from radio {ex}\")\n traceback.print_exc()\n self._rxBuf = empty\n else:\n # logging.debug(f\"timeout\")\n pass\n except serial.SerialException as ex:\n if not self._wantExit: # We might intentionally get an exception during shutdown\n logging.warn(f\"Meshtastic serial port disconnected, disconnecting... {ex}\")\n except OSError as ex:\n if not self._wantExit: # We might intentionally get an exception during shutdown\n logging.error(f\"Unexpected OSError, terminating meshtastic reader... {ex}\") \n except Exception as ex:\n logging.error(f\"Unexpected exception, terminating meshtastic reader... {ex}\")\n finally:\n logging.debug(\"reader is exiting\")\n self._disconnected()",
"def from_buffer(data, encoding='pem'):\n return X509Csr.from_open_file(io.BytesIO(data), encoding)",
"def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)",
"def parse(self, buffer, ignore_binary = False):\n self._buffer = buffer\n self._index = 0\n self._keep_binary = not ignore_binary\n return self._parse()",
"def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data",
"def __parse(self) -> object:\r\n char = self.data[self.idx: self.idx + 1]\r\n if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:\r\n str_len = int(self.__read_to(b':'))\r\n return self.__read(str_len)\r\n elif char == b'i':\r\n self.idx += 1\r\n return int(self.__read_to(b'e'))\r\n elif char == b'd':\r\n return self.__parse_dict()\r\n elif char == b'l':\r\n return self.__parse_list()\r\n elif char == b'':\r\n raise DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))\r\n else:\r\n raise DecodingError('Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))",
"def readline(self) -> bytes:\n ...",
"def readline(self) -> bytes:\n ...",
"def parse(cls, packet):\n buffer = DNSBuffer(packet)\n try:\n header = DNSHeader.parse(buffer)\n questions = []\n rr = []\n auth = []\n ar = []\n for i in range(header.q):\n questions.append(DNSQuestion.parse(buffer))\n for i in range(header.a):\n rr.append(RR.parse(buffer))\n for i in range(header.auth):\n auth.append(RR.parse(buffer))\n for i in range(header.ar):\n ar.append(RR.parse(buffer))\n return cls(header, questions, rr, auth=auth, ar=ar)\n except DNSError:\n raise\n except (BufferError, BimapError) as e:\n raise DNSError(\"Error unpacking DNSRecord [offset=%d]: %s\" % (\n buffer.offset, e))",
"def read(self, s):\n pass",
"def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 86\n (_x.sample_count, _x.ekf_roll, _x.ekf_pitch, _x.ekf_yaw, _x.ekf_lat, _x.ekf_lon, _x.ekf_alt, _x.ekf_vN, _x.ekf_vE, _x.ekf_vD, _x.ekf_vX, _x.ekf_vY, _x.ekf_vZ, _x.rad_gyro_X, _x.rad_gyro_Y, _x.rad_gyro_Z, _x.angular_acc_X, _x.angular_acc_Y, _x.angular_acc_Z, _x.alt_DVL,) = _get_struct_I3f2d13fH().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.dvl_error_code = str[start:end]\n _x = self\n start = end\n end += 73\n (_x.flag_to_check, _x.imu_deg_gyro_X, _x.imu_deg_gyro_Y, _x.imu_deg_gyro_Z, _x.imu_mag_X, _x.imu_mag_Y, _x.imu_mag_Z, _x.imu_acc_X, _x.imu_acc_Y, _x.imu_acc_Z, _x.gps_lat, _x.gps_lon, _x.gps_alt, _x.gps_vN, _x.gps_vE, _x.gps_vD, _x.dvl_vX, _x.dvl_vY, _x.dvl_vZ,) = _get_struct_B9f2i7f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill",
"def _readline(self):\n\n eol = b'\\r'\n leneol = len(eol)\n line = bytearray()\n while True:\n c = self.ser.read(1)\n if c:\n line += c\n if line[-leneol:] == eol:\n break\n else:\n break\n return bytes(line)",
"def read_buffer(serial):\r\n resp = serial.read_all()\r\n return resp.decode()",
"def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 29\n (_x.status, _x.index, _x.range, _x.range_rate, _x.range_accl, _x.azimuth, _x.lateral_rate, _x.width, _x.is_mr_update, _x.is_lr_update, _x.amplitude,) = _get_struct_2B6f2Bb().unpack(str[start:end])\n self.is_mr_update = bool(self.is_mr_update)\n self.is_lr_update = bool(self.is_lr_update)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill",
"def read(rcfilename, silent=False) :\n\n rcdict = RcFile(rcfilename, silent=silent)\n\n return rcdict.values",
"def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 2580\n self.Rscanpose = _get_struct_645f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle,) = _get_struct_ihih3i3d2i2d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill",
"def Read_RMCA_out(Complete_Path):\n fid = open(Complete_Path,'r')\n L,R = [],[]\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n L.append(float(line[:25]))\n R.append(float(line[27:-2]))\n return np.array(L),np.array(R)",
"def read():\n # TODO",
"def read(filepath_or_buffer: FilePathOrBuffer) -> Grid:\n with _handle_buf(filepath_or_buffer) as buf:\n return ZincParser(ZincTokenizer(buf)).parse()",
"def parse(self, buffer, ignore_binary = False):\n if buffer == \"\":\n return False\n\n self._buffer = buffer\n self._index = 0\n return self._parse()",
"def parse_crn_string(data):\n crn_document = crn_document_setup()\n return _post_process(crn_document.parseString(data).asList())",
"def ParseRval(rval_content):\n\n lines = rval_content.split('\\n')\n last_line = lines.pop()\n assert last_line == ''\n verdict = NCVAL_VERDICT[lines.pop()]\n\n offsets = set()\n for prev_line, line in zip([None] + lines, lines):\n if line.startswith('VALIDATOR: Checking jump targets:'):\n continue\n if line.startswith('VALIDATOR: Checking that basic blocks are aligned'):\n continue\n\n # Skip disassembler output of the form\n # VALIDATOR: 0000000000000003: 49 89 14 07 mov [%r15+%rax*1], %rdx\n m = re.match(r'VALIDATOR: ([0-9a-f]+):', line, re.IGNORECASE)\n if m is not None:\n continue\n\n # Parse error message of the form\n # VALIDATOR: ERROR: 20: Bad basic block alignment.\n m = re.match(r'VALIDATOR: ERROR: ([0-9a-f]+): (.*)', line, re.IGNORECASE)\n if m is not None:\n offset = int(m.group(1), 16)\n offsets.add(offset)\n continue\n\n # Parse two-line error messages of the form\n # VALIDATOR: 0000000000000003: 49 89 14 07 mov [%r15+%rax*1], %rdx\n # VALIDATOR: ERROR: Invalid index register in memory offset\n m = re.match(r'VALIDATOR: (ERROR|WARNING): .*$', line, re.IGNORECASE)\n if m is not None:\n message_type = m.group(1)\n assert prev_line is not None, (\n \"can't deduce error offset because line %r \"\n \"is not preceded with disassembly\" % line)\n m2 = re.match(r'VALIDATOR: ([0-9a-f]+):', prev_line, re.IGNORECASE)\n assert m2 is not None, \"can't parse line %r preceding line %r\" % (\n prev_line,\n line)\n offset = int(m2.group(1), 16)\n if message_type != 'WARNING':\n offsets.add(offset)\n continue\n\n raise AssertionError(\"can't parse line %r\" % line)\n\n return ValidatorResult(verdict=verdict, offsets=offsets)",
"def __read_block(self, buffer, startchr, endchr):\n\t\ttoken = buffer.read(1)\n\t\twhile token != startchr:\n\t\t\ttoken = buffer.read(1)\n\t\t\tif not token:\n\t\t\t\traise ValueError(\"read_block could not find beginning of block\")\n\t\t\n\t\tret = []\n\t\tcount = 1\n\t\twhile count:\n\t\t\ttoken = buffer.read(1)\n\t\t\tif token == startchr:\n\t\t\t\tcount += 1\n\t\t\telif token == endchr:\n\t\t\t\tcount -= 1\n\t\t\tif count:\n\t\t\t\tret.append(token)\n\t\t\tif not token:\n\t\t\t\tbreak\n\t\t\n\t\treturn \"\".join(ret)"
] |
[
"0.5503501",
"0.54348403",
"0.5418983",
"0.5363614",
"0.53104985",
"0.5271902",
"0.5271709",
"0.5269464",
"0.52663326",
"0.52595025",
"0.5241821",
"0.52365834",
"0.51867217",
"0.51867217",
"0.5183559",
"0.5170246",
"0.51675284",
"0.5160428",
"0.5148865",
"0.5136454",
"0.5135368",
"0.51104254",
"0.51081103",
"0.5102294",
"0.51007617",
"0.5092493",
"0.5072205",
"0.5069695",
"0.5060593",
"0.505638"
] |
0.81372935
|
0
|
Compute the Longest Proper Prefix which is also a Suffix (LPS) array.
|
def compute_lsp(pattern, patt_len, lps):
pointer = 0
lps[0] = 0
i = 1
while i < patt_len:
if pattern[i] == pattern[pointer]:
pointer += 1
lps[i] = pointer
i += 1
else:
if pointer != 0:
pointer = lps[pointer - 1]
else:
lps[i] = 0
i += 1
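# Illustrative addition, not part of the original sample: a minimal KMP matcher
# showing how the LPS array filled in by the helper above (named compute_lsp in
# this sample) is typically used to skip redundant comparisons. kmp_search is a
# hypothetical name; the sketch assumes a non-empty pattern.
def kmp_search(text, pattern):
    lps = [0] * len(pattern)
    compute_lsp(pattern, len(pattern), lps)  # fills lps in place
    matches = []
    j = 0  # length of the pattern prefix currently matched
    for i, ch in enumerate(text):
        while j > 0 and ch != pattern[j]:
            j = lps[j - 1]  # fall back to the longest proper border
        if ch == pattern[j]:
            j += 1
        if j == len(pattern):
            matches.append(i - j + 1)  # match ends at i; record its start
            j = lps[j - 1]  # continue, allowing overlapping matches
    return matches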
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_lcp(s,sa):\n lcp = list()\n lcp.append(0)\n for i in range(1,len(sa)):\n lcp.append( longest_prefix_length(s, sa[i], sa[i-1]) )\n return lcp",
"def longest_increasing_subsequence(X):\r\n N = len(X)\r\n P = [0] * N\r\n M = [0] * (N+1) \r\n L = 0\r\n for i in range(N):\r\n lo = 1\r\n hi = L\r\n while lo <= hi:\r\n mid = (lo+hi)//2\r\n if (X[M[mid]] < X[i]):\r\n lo = mid+1\r\n else:\r\n hi = mid-1\r\n \r\n newL = lo\r\n P[i] = M[newL-1] \r\n M[newL] = i\r\n #print(newL)\r\n #print(M[L])\r\n \r\n if (newL > L):\r\n L = newL\r\n S = []\r\n k = M[L]\r\n for i in range(L-1, -1, -1):\r\n S.append(X[k])\r\n k = P[k]\r\n print(S)\r\n print(k+1)\r\n \r\n\r\n print('\\nLength of obtained LIS for 30 days stock prices is :: %d'%(len(S)))\r\n return S[::-1]",
"def _LPSArray(word):\r\n lps = [0] * len(word)\r\n wordIdx = 0\r\n delta = 1\r\n while delta < len(word):\r\n while delta < len(word) and word[delta] == word[wordIdx]:\r\n lps[delta] = wordIdx + 1\r\n wordIdx += 1\r\n delta += 1\r\n if wordIdx != 0:\r\n wordIdx = lps[wordIdx-1]\r\n else:\r\n delta += 1\r\n return lps",
"def good_suffix_rule(self, i):\n length = len(self.big_l)\n assert i < length\n if i == length - 1:\n return 0\n i += 1 # i points to leftmost matching position of P\n if self.big_l[i] > 0:\n return length - self.big_l[i]\n return length - self.small_l_prime[i]",
"def longest_prefix_length(s, i, j):\n l = 0\n while (i+l < len(s)) and (j+l < len(s)):\n if s[i+l] != s[j+l]:\n break\n l += 1\n return l",
"def _good_suffix_table_one(self, pattern):\n\n pat_len = len(pattern)\n L = [-1 for _ in pattern]\n preproc_array = self._preprocessing(pattern[::-1])\n preproc_array.reverse()\n for j in range(0, pat_len-1):\n i = pat_len - preproc_array[j]\n if i != pat_len:\n L[i] = j\n\n return L",
"def lps(mask):\n if not mask: return 0\n if not mask & (mask-1): return 1\n lo = int(log2(mask & ~(mask-1))) # least significant set bi\n hi = int(log2(mask)) # most significant set bit \n if s[lo] == s[hi]: return 2 + lps(mask^(1<<lo)^(1<<hi))\n return max(lps(mask^(1<<lo)), lps(mask^(1<<hi)))",
"def LPSubsequence(str):\n if str is None or len(str) == 0:\n return \"\"\n\n sl = len(str) # sl is string length\n\n # Create a table to store results of subproblems\n L = [[0 for x in range(sl)] for x in range(sl)]\n\n # Create palindrome of 1 for each character in input string (a)\n for i in range(sl):\n L[i][i] = str[i]\n\n # cl is check string length\n for cl in range(2, sl + 1):\n for start in range(sl - cl + 1):\n stop = start + cl - 1\n first = str[start]\n last = str[stop]\n if first == last and cl == 2:\n L[start][stop] = first * 2\n elif first == last:\n L[start][stop] = first + L[start + 1][stop - 1] + last\n else:\n L[start][stop] = LPSubsequenceLongest(\n L[start][stop - 1], L[start + 1][stop])\n\n return L[0][sl - 1]",
"def _get_maximum_prefix(self):\n return self.__maximum_prefix",
"def computeLPSArray(pattern):\n if isinstance(pattern, str) == False:\n raise Exception(\"The pattern is not of type string\")\n if pattern is None or pattern == \"\":\n raise Exception(\"Pattern is not defined correctly\")\n \n i = 0\n j = 1\n \n # Create an empty array, filled with 0.\n # IMPORTANT: since the 1st value of the array must be 0,\n # the index j (used to fill the array) will start from 1.\n arrayLPS = [0]*len(pattern)\n \n # Fill out the array\n while j < len(pattern):\n # If the current characters match, increment i, assign\n # to array[j] the new value of i, then increment i\n if pattern[i] == pattern[j]:\n i += 1\n arrayLPS[j] = i\n j += 1\n # If the current characters do not match and i is equal to 0\n # it means that for the current character, the current prefix \n # that is also a suffix has length 0, so we put 0 in our array\n elif i == 0:\n arrayLPS[j] = 0\n j += 1\n # If the current characters do not match but i is different \n # from 0, we need to check the value that is in the i-1 position\n # and assign it to i, without incrementing j (because we want to\n # check whether the characters corresponding to our new i and old j\n # are a match)\n else:\n i = arrayLPS[i-1]\n return arrayLPS",
"def _calc_lcs(\n self,\n reference: str,\n hypothesis: str) -> int:\n words_ref = ['<S>'] + reference.split(' ')\n words_hyp = ['<S>'] + hypothesis.split(' ')\n\n # calc LCS by DP\n N = len(words_ref)\n M = len(words_hyp)\n LCS = []\n\n for i in range(N):\n LCS.append([0] * M)\n\n for i in range(1, N):\n for j in range(1, M):\n if words_ref[i - 1] == words_hyp[j - 1]:\n LCS[i][j] = LCS[i - 1][j - 1] + 1\n else:\n LCS[i][j] = max(LCS[i - 1][j], LCS[i][j - 1])\n\n return LCS[N-1][M-1]",
"def __lcsm(protein_seqs):\n\tmax_motif = ''\n\tif not (protein_seqs and isinstance(protein_seqs, list)):\n\t\traise Exception(\n\t\t\t\"Invalid protein sequences! Please check your data.\")\n\tprotein_seqs.sort(key = lambda seq: len(seq))\n\tshortest_seq_len = len(protein_seqs[0])\n\tfor l in range(0, shortest_seq_len):\n\t\tr = shortest_seq_len-1\n\t\twhile r >= l:\n\t\t\tfound = True\n\t\t\tmotif = protein_seqs[0][l:r+1]\n\t\t\tfor seq in protein_seqs[1:]:\n\t\t\t\tif motif not in seq:\n\t\t\t\t\tfound = False\n\t\t\t\t\tbreak\n\t\t\tif found and len(motif)>=len(max_motif):\n\t\t\t\tmax_motif = motif\n\t\t\t\tbreak\n\t\t\tr-=1\n\treturn max_motif",
"def longest_common_prefix(strings: list):\n raise NotImplemented",
"def compute_lps_array(pattern):\n lps = [0 for _ in pattern]\n j = 0\n i = 1\n while i < len(pattern):\n if pattern[i] == pattern[j]:\n lps[i] = j + 1\n j += 1\n i += 1\n elif j != 0:\n j = lps[j-1]\n else:\n lps[i] = 0\n i += 1\n return lps",
"def longestPalindromeSubseq(self, s: str) -> int:\n n = len(s)\n dp = [[1] * n for _ in range(n)]\n for length in range(1, n + 1):\n for i in range(n - length + 1):\n j = i + length - 1\n print(i, j)\n if length == 1:\n dp[i][j] = 1\n elif s[i] == s[j]:\n dp[i][j] = dp[i + 1][j - 1] + 2\n else:\n dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])\n return dp[0][n - 1]",
"def get_length_of_longest_sub_array(l):\n if len(l) < 1:\n return 0\n\n longest_seen_sequence = 0\n\n this_sequence_length = 1\n\n previous = l[0]\n\n for _, current in enumerate(l):\n\n if current > previous:\n this_sequence_length = this_sequence_length + 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n else:\n this_sequence_length = 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n previous = current\n\n return longest_seen_sequence",
"def get_top_mps(D, L):\n #mps = np.zeros([D]*(L+1),np.object) # L+1 because of dangling bond\n mps = np.zeros([D]*(L+1)) # L+1 because of dangling bond\n\n # phys dangler\n # (0000) 1 = 1\n # (1000) 0 = 1\n # (0100) 0 = 1\n # ... ... ...\n\n for i in range(L+1):\n index = [a]*(L+1)\n index[i] = z\n mps[tuple(index)] = 1\n\n return np.reshape(mps, [np.prod(mps.shape)])",
"def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]",
"def _lcs(pred_tokens: Sequence[str], target_tokens: Sequence[str], return_full_table: bool=False) ->Union[int, Sequence[Sequence[int]]]:\n lcs = [([0] * (len(pred_tokens) + 1)) for _ in range(len(target_tokens) + 1)]\n for i in range(1, len(target_tokens) + 1):\n for j in range(1, len(pred_tokens) + 1):\n if target_tokens[i - 1] == pred_tokens[j - 1]:\n lcs[i][j] = lcs[i - 1][j - 1] + 1\n else:\n lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])\n if return_full_table:\n return lcs\n return lcs[-1][-1]",
"def lengthOfLongestSubstring(s):\n arr = [1] * len(s)\n i = 0\n j = 1\n while j < len(s):\n if s[j] not in s[i:j]:\n arr[i] += 1\n j = j + 1\n else:\n i = i + 1\n j = i + 1\n return max(arr)",
"def _good_suffix_table_two(self, pattern):\n\n H = [0 for _ in pattern]\n preproc_array = self._preprocessing(pattern)\n longest = 0\n prep_ar_len = len(preproc_array)\n for i in xrange(0, prep_ar_len):\n if preproc_array[prep_ar_len-i-1] == i+1:\n longest = max(preproc_array[prep_ar_len-i-1], longest)\n H[-i-1] = longest\n\n return H",
"def find_lis(seq):\n\n # https://rosettacode.org/wiki/Longest_increasing_subsequence#Python:_O.28nlogn.29_Method_from_Wikipedia.27s_LIS_Article.5B1.5D\n\n l = len(seq)\n previous = [0] * l\n minimum = [0] * (l + 1)\n length = 0\n for i in range(l):\n low = 1\n high = length\n while low <= high:\n mid = (low + high) // 2\n if seq[minimum[mid]] < seq[i]:\n low = mid + 1\n else:\n high = mid - 1\n\n new = low\n previous[i] = minimum[new - 1]\n minimum[new] = i\n\n if new > length:\n length = new\n\n s = []\n k = minimum[length]\n for i in range(length - 1, -1, -1):\n s.append(seq[k])\n k = previous[k]\n return s[::-1]",
"def longest_common_prefix(fst: str, snd: str) -> str:\n bound = 0\n for a, b in zip(fst, snd):\n if a != b:\n break\n bound += 1\n return fst[:bound]",
"def lengthOfLIS(self, nums: List[int]) -> int:\n# time complexity: O(n^2), space complexity: O(n)\n# this is inspired by the solution provided by the question.\n# dp\n# the idea is to use a list longest to record say i-th element in nums, if as the last of the longest possible subsquence, how long the subsquence would be.\n \n\n# time complexity: O(nlogn), space complexity: O(n)\n# dp with binary search\n# the key idea is to use a list to store the longest possible sequence, but the element in the list is not necessarily correct. Every element say record_long[i] in the list means the end of longest subsequence of length i+1\n# this is inspired by @bolinq in the discussion area.\n import bisect\n record_long = []\n for num in nums:\n index = bisect.bisect_left(record_long, num)\n if index == len(record_long):\n record_long.append(num)\n else:\n record_long[index] = num\n \n return len(record_long)",
"def _lcs_dp(a, b):\n dp = [[0 for _ in range(0, len(b) + 1)]\n for _ in range(0, len(a) + 1)]\n # dp[i][j]: lcs_len(a[:i], b[:j])\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n if a[i - 1] == b[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n return dp",
"def longestAwesome(self, s: str) -> int:\n\n # So we are moving right, and reducing length by 1\n # for every time we move right - we start from the longest substring that can be formed to lowest one\n # So the moment, we find something we can instantly breal\n\n max_length = 0\n\n if s == s[::-1]:\n return len(s)\n\n for i in range(0, len(s)):\n left = i\n right = len(s)\n\n if right - left > max_length:\n\n while right > left:\n\n candidate = s[left:right]\n # print(f\"The candidate is: {candidate}\")\n ctr = Counter(candidate)\n\n # initial base check\n odd_cnt = 0\n fl = False\n for k, v in ctr.items():\n if v & 1:\n odd_cnt += 1\n if odd_cnt > 1:\n fl = True\n break\n\n if not fl:\n if max_length < (right - left):\n max_length = right - left\n # max_length = max(max_length, len(candidate))\n\n right -= 1\n\n return max_length",
"def longest_increasing_sub_seq(A):\n\n # boundary cases\n\n # The lenght the of the given list\n arr_len = len(A)\n\n if arr_len <= 1:\n return arr_len\n\n # Create an auxiliary array that will hold the \"end elements\"\n # of the intermeditae LIS' that we will be creating\n\n aux_array = [0 for _ in range(arr_len + 1)]\n\n # Initialize aux_array[0] = A[0]\n aux_array[0] = A[0]\n\n # l acts as our pointer, always points to an empty slot\n l = 1\n\n # Now iterate through the array\n for i in range(1, arr_len):\n if A[i] < aux_array[0]:\n # This is the new smallest value\n # Replace aux_array[0] = A[i]\n\n # i.e we are starting over again, creating a new active list of lenght 1\n # Case 1\n aux_array[0] = A[i]\n\n elif A[i] > aux_array[l - 1]:\n # Case 2: A[i] is largets among all active lists\n aux_array[l] = A[i]\n l += 1\n\n else:\n # Case 3\n # A[i] is in between\n # A[i] wants to be current end candidate of an existing subsequence\n index = get_ceil_index(-1, l - 1, A[i], aux_array)\n aux_array[index] = A[i]\n\n\n return l",
"def mincost(L,m,n):\n # find the length of the strings\n # declaring the array for storing the dp values\n tc = [[0]*(n + 1) for i in range(m + 1)]\n tc[0][0]=L[0][0]\n for i in range(1,m + 1):\n tc[i][0]=tc[i-1][0]+L[i][0]\n for j in range(1,n + 1):\n tc[0][j]=tc[0][j-1]+L[0][j]\n for i in range(1,m+1):\n for j in range(1,n+1):\n tc[i][j]=min(tc[i-1][j-1],tc[i-1][j],tc[i][j-1])+L[i][j]\n\n\n # L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]\n return tc[m][n]",
"def generate_possible_freqL(pL,sL,er):\n h = sum(pL) # number of different haplotypes\n L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq\n M = [ '0'*(len(L[-1])-len(x))+x for x in L ]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i]/pL[i]]*pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([ int(x) for x in list(g) ])*p_freqL))\n return sorted(list(set(aL+[er,1-er])))",
"def longest_common_subsequence(x, y):\n\n # find the length of the strings\n m = len(x)\n n = len(y)\n\n # declaring the array for storing the dp values\n lcs = np.zeros((m + 1, n + 1))\n\n # iterate through each sub problem\n for i in range(m + 1):\n for j in range(n + 1):\n if i == 0 or j == 0:\n lcs[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n lcs[i, j] = lcs[i - 1, j - 1] + 1\n else:\n # use the optimal substructure property\n # of using already computed results previous subproblems\n lcs[i, j] = max(lcs[i - 1, j], lcs[i, j - 1])\n\n # L[m,n] contains the length of LCS of X[0..n-1] & Y[0..m-1]\n return lcs[m, n]"
] |
[
"0.6896043",
"0.58654726",
"0.5859839",
"0.58537763",
"0.56931084",
"0.56799144",
"0.56313735",
"0.5626806",
"0.55893284",
"0.55066836",
"0.54774266",
"0.53755206",
"0.53678685",
"0.5318928",
"0.5314617",
"0.5309503",
"0.52898115",
"0.5269063",
"0.52025235",
"0.5197834",
"0.5194505",
"0.51284677",
"0.512159",
"0.5100411",
"0.508769",
"0.5079199",
"0.5070503",
"0.5070427",
"0.5068015",
"0.50605434"
] |
0.60028046
|
1
|
Test adding a credential to MongoDB credential store.
|
def test_mdb_add_credential(self):
cred = vccs_auth.credential.from_dict(self.cred_data, None)
id_ = self.mdb.add_credential(cred)
print("Added credential -> id : {!r}".format(id_))
cred2 = self.mdb.get_credential(self.cred_data['credential_id'])
print("Fetched credential :\n{}".format(pformat(cred2)))
self.assertEqual(cred2.to_dict(), self.cred_data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_store_existing_cred(self):\n self.new_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 1)",
"def test_save_creds(self):\n self.new_credentials.save_creds()\n self.assertEqual(len(Credentials.credential_list),1)",
"def test_credential_create(self):\n self.new_credential.credential_create()\n self.assertEqual(len(Credentials.credentials_list), 1)",
"def test_save_credential(self):\n self.new_credential.save_credential() # saving the new credential\n self.assertEqual(len(Credential.credential_list),1)",
"def test_save_credential(self) :\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def save_credential_test(self):\n\n self.new_credentials.save_attributes()\n self.assertEqual(len(Credentials.credentials_list), 1)",
"def add(self, credential: Credential) -> bool:\n try:\n result = self._coll.insert_one(credential.to_dict())\n except DuplicateKeyError:\n logger.warning(f'A credential with credential_id {credential.credential_id} already exists in the db')\n return False\n _success = result.inserted_id == credential.obj_id\n logger.debug(f'Added credential {credential} to the db: {_success}')\n return _success",
"def test_save_credential(self):\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def test_store_multiple_cred(self):\n self.new_cred.save_cred()\n test_cred = Credentials('stackoverflow','Lugaga', 'golfalpharomeo')\n test_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 2)",
"def test_credential_exist(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n\n self.assertTrue(account_found)",
"def test_credential_exists(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\" )\n test_credential.save_attributes()\n\n credential_exist = Credentials.credentials_exist(\"Instagram\")\n self.assertTrue(credential_exist)",
"def test_save_account(self):\n self.new_account.save_account() # add account to list\n self.assertEqual(len(Credential.credential_list),\n 1) # check length of list",
"def test_mdb_add_duplicate_credential(self):\n this_id = 9797\n data = self.cred_data\n data['credential_id'] = this_id\n cred = vccs_auth.credential.from_dict(data, None)\n self.mdb.add_credential(cred)\n cred.derived_key(new='bb' * (512 / 8))\n print cred.to_dict()\n self.assertFalse(self.mdb.add_credential(cred))",
"def setUp(self):\n self.new_credential = Credential(\"winnie\", \"facebook\",\"deinawinnie\",\"winnie\")",
"def test_find_credentials(self):\n self.new_credentials.save_credentials()\n new_account= Credentials(\"Twitter\",\"josephat_otieno\", \"joseotis45\")\n new_account.save_credentials()\n\n found_credential= Credentials.find_credentials(\"Twitter\")\n\n self.assertEqual(found_credential.account_name,new_account.account_name)",
"def test_init(self):\n self.assertEqual(self.new_credential.app_name, \"MySpace\")\n self.assertEqual(self.new_credential.account_name, \"Ghostke99\")\n self.assertEqual(self.new_credential.account_password, \"daimaMkenya001\")",
"async def store_credential(\n self, cred_ex_record: V20CredExRecord, cred_id: str = None\n ) -> None:",
"def add_credential(self, authenticator_id, credential):\n pass",
"def test_search_duplicate(self):\n self.new_credential.credential_create()\n test_credentials = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")\n test_credentials.credential_create()\n search_duplicate = Credentials.search_duplicate(\"MySpace\")\n self.assertTrue(search_duplicate)",
"def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"Facebook\",\"Chris\",\"[email protected]\",\"chris1\") # new credential\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_save_multiple_credential(self) :\n self.new_credential.save_credential()\n test_credential = Credential(\"Instagram\", \"[email protected]\", \"Insta002\") #new credential\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def setUp(self):\n # instantiate an object by populating with dummy values.\n self.new_credential = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")",
"def add_credential(args):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n # next, load the new credential\n new_cred = read_new_credential(args.cred_file)\n\n # check for any conflicts between the new credential and an existing one\n conflicting_cred_idx = None\n for idx, cred in enumerate(creds):\n if cred.username == new_cred.username:\n if len(cred.hostname) > 0 and len(new_cred.hostname) > 0 \\\n and cred.hostname == new_cred.hostname:\n conflicting_cred_idx = idx\n elif len(cred.hostname) == 0 and len(new_cred.hostname) == 0:\n conflicting_cred_idx = idx\n if conflicting_cred_idx is not None:\n if args.force:\n creds[conflicting_cred_idx] = new_cred\n else:\n logger.error(\"Credential already exists; overwrite with --force\")\n return\n else:\n creds.append(new_cred)\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()",
"def setUp(self):\n self.new_cred = Credentials('github','Lugaga', 'tangodown!')",
"def test_add_user_to_db():\n path_users = PATH_TEST / \"users.txt\"\n\n try:\n db = get_database()\n add_users_to_db(db, path_users)\n finally:\n db.drop_collection(USERS_COLLECTION)",
"def test_init(self):\n self.assertEqual(self.new_cred.account_name, 'github')\n self.assertEqual(self.new_cred.username, 'Lugaga')\n self.assertEqual(self.new_cred.password, 'tangodown!')",
"def test__init__(self) :\n self.assertEqual(self.new_credential.accountName, \"snapchat\")\n self.assertEqual(self.new_credential.email, \"[email protected]\")\n self.assertEqual(self.new_credential.password, \"chat001\")",
"def test_find_credentials(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n\n found_credential = Credentials.find_credentials(\"Instagram\")\n\n self.assertEqual(found_credential.account, test_credential.account)",
"def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"winnie\",\"test\",\"login\",\"winnie\")\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_delete_user(self) :\n self.new_credential.save_credential()\n test_credential = Credential(\"peter\", \"Peter\", \"Peter003\") # new user\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)"
] |
[
"0.73998183",
"0.7286204",
"0.7213283",
"0.71673906",
"0.7066448",
"0.69625634",
"0.68952906",
"0.6859978",
"0.68306863",
"0.6701606",
"0.6699401",
"0.6476767",
"0.64438003",
"0.6415699",
"0.6410146",
"0.63984615",
"0.63497376",
"0.63493955",
"0.62973535",
"0.62595546",
"0.6244621",
"0.6235788",
"0.62099826",
"0.61583567",
"0.6127118",
"0.61262435",
"0.61197525",
"0.610438",
"0.6084356",
"0.6039378"
] |
0.7739804
|
0
|
Test adding a duplicate credential to MongoDB credential store.
|
def test_mdb_add_duplicate_credential(self):
    this_id = 9797
    data = self.cred_data
    data['credential_id'] = this_id
    cred = vccs_auth.credential.from_dict(data, None)
    self.mdb.add_credential(cred)
    cred.derived_key(new='bb' * (512 // 8))  # integer division so the repeat count is an int
    print(cred.to_dict())
    self.assertFalse(self.mdb.add_credential(cred))
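# Editorial note (assumption, not from the original sample): the second
# add_credential() call is expected to report failure by returning False rather
# than raising, mirroring the duplicate-key handling shown in one of the
# negative samples for this record (DuplicateKeyError caught, False returned).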
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_search_duplicate(self):\n self.new_credential.credential_create()\n test_credentials = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")\n test_credentials.credential_create()\n search_duplicate = Credentials.search_duplicate(\"MySpace\")\n self.assertTrue(search_duplicate)",
"def test_store_existing_cred(self):\n self.new_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 1)",
"def test_mdb_add_credential(self):\n cred = vccs_auth.credential.from_dict(self.cred_data, None)\n id_ = self.mdb.add_credential(cred)\n print(\"Added credential -> id : {!r}\".format(id_))\n\n cred2 = self.mdb.get_credential(self.cred_data['credential_id'])\n print(\"Fetched credential :\\n{}\".format(pformat(cred2)))\n\n self.assertEqual(cred2.to_dict(), self.cred_data)",
"def add(self, credential: Credential) -> bool:\n try:\n result = self._coll.insert_one(credential.to_dict())\n except DuplicateKeyError:\n logger.warning(f'A credential with credential_id {credential.credential_id} already exists in the db')\n return False\n _success = result.inserted_id == credential.obj_id\n logger.debug(f'Added credential {credential} to the db: {_success}')\n return _success",
"def test_store_multiple_cred(self):\n self.new_cred.save_cred()\n test_cred = Credentials('stackoverflow','Lugaga', 'golfalpharomeo')\n test_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 2)",
"def test_save_creds(self):\n self.new_credentials.save_creds()\n self.assertEqual(len(Credentials.credential_list),1)",
"def test_save_credential(self):\n self.new_credential.save_credential() # saving the new credential\n self.assertEqual(len(Credential.credential_list),1)",
"def test_credential_exists(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\" )\n test_credential.save_attributes()\n\n credential_exist = Credentials.credentials_exist(\"Instagram\")\n self.assertTrue(credential_exist)",
"def test_credential_exist(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n\n self.assertTrue(account_found)",
"def test_credential_create(self):\n self.new_credential.credential_create()\n self.assertEqual(len(Credentials.credentials_list), 1)",
"def test_save_credential(self) :\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def save_credential_test(self):\n\n self.new_credentials.save_attributes()\n self.assertEqual(len(Credentials.credentials_list), 1)",
"def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"Facebook\",\"Chris\",\"[email protected]\",\"chris1\") # new credential\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_save_multiple_credential(self) :\n self.new_credential.save_credential()\n test_credential = Credential(\"Instagram\", \"[email protected]\", \"Insta002\") #new credential\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_save_credential(self):\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304",
"def test_duplicate_peer(self):\n\n\t\tself.db = {'test_hash': [('test', '100.100.100.100', 1000)]}\n\t\ttracker.add_peer(self.db, \\\n\t\t\t\"test_hash\", \"test\", \"100.100.100.100\", 1000)\n\t\tself.assertEqual(self.db, \\\n\t\t\t{'test_hash': [('test', '100.100.100.100', 1000)]})",
"def test_save_multiple_accounts(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n self.assertEqual(len(Credentials.credentials_list), 2)",
"def test_find_credentials(self):\n self.new_credentials.save_credentials()\n new_account= Credentials(\"Twitter\",\"josephat_otieno\", \"joseotis45\")\n new_account.save_credentials()\n\n found_credential= Credentials.find_credentials(\"Twitter\")\n\n self.assertEqual(found_credential.account_name,new_account.account_name)",
"def test_duplicate_user(self):\n json_resp = make_user(self.client)\n json_resp = make_user(self.client, username='Blah')\n # email should be taken\n self.assertEqual(json_resp['status'], 'email taken')\n # check only one user in the db\n self.assertEqual(User.query.count(), 1)\n # username should be taken\n json_resp = make_user(self.client, email='[email protected]')\n # check api response\n self.assertEqual(json_resp['status'], 'username taken')",
"def test_register_duplicate(self):\n self._storage.register_user(\"user1\", \"code1\")\n with self.assertRaises(DuplicateUserException):\n self._storage.register_user(\"user1\", \"code1\")",
"def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"winnie\",\"test\",\"login\",\"winnie\")\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def add_credential(args):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n # next, load the new credential\n new_cred = read_new_credential(args.cred_file)\n\n # check for any conflicts between the new credential and an existing one\n conflicting_cred_idx = None\n for idx, cred in enumerate(creds):\n if cred.username == new_cred.username:\n if len(cred.hostname) > 0 and len(new_cred.hostname) > 0 \\\n and cred.hostname == new_cred.hostname:\n conflicting_cred_idx = idx\n elif len(cred.hostname) == 0 and len(new_cred.hostname) == 0:\n conflicting_cred_idx = idx\n if conflicting_cred_idx is not None:\n if args.force:\n creds[conflicting_cred_idx] = new_cred\n else:\n logger.error(\"Credential already exists; overwrite with --force\")\n return\n else:\n creds.append(new_cred)\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()",
"def test_secrets_add_already_exist():\n status_code = 409\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n message = \"One of the secrets already exists. No secrets were added.\"\n mock_http_response = Mock(\n status_code=status_code,\n reason=\"Conflict\",\n json=Mock(return_value={\"message\": \"Conflict\"}),\n )\n rs_api_client_mock = Mock()\n rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response))\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\"reana_client.api.client.current_rs_api_client\", rs_api_client_mock):\n result = runner.invoke(\n cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", \"USER=reanauser\"]\n )\n assert message in result.output\n assert result.exit_code == 1",
"async def test_invalid_insert_user_duplicate_key(database):\n await database.setup_database(reset=True)\n await database.insert_user(\"\")\n for user_id in zip([\"1\" for _ in range(0,10)]):\n try:\n await database.insert_user(user_id=user_id)\n assert False\n except:\n assert True\n await database.close_pool()",
"def test_save_account(self):\n self.new_account.save_account() # add account to list\n self.assertEqual(len(Credential.credential_list),\n 1) # check length of list",
"def test_with_duplicate_user(data_store_path):\n data_set = [{\"name\": \"Eric Idle\", \"phone\": \"123-456-7890\", \"address\": \"here\"}]\n data_store_path.write_text(yaml.dump(data_set))\n data_store = YAMLDataStore(file_path=str(data_store_path))\n assert data_store._users == data_set\n\n user = {\"name\": \"Eric Idle\", \"phone\": \"999-999-9999\", \"address\": \"not here\"}\n with pytest.raises(DuplicateUserError) as error:\n data_store.create(user)\n\n assert \"Eric Idle\" in str(error.value)",
"def test_duplicate_username(self):\n self.duplicate_username = {'user': {\n \"username\": \"remmy\",\n \"email\": \"[email protected]\",\n \"password\": \"@Password123\"\n }}\n\n self.duplicate_username2 = {'user': {\n \"username\": \"remmy\",\n \"email\": \"[email protected]\",\n \"password\": \"@Password123\"\n }}\n\n self.client.post(\n self.reg_url,\n self.duplicate_username,\n format=\"json\")\n\n response = self.client.post(\n self.reg_url,\n self.duplicate_username2,\n format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"Username provided already in use\", response.content)",
"def test_add_with_existing_key(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 201\n self.client.login(user='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 409",
"def test_duplicate_entries(self):"
] |
[
"0.78255934",
"0.7580541",
"0.71481156",
"0.71136874",
"0.7055291",
"0.7006554",
"0.6912832",
"0.6901657",
"0.6888127",
"0.6855711",
"0.67945147",
"0.6759346",
"0.6653814",
"0.66450834",
"0.66354036",
"0.64718235",
"0.64647275",
"0.6450949",
"0.64090073",
"0.6342286",
"0.6331531",
"0.63221025",
"0.624176",
"0.6239293",
"0.62228507",
"0.62148815",
"0.61966777",
"0.61559296",
"0.614432",
"0.613651"
] |
0.7705258
|
1
|
Test fetching unknown credential.
|
def test_mdb_get_unknown_credential(self):
res = self.mdb.get_credential(1234567890)
self.assertEqual(res, None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_creds_not_found():\n assert_equal(find_credentials({'foo': 'bar'}), (None, None))",
"def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password",
"def test_credential_default_values():\n creds = Credentials()\n assert creds.url is None\n assert creds.token is None\n assert creds.org_key is None\n assert creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None\n with pytest.raises(AttributeError):\n assert creds.notexist is None",
"def test_credential_exist(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n\n self.assertTrue(account_found)",
"def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_missing_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with self.assertRaises(exceptions.CredentialNotFound):\n twine.validate_credentials()",
"def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_credential_partial_loads():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": 0}\n creds = Credentials(init_dict)\n assert creds.url == \"http://example.com\"\n assert creds.token is None\n assert creds.org_key is None\n assert not creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None",
"def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_credential_boolean_parsing_failure():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": \"bogus\"}\n with pytest.raises(CredentialError):\n Credentials(init_dict)",
"def test_no_credentials(remove_api_key):\n with raises(\n RuntimeError,\n match=\"Failed to read API key. Did you forget to set GIPHY_API_KEY environment variable?\",\n ):\n api_credentials_provider.resolve_credentials()",
"def test_get_credentials_from_keyring_if_not_in_keyring(\n self, mock_keyring):\n mock_keyring.get_keyring.return_value = True\n return_values = {'username': None}\n\n def side_effect(_, arg):\n if arg is None:\n raise TypeError('NoneType instead of str')\n return return_values[arg]\n\n mock_keyring.get_password.side_effect = side_effect\n credentials = get_credentials_from_keyring('TestPlatform')\n self.assertEqual(credentials, None)",
"def test_discover_no_cli_creds(self):\n entry = mock.MagicMock(user=None, password=None, enable_password=None)\n vendor = mock.MagicMock()\n vendor_settings = mock.MagicMock()\n self.networking_handler._get_cli_credentials = mock.MagicMock(return_value=None)\n\n # act\n result = self.networking_handler.discover(entry=entry,\n vendor=vendor,\n vendor_settings=vendor_settings)\n\n # verify\n self.assertEqual(result, entry)\n self.assertEqual(entry.comment, \"Unable to discover device user/password/enable password\")\n self.assertIsNone(entry.user)\n self.assertIsNone(entry.password)\n self.assertIsNone(entry.enable_password)",
"def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert res.status_code == status.HTTP_401_UNAUTHORIZED",
"def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])",
"def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])",
"def test_001_unauthorized_access(self):\n false_token = \"12345\"\n self.info(\"Will use token %s\", false_token)\n client = ComputeClient(self.clients.compute_url, false_token)\n client.CONNECTION_RETRY_LIMIT = self.clients.retry\n\n with self.assertRaises(ClientError) as cl_error:\n client.list_servers()\n self.assertEqual(cl_error.exception.status, 401)",
"def test_invalid_credentials_forbidden(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPForbidden)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)",
"def test_invalid_credentials_unauthorized(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPUnauthorized)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)",
"def test_get_availability_with_invalid_credentials(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=invalid-username&Password=invalid-password&Table=Availability&Format=tab&Location=1\"\n file = open(\"./tests/fixtures/not_logged.html\", \"r\")\n m.register_uri(\"GET\", url, status_code=200, text=file.read())\n file.close\n\n cellartracker = CellarTracker(username=\"invalid-username\", password=\"invalid-password\")\n with self.assertRaises(AuthenticationError):\n cellartracker.get_availability()",
"def test_ApiConnectionWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n connection = ApiConnection(self.userId, \"\")\n self.assertFalse(connection.connected())",
"def test_get_pending_with_invalid_credentials(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=invalid-username&Password=invalid-password&Table=Pending&Format=tab&Location=1\"\n file = open(\"./tests/fixtures/not_logged.html\", \"r\")\n m.register_uri(\"GET\", url, status_code=200, text=file.read())\n file.close\n\n cellartracker = CellarTracker(username=\"invalid-username\", password=\"invalid-password\")\n with self.assertRaises(AuthenticationError):\n cellartracker.get_pending()",
"def test_unhandledCredentials(self):\n realm = TestRealm()\n portal = Portal(realm)\n # This portal has no checkers, so all logins will fail with\n # UnhandledCredentials\n self.server.portal = portal\n\n self.server.challengers[b'LOGIN'] = loginCred = imap4.LOGINCredentials\n\n verifyClass(IChallengeResponse, loginCred)\n\n cAuth = imap4.LOGINAuthenticator(b'testuser')\n self.client.registerAuthenticator(cAuth)\n\n def auth():\n return self.client.authenticate(b'secret')\n\n d1 = self.connected.addCallback(strip(auth))\n d1.addErrback(self.assertClientFailureMessage,\n b\"Authentication failed: server misconfigured\")\n d1.addCallbacks(self._cbStopClient, self._ebGeneral)\n d = defer.gatherResults([self.loopback(), d1])\n return d",
"def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")",
"def test_GET_fetcher_fail():\n bad_url = GET_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_get_invalid_secret(self):\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get', kwargs={'secret': build_secret()}\n )\n )\n self.assertEqual(response.status_code, 401)"
] |
[
"0.68713856",
"0.6636915",
"0.64731884",
"0.63755894",
"0.63538057",
"0.63538057",
"0.63509166",
"0.63429207",
"0.6336358",
"0.63246906",
"0.6319021",
"0.6315944",
"0.6310986",
"0.62921965",
"0.6276207",
"0.627014",
"0.6215178",
"0.6157719",
"0.61278623",
"0.61278623",
"0.6101735",
"0.6088584",
"0.607997",
"0.60741436",
"0.60715365",
"0.6035838",
"0.60340625",
"0.60082835",
"0.5986586",
"0.59854144"
] |
0.77316827
|
0
|
Test revoking a credential.
|
def test_mdb_revoking_credential(self):
this_id = 9898
data = self.cred_data
data['credential_id'] = this_id
cred = vccs_auth.credential.from_dict(data, None)
self.mdb.add_credential(cred)
# assert no exception
cred2 = self.mdb.get_credential(this_id)
print("Revoking credential :\n{}".format(pformat(cred2)))
cred2.revoke({'reason': 'unit testing'})
self.mdb.update_credential(cred2)
# assert exception when fetching revoked credential
with self.assertRaises(vccs_auth.credential.VCCSAuthCredentialError):
self.mdb.get_credential(this_id)
# assert exception when trying to activate credential again
with self.assertRaises(ValueError):
cred2.status('active')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_is_revoked(self):\n self.assertEqual(self.project.is_revoked(), False)",
"def test_revoked_cert(self):\n\n # Initially should be able to to operations like open a session\n self._open_session()\n HttpAgentRpc().remove_host(self.host.fqdn)\n\n # After revokation any access should be bounced\n response = self._post([])\n self.assertEqual(response.status_code, 403)\n response = self._get()\n self.assertEqual(response.status_code, 403)",
"def test_logout_revoked(self):\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token has been revoked', str(response.data))",
"def ensure_absent_credential(module, authentication_token):\n params = module.params\n\n url = params['system_url'] + '/iso/pam/credential/{}'.format(params['identifier'])\n\n payload = {\n \"identifier\": params[\"identifier\"]\n }\n\n headers = {'Content-Type': 'application/json',\n \"Authorization\": 'Bearer {}'.format(authentication_token)}\n\n\n r = iso_request(module, url, method=\"DELETE\", headers=headers, data=payload, required_http_code=[200,201,400])\n\n # Check idempotency status\n if r.status_code == 400:\n changed = False\n else:\n changed = True\n\n result = {\"result\": r.json()}\n return (changed, result, r.status_code)",
"def revoke(self):\n # Set the application as unsucessful with the current datetime\n self.status = self.Status.REVOKED\n self.revoked_datetime = timezone.now()\n\n # Removes credentialing from the user\n self.user.is_credentialed = False\n self.user.credential_datetime = None\n\n with transaction.atomic():\n self.user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.user.email))",
"def delete_credential(self, credential):\r\n return self.delete(self.credential_path % (credential))",
"def test_delete_user(self) :\n self.new_credential.save_credential()\n test_credential = Credential(\"peter\", \"Peter\", \"Peter003\") # new user\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def delete_credential(credentials):\n credentials.delete_credentials()",
"def test_delete_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"Facebook\",\"Chris\",\"[email protected]\",\"chris1\") # new credential\n test_credential.save_credential()\n self.new_credential.delete_credential() # Deleting a credential object\n self.assertEqual(len(Credential.credential_list),1)",
"def test_forget_passwd(self, test_client):\n response = test_client.post(\n '/api/auth/forget', json=dict(email=\"[email protected]\"))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True",
"def test_delete_creds(self):\n self.new_credentials.save_creds()\n self.new_credentials.delete_creds()\n\n self.assertEqual(len(Credentials.credential_list),0)",
"def revoke(self):\n # Removes credentialing from the user\n with transaction.atomic():\n self.revoked_datetime = timezone.now()\n\n self.migrated_user.is_credentialed = False\n self.migrated_user.credential_datetime = None\n\n self.migrated_user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.migrated_user.email))",
"def test_expired_credentials():\n pass",
"def testSignOutAndSignBackIn(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.AllowAccess()\n self.host.SignOut()\n self.host.ContinueAuth()\n self.host.AllowAccess()",
"def revoke_authorization(self):\n if not self.runner.browser:\n raise Exception('Webdriver must be connected first.')\n\n self.runner.get(self.revoke_url)\n\n steps = itertools.chain(self.config.get('sign_in_steps', []),\n self.config.get('revoke_steps', []))\n for step in steps:\n self.runner.execute_step(step)",
"def test_delete_hyperflex_local_credential_policy(self):\n pass",
"def test_delete_credentials(self):\n\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n\n self.new_credentials.delete_credentials()\n self.assertEqual(len(Credentials.credentials_list), 1)",
"def test_delete_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"0712345678\",\"test\",\"login\",\"0712345678\")# new credential\n test_credential.save_credential()\n\n self.new_credential.delete_credential()# delete a credential object\n self.assertEqual(len(Credential.credential_list),1)",
"def tearDown(self):\n Credential.credential_list = []",
"def test_user_cannot_reset_password_with_invalid_credential(self):\n self.client().post('/api/v1/auth/signup', data=self.user_data_2)\n login_response = self.client().post('/api/v1/auth/login', data=self.user_data_2)\n self.assertEqual(login_response.status_code, 200)\n #Define header dictionary\n access_token = json.loads(login_response.data.decode())['access_token']\n reset_password = {\n \"user_email\": \"[email protected]\",\n \"old_password\": \"exampletest\",\n \"new_password\": \"123456\"\n }\n reset_response = self.client().post('/api/v1/auth/reset-password',\n headers=dict(Authorization='Bearer ' + access_token), data=reset_password)\n #assert that the status code is equal to 401\n self.assertEqual(reset_response.status_code, 401)\n #return result in json format\n result = json.loads(reset_response.data.decode())\n #test that the response contains a message\n self.assertEqual(result[\"message\"],\n \"Wrong password or email\")",
"def _check_logoff(self, guest_obj):\n # call method and verify if it correctly closes ssh connection\n guest_obj.logoff()\n self._mock_ssh_client_obj.logoff.assert_called_with()",
"def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True",
"def test_delete_o_auth_client_authorization(self):\n pass",
"def unset_credentials(ctx, user, store):\n try:\n logger.debug(\"store={store}, user={user}\".format(store=store, user=user))\n _pycred.unset_credentials(store, user)\n except Exception as e:\n logger.debug(e, exc_info=True)\n print('Error: {msg}'.format(msg=str(e)), file=sys.stderr)\n sys.exit(1)",
"def deleteCredential(self, credentialName):\n try:\n utility.execLog(\"Deleting Credential: %s\" % credentialName)\n self.browserObject, status, result = self.selectCredential(credentialName)\n if not status:\n return self.browserObject, False, result\n # Checking for Default Credentials - 'Delete' will be Disabled\n disabled = self.handleEvent(EC.presence_of_element_located((By.ID, self.CredentialsObjects('deleteCredentials'))), action=\"GET_ATTRIBUTE_VALUE\", attributeName=\"disabled\")\n if \"true\" in disabled:\n return self.browserObject, False, \"Unable to Delete Default Credential: %s\" % credentialName\n # Clicking on Delete\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.CredentialsObjects('deleteCredentials'))), action=\"CLICK\")\n utility.execLog(\"Checking for Confirm Box...\")\n try:\n currentTitle = self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.CommonObjects('GetFormTitle'))), action=\"GET_TEXT\")\n except:\n return self.browserObject, False, \"Unable to Load Confirm Box To Delete Credential\"\n if \"Confirm\" in currentTitle:\n utility.execLog(\"Confirm Box Loaded...Confirming to Delete Credential: '%s'\" % credentialName)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.CommonObjects('ConfirmYes'))), action=\"CLICK\")\n else:\n utility.execLog(\"Failed to Verify Confirm Delete Box :: Actual --> '%s' :: Expected --> '%s'\" % (currentTitle, \"Confirm\"))\n return self.browserObject, False, \"Failed to Verify Confirm Delete Box :: Actual --> '%s' :: Expected --> '%s'\" % (currentTitle, \"Confirm\")\n # Checking for Error Deleting a Credential\n try:\n errorRedBox = self.handleEvent(EC.visibility_of_element_located((By.XPATH, self.CommonObjects('RedBoxError'))), wait_time=10)\n if errorRedBox:\n errorMessage = self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.CommonObjects('RedBoxErrorMessages'))),action=\"GET_TEXT\")\n return self.browserObject, False, \"Failed to Delete Credential :: '%s' :: Error -> %s\" % (credentialName, errorMessage)\n except:\n # Refresh Table\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.CredentialsObjects('credentialsRefresh'))), action=\"CLICK\")\n time.sleep(3)\n # VALIDATION: Selecting deleted Credential\n self.browserObject, status, result = self.selectCredential(credentialName)\n if status:\n return self.browserObject, False, \"Failed to Delete Credential :: '%s' :: Error -> %s\" % (credentialName, \"Validation Error\")\n else:\n return self.browserObject, True, \"Successfully Deleted Credential: '%s'\" % credentialName\n except Exception as e:\n return self.browserObject, False, \"Exception while Deleting Credential :: '%s' :: Error -> %s\" % (credentialName, str(e) + format_exc())",
"def test_revoke_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.invite.refresh_from_db()\n self.assertEqual(self.invite.active, False)",
"def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()",
"def test_mdb_get_unknown_credential(self):\n res = self.mdb.get_credential(1234567890)\n self.assertEqual(res, None)",
"def test_revoke_refresh_token(client, tokens):\n response = client.delete(\n \"/auth/refresh-token/\",\n headers={\"Authorization\": \"Bearer {}\".format(tokens[\"refresh\"])},\n )\n\n payload = response.get_json()\n assert response.status_code == HTTPStatus.OK\n assert payload[\"msg\"] == \"Refresh token successfully revoked\"",
"def _check_token_is_revoked(self, jti: str) -> None:\n redis = self._conn_redis()\n entry = redis.get(jti)\n if entry and entry == 'true':\n raise HTTPException(status_code=401,detail=\"Token has been revoked\")"
] |
[
"0.6389961",
"0.6314945",
"0.62947994",
"0.6172954",
"0.6162046",
"0.6151327",
"0.6143952",
"0.6141443",
"0.6120828",
"0.6111874",
"0.60948175",
"0.60479844",
"0.6022124",
"0.6012169",
"0.5950155",
"0.5893915",
"0.5893513",
"0.58884704",
"0.5770536",
"0.5764254",
"0.575241",
"0.57422316",
"0.5720725",
"0.5716079",
"0.5704338",
"0.5680294",
"0.5667594",
"0.56469214",
"0.5632094",
"0.5626138"
] |
0.8346957
|
0
|
Test the __repr__ method of a credential.
|
def test_mdb_credential_repr(self):
cred = vccs_auth.credential.from_dict(self.cred_data, None)
res = repr(cred)
print "Credential : {!r}".format(res)
self.assertTrue(hex(self.cred_data['key_handle']) in res)
self.assertTrue(self.cred_data['type'] in res)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_repr(self):\n dummy = DummyCryptographicObject()\n repr(dummy)",
"def test_repr(self):\n self.assertEqual(\n repr(userbase.Preauthenticated('foo@bar')),\n '<Preauthenticated: foo@bar>')",
"def test_display_cred(self):\n self.assertEqual(Credentials.display_cred(), Credentials.cred_list)",
"def test_display_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(),Credentials.credential_list)",
"def test_account_repr(self):\n account = Account('test-account')\n self.assertEqual(f'{account}', 'Account<test-account>')",
"def test_repr(self, user_factory):\n user = user_factory.get()\n assert repr(user)",
"def display_credential():\n return CredentialsData.display_credentials()",
"def test_repr(self):\n obj = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n args = \"value={0}, opaque_type={1}\".format(\n binascii.hexlify(self.bytes_a), enums.OpaqueDataType.NONE)\n expected = \"OpaqueObject({0})\".format(args)\n observed = repr(obj)\n self.assertEqual(expected, observed)",
"def test_user_repr(self):\n\n self.assertEqual(repr(\n self.user),\n f\"<User #{self.user.id}: {self.user.username}, {self.user.email}>\")",
"def test_display_all_credentials(self):\n\n\n self.assertEqual(Credential.display_credentials(),Credential.credential_list)",
"def test_repr(self):\n self.assertEqual(repr(self.card), \"A of Spades\")",
"def test_repr_method(self):\n\n u = User(\n email=\"[email protected]\",\n username=\"testuser\",\n password=\"HASHED_PASSWORD\"\n )\n\n u.id = 9999\n\n db.session.add(u)\n db.session.commit()\n\n # Method should return: User <User #{self.id}: {self.username}, {self.email}>\n msg = self.u1.__repr__()\n self.assertEqual(1, 1)\n\n ### Following tests ###",
"def test_repr(self):\n hashing_algorithm = HashingAlgorithm(HashingAlgorithmEnum.MD5)\n digest_value = DigestValue(b'\\x00\\x01\\x02\\x03')\n key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)\n digest = Digest(\n hashing_algorithm=hashing_algorithm,\n digest_value=digest_value,\n key_format_type=key_format_type)\n\n hashing_algorithm = \"hashing_algorithm={0}\".format(\n repr(hashing_algorithm))\n digest_value = \"digest_value={0}\".format(repr(digest_value))\n key_format_type = \"key_format_type={0}\".format(repr(key_format_type))\n\n expected = \"Digest({0}, {1}, {2})\".format(\n hashing_algorithm, digest_value, key_format_type)\n observed = repr(digest)\n\n msg = \"expected:\\n{0},\\nobserved:\\n{1}\".format(expected, observed)\n self.assertEqual(expected, observed, msg)",
"def test_repr():\n c = Circle(4) \n assert c.__repr__() == 'Circle(4)'",
"def test_display_all_credential(self):\n self.assertEqual(Credential.display_credential(),Credential.credential_list)",
"def test_repr(self):\n certificate = Certificate(\n certificate_type=self.certificate_type_b,\n certificate_value=self.certificate_value_b)\n\n certificate_type = \"certificate_type={0}\".format(\n str(self.certificate_type_b))\n certificate_value = \"certificate_value=b'{0}'\".format(\n str(self.certificate_value_b))\n\n expected = \"Certificate({0}, {1})\".format(\n certificate_type, certificate_value)\n observed = repr(certificate)\n\n msg = \"\\nexpected:\\n{0}\\nobserved:\\n{1}\".format(expected, observed)\n self.assertEqual(expected, observed, msg)\n\n # NOTE (peter-hamilton) Testing with eval won't work due to null bytes.",
"def test__ApplicationRoleConnection__repr():\n platform_name = 'buta'\n platform_user_name = 'otome'\n metadata_values = {'old': '1'}\n \n connection = ApplicationRoleConnection(\n platform_name = platform_name,\n platform_user_name = platform_user_name,\n metadata_values = metadata_values,\n )\n \n vampytest.assert_instance(repr(connection), str)",
"def test_repr(self, cosmo_cls, cosmo):\n r = repr(cosmo)\n\n # class in string rep\n assert cosmo_cls.__qualname__ in r\n assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing\n r = r[len(cosmo_cls.__qualname__) + 1:] # remove\n\n # name in string rep\n if cosmo.name is not None:\n assert f\"name=\\\"{cosmo.name}\\\"\" in r\n assert r.index(\"name=\") == 0\n r = r[6 + len(cosmo.name) + 3:] # remove\n\n # parameters in string rep\n ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}\n cps = {k: getattr(cosmo_cls, k) for k in cosmo.__parameters__}\n for k, v in ps.items():\n sv = format(v, cps[k].format_spec if v is not None else '')\n assert (k + '=' + sv) in r\n assert r.index(k) == 0\n r = r[len((k + '=' + sv)) + 2:] # remove",
"def test_repr(self):\n\n char = Character.query.get(1111)\n expected = \"<Character Instance | ID: 1111 | Name: Mario | Game: Super Mario 64>\"\n\n self.assertEqual(expected, str(char))",
"def test_str_and_repr(self):\n assert str(self.enrollment) == \"[ProgramEnrollment id=1]\"\n assert repr(self.enrollment) == (\n \"<ProgramEnrollment id=1 user=<User: rocko> external_user_key='abc'\"\n \" program_uuid=UUID('88888888-4444-2222-1111-000000000000')\"\n \" curriculum_uuid=UUID('77777777-4444-2222-1111-000000000000')\"\n \" status='enrolled'>\"\n )",
"def test_display_all_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)",
"def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)",
"def test_repr(self):\n self.assertTrue(repr(self.obj1))\n self.assertTrue(repr(self.obj2))\n self.assertTrue(repr(self.obj3))\n self.assertTrue(repr(self.obj4))\n self.assertTrue(repr(self.obj5))",
"def test__ActivityMetadataBase__repr():\n activity_metadata = ActivityMetadataBase()\n \n vampytest.assert_instance(repr(activity_metadata), str)",
"def test_string_repr(self):\n customer = Customer(**customer_data)\n self.assertEqual(str(customer), customer.email)",
"def __repr__(self):\n return f'Connection(driver={self.driver}, host={self.host}, port={self.port}, auth={repr(self.auth)})'",
"def test_repr(self):\n self.assertEquals(\n repr(self.tabView),\n \"<TabView topLevel=False tabs=%r>\" % self.tabs)",
"def test__InteractionForm__repr():\n title = 'important'\n components = [Component(ComponentType.button, label = 'chata')]\n custom_id = 'lie'\n \n interaction_form = InteractionForm(title, components, custom_id)\n vampytest.assert_instance(repr(interaction_form), str)",
"def test_repr(self):\n self.assertEqual(repr(self.deck), \"Deck of 52 cards.\")",
"def test_repr_format(self):\n t = Identity()\n assert t.repr_format(\"asfa\") == \"asfa\""
] |
[
"0.74393976",
"0.70600057",
"0.70516384",
"0.69921756",
"0.68154055",
"0.65346986",
"0.6510147",
"0.64923185",
"0.648255",
"0.6446318",
"0.64374864",
"0.6412424",
"0.63760316",
"0.6320829",
"0.62903655",
"0.6257066",
"0.6190755",
"0.5962807",
"0.59566957",
"0.5948195",
"0.59374255",
"0.5919509",
"0.59163743",
"0.5903851",
"0.5883697",
"0.58523035",
"0.5850498",
"0.58448374",
"0.5843223",
"0.5842186"
] |
0.78560376
|
0
|
This snippet decides what type of task is given for evaluation. This is really experiment specific and needs to be updated if things change. The only use for the task types is to make the evaluation on the classes with more than 100 samples at training for the EPIC evaluation. If actions are trained explicitly, they are task0. If verbs are trained with actions, they are task1; otherwise they are task0. If nouns are trained, they are always verb task + 1, so either task2 or task1. If hands are trained, they are always the last task, so they do not change the above order.
|
def get_task_type_epic(action_classes, verb_classes, noun_classes):
task_types = []
if action_classes > 0:
task_types.append("EpicActions")
if verb_classes > 0:
task_types.append("EpicVerbs")
if noun_classes > 0:
task_types.append("EpicNouns")
return task_types
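A minimal usage sketch (the class counts below are hypothetical placeholders, not real EPIC label counts):

# Hypothetical configuration: actions and verbs are trained, nouns are not.
print(get_task_type_epic(action_classes=10, verb_classes=5, noun_classes=0))
# -> ['EpicActions', 'EpicVerbs']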
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_task_specific_eval(task):\n if task == 'vrp':\n from evaluation.eval_VRP import eval_google_or,eval_Clarke_Wright\n\n return [(eval_google_or.EvalGoogleOR,'or_tools'), (eval_Clarke_Wright.EvalClarkeWright,'Clarke_Wright')]\n\n elif task == 'vrptw':\n from evaluation.eval_VRPTW import eval_tw_google_or,eval_I1_heuristics\n\n return [(eval_tw_google_or.EvalTWGoogleOR,'or_tools_tw'),(eval_I1_heuristics.EvalI1Heuristics,'I1_heuristic')]\n\n else:\n raise Exception('Task is not implemented')",
"def task_type(self):\n pass",
"def Experiment1(train_x,train_y,test_x,test_y,task):\r\n lambda_r = np.array(np.arange(0,151,1))\r\n if(task=='1'):\r\n #Task1: Effects of regularization parameters\r\n Exp1_task1(lambda_r,train_x,train_y,test_x,test_y)\r\n if(task=='2'):\r\n #Task2: Effects of No.of examples\r\n Exp1_task2(lambda_r,train_x,train_y,test_x,test_y)",
"def _get_ml_task(self):\n self._validate_ml_task()\n if self.ml_task == \"auto\":\n classes_number = self.n_classes\n if classes_number == 2:\n self._estimator_type = \"classifier\" # for sk-learn api\n return BINARY_CLASSIFICATION\n elif classes_number <= 20:\n self._estimator_type = \"classifier\" # for sk-learn api\n return MULTICLASS_CLASSIFICATION\n else:\n self._estimator_type = \"regressor\" # for sk-learn api\n return REGRESSION\n else:\n return deepcopy(self.ml_task)",
"def Experiment2(train_x,train_y,test_x,test_y,filename,task):\r\n if(task=='1'):\r\n #task: 3.1 Model selection using cross validation\r\n t1 = timeit.default_timer()\r\n print(\"file: \",filename)\r\n Exp2_t1(train_x,train_y,test_x,test_y)\r\n t2 = timeit.default_timer()\r\n t = t2-t1\r\n print(\"run time: \",t)\r\n if(task=='2'):\r\n #task: 3.2 Model selection using Evidence function\r\n t1 = timeit.default_timer()\r\n print(\"file: \",filename)\r\n Exp2_t2(train_x,train_y,test_x,test_y)\r\n t2 = timeit.default_timer()\r\n t = t2-t1\r\n print(\"run time: \",t)",
"def fewshot_eval_model(experiment_name, task_name, mt, eval_data, batch_size, \n k=0, random_seed=0, n=None, prompt_data=None, \n instructions=None, answers=None, template_id=0, cot_reasons=None,\n max_decode_steps=128, extract_answers=None,\n trigger_phrase=None,\n print_examples=0, print_all_wrong=False):\n # argument checks\n if k > 0 and prompt_data is None: \n assert len(prompt_data) >= 1, f\"need to provide prompt data of at least len {k}\"\n # define stats\n n_correct = 0\n n_str_em = 0\n n_datapoints = 0\n all_preds = []\n all_labels = []\n # task specific info\n task_name_to_hendrycks_em_group_by = {\n 'commonsense': 1,\n 'deontology': 4,\n 'justice': 4,\n 'utilitarianism': 1,\n 'virtue': 1, # we treat as multiple choice\n 'trolley' : 1,\n 'factual' : 1,\n 'counterfact' : 1,\n }\n if 'virtue' in task_name:\n assert answers is None, \"do not use answers with virtue subset\"\n if answers and not extract_answers:\n extract_answers = answers\n # subsample eval data if requested\n if n is not None:\n eval_data_loop = eval_data.sample(n=n, random_state=random_seed, replace=False)\n else:\n eval_data_loop = eval_data\n # begin eval loop\n # calculate query batch size based on if len(inputs) * len(answers) can fit in BATCH_SIZE query to model\n effective_batch_size = batch_size if not answers else batch_size // len(extract_answers)\n n_chunks = np.ceil(len(eval_data_loop) / effective_batch_size)\n for batch_num, batch in enumerate(np.array_split(eval_data_loop, n_chunks)):\n if batch_num > 0:\n running_acc = n_correct / n_datapoints \n check_answers = extract_answers if answers is None else answers\n prop_invalid_preds = compute_prop_invalid_preds(all_preds, check_answers)\n start = '\\r' # '\\n' if batch_num < 3 else \n print(f\"{start}Batch {batch_num-1} | Acc: {100*running_acc:.2f} | Invalid: {100*prop_invalid_preds:.2f}\", end=\"\")\n # make inputs and labels:\n query_inputs = []\n for test_input in batch.input:\n query_input = format_prompt_from_df(prompt_data, test_input, answers=answers, instructions=instructions, cot_reasons=cot_reasons, separator='\\n', template_id=template_id)\n query_inputs.append(query_input)\n labels = batch.label_str\n # make multiple choice answers for virtue\n if 'virtue' in task_name:\n answers = []\n for answer_list in batch.answers:\n answers.append(answer_list.split(','))\n answers = np.array(answers)\n # query model. 
query inputs may be editing when doing chain_of_thought multiple choice\n with torch.no_grad():\n preds, scores, query_inputs = predict_model(mt, \n query_inputs, \n answers, \n trigger_phrase=trigger_phrase, \n max_decode_steps=max_decode_steps)\n # record stats\n # first case is when we are generating predictions and extracting answers from them\n if answers is None and extract_answers is not None:\n batch_n_correct, correct_vec = first_appearance_fewshot_accuracy_sum(preds, labels, \n extract_answers=extract_answers, \n trigger_phrase=trigger_phrase,\n return_vec=True)\n else:\n batch_n_correct, correct_vec = fewshot_accuracy_sum(preds, labels, return_vec=True)\n n_correct += batch_n_correct\n n_str_em += em_accuracy_sum(preds, labels)\n n_datapoints += len(batch)\n all_preds.extend(list(preds))\n all_labels.extend(list(labels))\n if (print_examples>0 and batch_num == 0):\n print_idx = np.arange(min(print_examples, len(batch)))\n elif print_all_wrong:\n print_idx = np.argwhere(1-correct_vec).reshape(-1)\n else:\n print_idx = np.array([])\n if len(print_idx) > 0:\n print(f\"\\nExamples from batch {batch_num}...\")\n print(\"--------\")\n for i in print_idx:\n print(f\"Example {i}\")\n print(f\"point: \\n{batch.input.iloc[i]}\")\n print(f\"prompt: \\n{query_inputs[i]}\")\n print(\"pred: \", preds[i])\n print(\"label: \", labels.iloc[i])\n if isinstance(answers, np.ndarray):\n print(\"anwers: \", answers[i])\n print(\"exact scores: \", scores[i])\n print(\"correct: \", correct_vec[i])\n if 'completion' in batch.columns:\n print(\"gpt completion: \", batch.completion.iloc[i])\n print(\"--------\")\n print(f\"Examples acc: {correct_vec[print_idx].mean():.2f}\")\n print(\"--------\\n\")\n del batch, preds, labels, scores\n # calculate EM from Hendrycks et al paper\n group_by = task_name_to_hendrycks_em_group_by[task_name]\n hendrycks_em = get_hendrycks_em(all_preds, all_labels, answers, group_by)\n # make df with results\n results_dict = {\n 'exp_name' : experiment_name,\n 'task_name' : task_name,\n 'k' : k,\n 'n' : n,\n 'seed' : random_seed,\n 'acc' : n_correct / n_datapoints,\n 'acc_em' : n_str_em / n_datapoints,\n 'hendrycks_em': hendrycks_em,\n 'prop_invalid': compute_prop_invalid_preds(all_preds, answers)\n }\n results = pd.DataFrame.from_dict({k : [v] for k,v in results_dict.items()})\n print(\"\\nRESULTS:\")\n for k,v in results_dict.items():\n if any([x in k for x in ['acc', 'em', 'prop']]):\n v = f\"{100*v:.2f}\"\n print(f\" {k}: {str(v):10s}\")\n return results",
"def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])",
"def detect_task_type(path):\n # distinguishing \"delay-response\" task or \"multi-target-licking\" task\n mat = spio.loadmat(path.as_posix(), squeeze_me=True, struct_as_record=False)\n GUI_fields = set(mat['SessionData'].SettingsFile.GUI._fieldnames)\n\n if ({'X_center', 'Y_center', 'Z_center'}.issubset(GUI_fields)\n and not {'SamplePeriod', 'DelayPeriod'}.issubset(GUI_fields)):\n task_type = 'multi-target-licking'\n else:\n task_type = 'delay-response'\n\n return task_type",
"def evaluate_one_task(prediction_file, label_file, task, language=None):\n predictions = READER_FUNCTION[task](prediction_file)\n labels = READER_FUNCTION[task](label_file)\n if task not in ['bucc2018', 'mlqa', 'tydiqa', 'xquad']:\n assert len(predictions) == len(labels), 'Number of examples in {} and {} not matched in {} task'.format(prediction_file, label_file, task)\n result = METRIC_FUNCTION[task](labels, predictions, language)\n return result",
"def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a",
"def task_type(cls):\r\n raise NotImplementedError()",
"def __subtask_classification__(self,task_id,classification_tasks,marking_tasks,raw_classifications,aggregations):\n\n\n # go through the tools which actually have the followup questions\n for tool in classification_tasks[task_id]:\n\n # now go through the individual followup questions\n # range(len()) - since individual values will be either \"single\" or \"multiple\"\n\n for followup_question_index in range(len(classification_tasks[task_id][tool])):\n global_index = str(task_id)+\"_\" +str(tool)+\"_\"+str(followup_question_index)\n\n\n followup_classification = {}\n # this is used for inserting the results back into our running aggregation - which are based\n # on shapes, not tools\n shapes_per_cluster = {}\n\n # go through each cluster and find the corresponding raw classifications\n for subject_id in aggregations:\n if subject_id == \"param\":\n continue\n\n # has anyone done this task for this subject?\n if task_id in aggregations[subject_id]:\n # find the clusters which we have determined to be of the correct type\n # only consider those users who made the correct type marking\n # what shape did this particular tool make?\n shape = marking_tasks[task_id][tool]\n for cluster_index,cluster in aggregations[subject_id][task_id][shape + \" clusters\"].items():\n if cluster_index in [\"param\",\"all_users\"]:\n continue\n\n # what is the most likely tool for this cluster?\n most_likely_tool,_ = max(cluster[\"tool_classification\"][0].items(),key = lambda x:x[1])\n if int(most_likely_tool) != int(tool):\n continue\n\n\n # polygons and rectangles will pass cluster membership back as indices\n # ints => we can't case tuples\n if isinstance(cluster[\"cluster members\"][0],int):\n user_identifiers = zip(cluster[\"cluster members\"],cluster[\"users\"])\n else:\n user_identifiers = zip([tuple(x) for x in cluster[\"cluster members\"]],cluster[\"users\"])\n ballots = []\n\n for user_identifiers,tool_used in zip(user_identifiers,cluster[\"tools\"]):\n # did the user use the relevant tool - doesn't matter if most people\n # used another tool\n if tool_used == tool:\n\n followup_answer = raw_classifications[global_index][subject_id][user_identifiers]\n u = user_identifiers[1]\n ballots.append((u,followup_answer))\n\n followup_classification[(subject_id,cluster_index)] = deepcopy(ballots)\n shapes_per_cluster[(subject_id,cluster_index)] = shape\n\n\n followup_results = self.__task_aggregation__(followup_classification,global_index,{})\n assert isinstance(followup_results,dict)\n\n for subject_id,cluster_index in followup_results:\n shape = shapes_per_cluster[(subject_id,cluster_index)]\n # keyword_list = [subject_id,task_id,shape+ \" clusters\",cluster_index,\"followup_questions\"]\n new_results = followup_results[(subject_id,cluster_index)]\n # if this is the first question - insert\n # otherwise append\n\n if followup_question_index == 0:\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"] = {}\n\n\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"][followup_question_index] = new_results.values()[0]\n\n return aggregations",
"def _verify_task(self, task_type: str = None) -> bool:\n\n return task_type in [\n self.BINARY_CLASSIFICATION, self.CATEGORICAL_CLASSIFICATION,\n self.REGRESSION\n ]",
"def test_IODimensions(self):\n tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n for t in tasks:\n N_in ,N_out, N_samples, tf = t\n X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n esn = ESN(N_in,N_out,teacher_forcing=tf)\n prediction_tr = esn.fit(X,y)\n prediction_t = esn.predict(Xp)\n self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n self.assertEqual(prediction_t.shape,(N_samples,N_out))",
"def _get_task_type(self):\n\n if self.num_classes == 0:\n return ps_pb2.Type(\n one_dimensional_regression=ps_pb2.OneDimensionalRegression(\n label=self._label_key))\n if self.num_classes == 2:\n return ps_pb2.Type(\n binary_classification=ps_pb2.BinaryClassification(\n label=self._label_key))\n return ps_pb2.Type(\n multi_class_classification=ps_pb2.MultiClassClassification(\n label=self._label_key))",
"def task_type(cls):\n raise NotImplementedError()",
"def test_huggingface_models(tasks):\n model = huggingface_models.BIGBenchHFModel(model_name='gpt2', max_length=16)\n task_list = ['meta_hello_world', 'simple_arithmetic_json_multiple_choice']\n\n # If --tasks was specified, only evaluate on tasks that also appear there\n filter_list = get_task_list(tasks)\n \n if filter_list:\n task_list = [t for t in task_list if t in filter_list]\n\n if len(task_list) > 0:\n run_tasks(model, task_list=task_list)",
"def make_rand_task():\n rand_type = all_tasks.keys()[random.randint(0, len(all_tasks.keys()) - 1)]\n rand_hit = all_tasks[rand_type][random.randint(0, len(all_tasks[rand_type]) - 1)]\n\n if rand_hit['type'] == 'img':\n rand_hit['img_src'] = get_rand_img()\n\n return rand_hit",
"def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())",
"def generic_task(self, x_in, y, task_name):\n self.fit(x_in, y, task_name=task_name)\n print 'The score for task ', task_name, ' is ', self.score(x_in, y)",
"def gen_task(self, q_type, q_option):\n # check what it the task about\n if q_type == \"write\":\n question = [\"Please, let me know what you most like on \",\n \"Please, write a short sentence about \",\n \"Please, write the first experience you had with \"]\n select_question = random.choice(question)\n\n elif q_type == \"draw\":\n draw = [\n \"In a separeted piece of paper, please draw your favorite \",\n \"In a separeted piece of paper, please draw the first thing it comes to your mind when you think about \"\n ]\n select_question = random.choice(draw)\n\n # output the task based on the option input\n select_option = q_option\n\n # join everything\n task = select_question + select_option\n\n return task",
"def task_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"task_type\")",
"def task_execution(self):\n\n # Only execute a task if the robot isn't currently in the middle of doing one\n print (\"Task: \", self.task)\n task_to_execute = None\n if self.task == 'task_vision':\n task_to_execute = self.world.task.task_vision\n if self.task == 'task_move_to_ball':\n task_to_execute = self.world.task.task_move_to_ball\n if self.task == 'task_kick_ball_in_goal':\n task_to_execute = self.world.task.task_kick_ball_in_goal\n if self.task == 'task_move_and_grab_ball':\n task_to_execute = self.world.task.task_move_and_grab_ball\n if self.task == 'task_rotate_and_grab':\n task_to_execute = self.world.task.task_rotate_and_grab\n if self.task == 'task_grab_rotate_kick':\n task_to_execute = self.world.task.task_grab_rotate_kick\n if self.task == 'task_defender':\n task_to_execute = self.world.task.task_defender\n if self.task == 'task_defender_kick_off':\n task_to_execute = self.world.task.task_defender_kick_off\n if self.task == 'task_attacker':\n task_to_execute = self.world.task.task_attacker\n if self.task == 'task_attacker_kick_off':\n task_to_execute = self.world.task.task_attacker_kick_off\n if self.task == 'task_penalty':\n task_to_execute = self.world.task.task_penalty\n if self.task == 'task_goalie':\n task_to_execute = self.world.task.task_penalty_goalie\n\n # if there's a task to do, let's try it\n if self.task:\n # if it's executed fine, then we've completed the task. otherwise we're going to loop round and try again\n if task_to_execute():\n self.task = None\n print(\"Task: COMPLETED\")",
"def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, 
step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds",
"def one_shot_test(self, model, support_set_size, number_of_tasks_per_alphabet,\n is_validation):\n\n # Set some variables that depend on dataset\n if is_validation:\n alphabets = self._validation_alphabets\n print('\\nMaking One Shot Task on validation alphabets:')\n else:\n alphabets = self._evaluation_alphabets\n print('\\nMaking One Shot Task on evaluation alphabets:')\n\n mean_global_accuracy = 0\n\n for alphabet in alphabets:\n mean_alphabet_accuracy = 0\n for _ in range(number_of_tasks_per_alphabet):\n images, _ = self.get_one_shot_batch(\n support_set_size, is_validation=is_validation)\n probabilities = model.predict_on_batch(images)\n\n # Added this condition because noticed that sometimes the outputs\n # of the classifier was almost the same in all images, meaning that\n # the argmax would be always by defenition 0.\n if np.argmax(probabilities) == 0 and probabilities.std()>0.01:\n accuracy = 1.0\n else:\n accuracy = 0.0\n\n mean_alphabet_accuracy += accuracy\n mean_global_accuracy += accuracy\n\n mean_alphabet_accuracy /= number_of_tasks_per_alphabet\n\n print(alphabet + ' alphabet' + ', accuracy: ' +\n str(mean_alphabet_accuracy))\n if is_validation:\n self._current_validation_alphabet_index += 1\n else:\n self._current_evaluation_alphabet_index += 1\n\n mean_global_accuracy /= (len(alphabets) *\n number_of_tasks_per_alphabet)\n\n print('\\nMean global accuracy: ' + str(mean_global_accuracy))\n\n # reset counter\n if is_validation:\n self._current_validation_alphabet_index = 0\n else:\n self._current_evaluation_alphabet_index = 0\n\n return mean_global_accuracy",
"def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )",
"def act_and_train(self, t: int) -> Tuple[TState, TAction, float]:\n pass",
"def gen_task(context, targets, upreds):\n # Fill with random rules up to certain task\n ctx = context.copy() # Don't modify the original context\n for _ in range(ARGS.noise_size):\n rtask = R.randint(1, max(1, ARGS.task))\n rtask = 1 if ARGS.arity != 2 and rtask == 8 else rtask\n ntask = \"gen_task\" + str(rtask)\n ctx.append(globals()[ntask](upreds))\n output(ctx, targets)",
"def generate_tasks(self, task):",
"def main(_) -> None:\n params = train_utils.parse_configuration(FLAGS)\n mode = FLAGS.mode\n model_dir = FLAGS.model_dir\n if 'train' in FLAGS.mode:\n # Pure eval modes do not output yaml files. Otherwise continuous eval job\n # may race against the train job for writing the same file.\n train_utils.serialize_config(params, model_dir)\n\n if FLAGS.seed is not None:\n logging.info('Setting tf seed.')\n tf.random.set_seed(FLAGS.seed)\n\n task = RankingTask(\n params=params.task,\n optimizer_config=params.trainer.optimizer_config,\n logging_dir=model_dir,\n steps_per_execution=params.trainer.steps_per_loop,\n name='RankingTask')\n\n enable_tensorboard = params.trainer.callbacks.enable_tensorboard\n\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=params.runtime.distribution_strategy,\n all_reduce_alg=params.runtime.all_reduce_alg,\n num_gpus=params.runtime.num_gpus,\n tpu_address=params.runtime.tpu)\n\n with strategy.scope():\n model = task.build_model()\n\n def get_dataset_fn(params):\n return lambda input_context: task.build_inputs(params, input_context)\n\n train_dataset = None\n if 'train' in mode:\n train_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.train_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n validation_dataset = None\n if 'eval' in mode:\n validation_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.validation_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n if params.trainer.use_orbit:\n with strategy.scope():\n checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter(\n params, model_dir)\n trainer = RankingTrainer(\n config=params,\n task=task,\n model=model,\n optimizer=model.optimizer,\n train='train' in mode,\n evaluate='eval' in mode,\n train_dataset=train_dataset,\n validation_dataset=validation_dataset,\n checkpoint_exporter=checkpoint_exporter)\n\n train_lib.run_experiment(\n distribution_strategy=strategy,\n task=task,\n mode=mode,\n params=params,\n model_dir=model_dir,\n trainer=trainer)\n\n else: # Compile/fit\n checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)\n\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=model.optimizer.iterations,\n checkpoint_interval=params.trainer.checkpoint_interval)\n checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)\n\n time_callback = keras_utils.TimeHistory(\n params.task.train_data.global_batch_size,\n params.trainer.time_history.log_steps,\n logdir=model_dir if enable_tensorboard else None)\n callbacks = [checkpoint_callback, time_callback]\n\n if enable_tensorboard:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=model_dir,\n update_freq=min(1000, params.trainer.validation_interval),\n profile_batch=FLAGS.profile_steps)\n callbacks.append(tensorboard_callback)\n\n num_epochs = (params.trainer.train_steps //\n params.trainer.validation_interval)\n current_step = model.optimizer.iterations.numpy()\n initial_epoch = current_step // params.trainer.validation_interval\n\n eval_steps = params.trainer.validation_steps if 'eval' in mode else None\n\n if mode in ['train', 'train_and_eval']:\n logging.info('Training 
started')\n history = model.fit(\n train_dataset,\n initial_epoch=initial_epoch,\n epochs=num_epochs,\n steps_per_epoch=params.trainer.validation_interval,\n validation_data=validation_dataset,\n validation_steps=eval_steps,\n callbacks=callbacks,\n )\n model.summary()\n logging.info('Train history: %s', history.history)\n elif mode == 'eval':\n logging.info('Evaluation started')\n validation_output = model.evaluate(validation_dataset, steps=eval_steps)\n logging.info('Evaluation output: %s', validation_output)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)"
] |
[
"0.6503214",
"0.6361112",
"0.6304494",
"0.6277422",
"0.6171994",
"0.616361",
"0.6150416",
"0.606043",
"0.59382284",
"0.5891429",
"0.58487225",
"0.58432925",
"0.58132005",
"0.5798007",
"0.577507",
"0.57504714",
"0.5745241",
"0.57363325",
"0.57022166",
"0.5690334",
"0.5669375",
"0.56623214",
"0.562977",
"0.5614069",
"0.5599335",
"0.5534587",
"0.5534349",
"0.5523332",
"0.55192506",
"0.5511574"
] |
0.6772187
|
0
|
Splits a model state dictionary into subcheckpoints so that the final size of each subcheckpoint does not exceed a given size. The subcheckpoints are determined by iterating through the `state_dict` in the order of its keys, so no optimization is made to bring each subcheckpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB], they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. If one of the model's weights is bigger than `max_shard_size`, it will end up in its own subcheckpoint, which will have a size greater than `max_shard_size`.
|
def flax_shard_checkpoint(params, max_shard_size="10GB"):
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = {}
current_block_size = 0
total_size = 0
# flatten the weights to chunk
weights = flatten_dict(params, sep="/")
for item in weights:
weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = {}
current_block_size = 0
current_block[item] = weights[item]
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = FLAX_WEIGHTS_NAME.replace(".msgpack", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack")
shards[shard_file] = shard
for weight_name in shard.keys():
weight_map[weight_name] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
return shards, index
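A minimal usage sketch, assuming the helpers referenced above (convert_file_size_to_int, flatten_dict, dtype_byte_size, FLAX_WEIGHTS_NAME) are already in scope as in the surrounding module; the parameter tree and the 5MB limit are made up for illustration:

import numpy as np

# Hypothetical tiny parameter tree; real params would be a Flax model's weight dict.
params = {
    "encoder": {"kernel": np.zeros((1024, 1024), dtype=np.float32)},  # ~4 MB
    "decoder": {"kernel": np.zeros((1024, 1024), dtype=np.float32)},  # ~4 MB
}

# A 5MB limit forces a split, so each ~4MB weight lands in its own shard.
shards, index = flax_shard_checkpoint(params, max_shard_size="5MB")
for shard_file, shard in shards.items():
    print(shard_file, list(shard.keys()))
print(index["metadata"]["total_size"])  # total bytes across both shards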
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_partition_on_target_size_vertex_than_has_to_be_split(self):\n self.setup()\n large_vertex = TestVertex(1000, \"Large vertex\")\n large_vertex.add_constraint(PartitionerMaximumSizeConstraint(10))\n self.graph = ApplicationGraph(\n \"Graph with large vertex\", [large_vertex], [])\n graph, mapper = self.bp.partition(self.graph, self.machine)\n self.assertEqual(len(graph.vertices), 100)",
"def get_max_state_size(self) -> int:\n return self._j_checkpoint_storage.getMaxStateSize()",
"def split_state_dict(state_dict):\n\n optimizer_keys = ['Moment_1_', 'Moment_2_', 'Update_Count_', 'Step']\n split_sd = {'optimizer': {}, 'fp32_param': {}, 'fp16_param': {}}\n for k, v in state_dict.items():\n mode = 'fp32_param'\n for optim_key in optimizer_keys:\n if k.startswith(optim_key):\n mode = 'optimizer'\n break\n if k.endswith('_fp16'):\n mode = 'fp16_param'\n split_sd[mode][k] = v\n return split_sd",
"def label_size_from_inner_size(self, inner_size: int) -> int:\n return (2 ** self.n_folds) * (inner_size - 4) + 2",
"def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")",
"def initialize_partitioned_model_states(\n mdl: model.BaseTask,\n prng_key: PRNGKey,\n) -> Tuple[TrainState, NestedShape, TrainState]:\n mdl.instantiate_variable_configs()\n # At this point, variable specs are already known.\n var_specs = mdl.vars\n train_state_partition_specs = mdl.create_train_state_partition_specs(\n var_specs)\n assert train_state_partition_specs is not None\n\n init_model_from_seed = functools.partial(initialize_model_state, mdl)\n\n in_shape = jax.ShapeDtypeStruct((2,), jnp.uint32)\n out_shape = jax.eval_shape(init_model_from_seed, in_shape)\n\n logging.info('in_shape: %s', in_shape)\n logging.info('out_shape: %s', out_shape)\n logging.info('train_state_partition_specs: %s', train_state_partition_specs)\n tf.nest.assert_same_structure(train_state_partition_specs, out_shape)\n\n init_fn = pjit.pjit(\n init_model_from_seed,\n in_axis_resources=(None,),\n out_axis_resources=train_state_partition_specs)\n\n assert base_layer.global_mesh_defined(), 'must be inside maps.mesh scope'\n partitioned_vars = init_fn(prng_key)\n\n # Make sure output is of the expected structure.\n tf.nest.assert_same_structure(out_shape, partitioned_vars)\n\n return train_state_partition_specs, out_shape, partitioned_vars",
"def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)",
"def split_state(self,nChunks):\n \n state_chunks = {}\n print(\"In split state!\")\n # Figure out how big each section must be\n bounds = self.chunk_bounds(nChunks)\n\n # Now separate along the location dimension according to the bounds\n for cnum, bnds in bounds.items():\n state_chunks[cnum] = \\\n xarray_Ensemble_State(state=self.state.isel(location=slice(bnds[0],bnds[1])))\n return state_chunks",
"def split(self, elIndicesDict, maxSubTreeSize=None, numSubTrees=None, verbosity=0):\n #dbList = self.generate_circuit_list()\n tm = _time.time()\n printer = _VerbosityPrinter.build_printer(verbosity)\n\n if (maxSubTreeSize is None and numSubTrees is None) or \\\n (maxSubTreeSize is not None and numSubTrees is not None):\n raise ValueError(\"Specify *either* maxSubTreeSize or numSubTrees\")\n if numSubTrees is not None and numSubTrees <= 0:\n raise ValueError(\"EvalTree split() error: numSubTrees must be > 0!\")\n\n #Don't split at all if it's unnecessary\n if maxSubTreeSize is None or len(self) < maxSubTreeSize:\n if numSubTrees is None or numSubTrees == 1: return elIndicesDict\n\n self.subTrees = []\n evalOrder = self.get_evaluation_order()\n printer.log(\"EvalTree.split done initial prep in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n\n def nocache_create_equal_size_subtrees():\n \"\"\" A shortcut for special case when there is no cache so each\n circuit can be evaluated independently \"\"\"\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost\n\n def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n \"\"\"\n Find a set of subtrees by iterating through the tree\n and placing \"break\" points when the cost of evaluating the\n subtree exceeds some 'maxCost'. This ensure ~ equal cost\n trees, but doesn't ensure any particular number of them.\n\n maxCostRate can be set to implement a varying maxCost\n over the course of the iteration.\n \"\"\"\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost\n\n ##################################################################\n # Part I: find a list of where the current tree should be broken #\n 
##################################################################\n\n if numSubTrees is not None and self.cache_size() == 0:\n #print(\"Split: EQUAL SUBTREES!\") #REMOVE\n subTreeSetList, totalCost = nocache_create_equal_size_subtrees()\n #printer.log(\"EvalTree.split PT1 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n elif numSubTrees is not None:\n\n #OLD METHOD: optimize max-cost to get the right number of trees\n # (but this can yield trees with unequal lengths or cache sizes,\n # which is what we're often after for memory reasons)\n costMet = \"size\" # cost metric\n if costMet == \"applies\":\n maxCost = self.get_num_applies() / numSubTrees\n else: maxCost = len(self) / numSubTrees\n maxCostLowerBound, maxCostUpperBound = maxCost, None\n maxCostRate, rateLowerBound, rateUpperBound = 0, -1.0, +1.0\n #OLD (& incorrect) vals were 0, -1.0/len(self), +1.0/len(self),\n # though current -1,1 vals are probably overly conservative...\n resultingSubtrees = numSubTrees + 1 # just to prime the loop\n iteration = 0\n\n #Iterate until the desired number of subtrees have been found.\n while resultingSubtrees != numSubTrees:\n subTreeSetList, totalCost = create_subtrees(maxCost, maxCostRate, costMet)\n resultingSubtrees = len(subTreeSetList)\n #print(\"DEBUG: resulting numTrees = %d (cost %g) w/maxCost = %g [%s,%s] & rate = %g [%g,%g]\" % \\\n # (resultingSubtrees, totalCost, maxCost, str(maxCostLowerBound), str(maxCostUpperBound),\n # maxCostRate, rateLowerBound, rateUpperBound))\n\n #DEBUG\n #totalSet = set()\n #for s in subTreeSetList:\n # totalSet.update(s)\n #print(\"DB: total set length = \",len(totalSet))\n #assert(len(totalSet) == len(self))\n\n #Perform binary search in maxCost then maxCostRate to find\n # desired final subtree count.\n if maxCostUpperBound is None or abs(maxCostLowerBound - maxCostUpperBound) > 1.0:\n # coarse adjust => vary maxCost\n last_maxCost = maxCost\n if resultingSubtrees <= numSubTrees: # too few trees: reduce maxCost\n maxCost = (maxCost + maxCostLowerBound) / 2.0\n maxCostUpperBound = last_maxCost\n else: # too many trees: raise maxCost\n if maxCostUpperBound is None:\n maxCost = totalCost # / numSubTrees\n else:\n maxCost = (maxCost + maxCostUpperBound) / 2.0\n maxCostLowerBound = last_maxCost\n else:\n # fine adjust => vary maxCostRate\n last_maxRate = maxCostRate\n if resultingSubtrees <= numSubTrees: # too few trees reduce maxCostRate\n maxCostRate = (maxCostRate + rateLowerBound) / 2.0\n rateUpperBound = last_maxRate\n else: # too many trees: increase maxCostRate\n maxCostRate = (maxCostRate + rateUpperBound) / 2.0\n rateLowerBound = last_maxRate\n\n iteration += 1\n assert(iteration < 100), \"Unsuccessful splitting for 100 iterations!\"\n\n else: # maxSubTreeSize is not None\n subTreeSetList, totalCost = create_subtrees(\n maxSubTreeSize, maxCostRate=0, costMetric=\"size\")\n\n ##########################################################\n # Part II: create subtrees from index sets\n ##########################################################\n # (common logic provided by base class up to providing a few helper fns)\n\n def permute_parent_element(perm, el):\n \"\"\"Applies a permutation to an element of the tree \"\"\"\n # perm[oldIndex] = newIndex\n #return (perm[el[0]] if (el[0] is not None) else None, el[1], el[2])\n return (el[0], el[1], el[2]) # no need to permute the cache element ([0])\n\n def create_subtree(parentIndices, numFinal, fullEvalOrder, sliceIntoParentsFinalArray, parentTree):\n \"\"\"\n Creates a subtree given requisite 
information:\n\n Parameters\n ----------\n parentIndices : list\n The ordered list of (parent-tree) indices to be included in\n the created subtree.\n\n numFinal : int\n The number of \"final\" elements, i.e. those that are used to\n construct the final array of results and not just an intermediate.\n The first numFinal elemements of parentIndices are \"final\", and\n 'sliceIntoParentsFinalArray' tells you which final indices of\n the parent they map to.\n\n fullEvalOrder : list\n A list of the integers between 0 and len(parentIndices)-1 which\n gives the evaluation order of the subtree *including* evaluation\n of any initial elements.\n\n sliceIntoParentsFinalArray : slice\n Described above - map between to-be-created subtree's final\n elements and parent-tree indices.\n\n parentTree : EvalTree\n The parent tree itself.\n \"\"\"\n #t0 = _time.time() #REMOVE\n subTree = MapEvalTree()\n subTree.myFinalToParentFinalMap = sliceIntoParentsFinalArray\n subTree.num_final_strs = numFinal\n subTree[:] = [None] * len(parentIndices)\n\n curCacheSize = 0\n subTreeCacheIndices = {}\n\n for ik in fullEvalOrder: # includes any initial indices\n k = parentIndices[ik] # original tree index\n\n oStart, remainder, oCache = self[k] # original tree data\n\n if oCache is not None: # this element was in parent's cache,\n subTreeCacheIndices[oCache] = curCacheSize # maps parent's cache indices to subtree's\n iCache = curCacheSize\n curCacheSize += 1\n else:\n iCache = None\n\n iStart = None if (oStart is None) else \\\n subTreeCacheIndices[oStart]\n subTree.eval_order.append(ik)\n\n assert(subTree[ik] is None)\n subTree[ik] = (iStart, remainder, iCache)\n\n #t1 = _time.time() #REMOVE\n subTree.cachesize = curCacheSize\n subTree.parentIndexMap = parentIndices # parent index of each subtree index\n subTree.simplified_circuit_spamTuples = [self.simplified_circuit_spamTuples[k]\n for k in _slct.indices(subTree.myFinalToParentFinalMap)]\n #subTree._compute_finalStringToEls() #depends on simplified_circuit_spamTuples\n\n #t2 = _time.time() #REMOVE\n final_el_startstops = []; i = 0\n for spamTuples in parentTree.simplified_circuit_spamTuples:\n final_el_startstops.append((i, i + len(spamTuples)))\n i += len(spamTuples)\n #t3 = _time.time() #REMOVE\n if len(_slct.indices(subTree.myFinalToParentFinalMap)) > 0:\n subTree.myFinalElsToParentFinalElsMap = _np.concatenate(\n [_np.arange(*final_el_startstops[k])\n for k in _slct.indices(subTree.myFinalToParentFinalMap)])\n #Note: myFinalToParentFinalMap maps only between *final* elements\n # (which are what is held in simplified_circuit_spamTuples)\n else: # no final elements (a \"dummy\" tree, useful just to keep extra procs busy)\n subTree.myFinalElsToParentFinalElsMap = _np.arange(0, 0) # empty array\n\n #t4 = _time.time() #REMOVE\n subTree.num_final_els = sum([len(v) for v in subTree.simplified_circuit_spamTuples])\n #t5 = _time.time() #REMOVE\n subTree.recompute_spamtuple_indices(bLocal=False)\n #t6 = _time.time() #REMOVE\n\n subTree.trim_nonfinal_els()\n #t7 = _time.time() #REMOVE\n subTree.opLabels = self._get_opLabels(subTree.generate_circuit_list(permute=False))\n #t8 = _time.time() #REMOVE\n # print(\"DB: create_subtree timing: \"\n # \"t1=%.3fs, t2=%.3fs, t3=%.3fs, t4=%.3fs, t5=%.3fs, t6=%.3fs, t7=%.3fs, t8=%.3fs\"\n # % (t1-t0,t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6,t8-t7))\n\n return subTree\n\n #printer.log(\"EvalTree.split PT2 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n updated_elIndices = self._finish_split(elIndicesDict, subTreeSetList,\n 
permute_parent_element, create_subtree,\n all_final=bool(self.cache_size() == 0))\n #printer.log(\"EvalTree.split PT3 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n printer.log(\"EvalTree.split done second pass in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n return updated_elIndices",
"def inner_size_from_label_size(self, label_size: int) -> int:\n return 4 + math.ceil((label_size - 2) / (2 ** self.n_folds))",
"def load_state_dict(model, src_state_dict, fold_bnt=True):\n from torch.nn import Parameter\n\n dest_state_dict = model.state_dict()\n for name, param in src_state_dict.items():\n if name not in dest_state_dict:\n continue\n if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n dest_state_dict[name].copy_(param)\n except (Exception, msg):\n print(\"Warning: Error occurs when copying '{}': {}\".format(name, str(msg)))\n\n # New version of BN has buffer `num_batches_tracked`, which is not used\n # for normal BN, so we fold all these missing keys into one line\n def _fold_nbt(keys):\n nbt_keys = [s for s in keys if s.endswith('.num_batches_tracked')]\n if len(nbt_keys) > 0:\n keys = [s for s in keys if not s.endswith('.num_batches_tracked')] + ['num_batches_tracked x{}'.format(len(nbt_keys))]\n return keys",
"def split(self, elIndicesDict, maxSubTreeSize=None, numSubTrees=None, verbosity=0):\n #dbList = self.generate_circuit_list()\n tm = _time.time()\n printer = _VerbosityPrinter.build_printer(verbosity)\n\n if (maxSubTreeSize is None and numSubTrees is None) or \\\n (maxSubTreeSize is not None and numSubTrees is not None):\n raise ValueError(\"Specify *either* maxSubTreeSize or numSubTrees\")\n if numSubTrees is not None and numSubTrees <= 0:\n raise ValueError(\"EvalTree split() error: numSubTrees must be > 0!\")\n\n #Don't split at all if it's unnecessary\n if maxSubTreeSize is None or len(self) < maxSubTreeSize:\n if numSubTrees is None or numSubTrees == 1: return elIndicesDict\n\n self.subTrees = []\n evalOrder = self.get_evaluation_order()\n printer.log(\"EvalTree.split done initial prep in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n\n def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n \"\"\"\n Find a set of subtrees by iterating through the tree\n and placing \"break\" points when the cost of evaluating the\n subtree exceeds some 'maxCost'. This ensure ~ equal cost\n trees, but doesn't ensure any particular number of them.\n\n maxCostRate can be set to implement a varying maxCost\n over the course of the iteration.\n \"\"\"\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost\n\n ##################################################################\n # Part I: find a list of where the current tree should be broken #\n ##################################################################\n\n subTreeSetList = []\n if numSubTrees is not None:\n\n subTreeSize = len(self) // numSubTrees\n for i in range(numSubTrees):\n end = (i + 1) * subTreeSize if (i < numSubTrees - 1) else len(self)\n subTreeSetList.append(set(range(i * subTreeSize, end)))\n\n 
else: # maxSubTreeSize is not None\n k = 0\n while k < len(self):\n end = min(k + maxSubTreeSize, len(self))\n subTreeSetList.append(set(range(k, end)))\n k = end\n\n ##########################################################\n # Part II: create subtrees from index sets\n ##########################################################\n # (common logic provided by base class up to providing a few helper fns)\n\n def permute_parent_element(perm, el):\n \"\"\"Applies a permutation to an element of the tree \"\"\"\n # perm[oldIndex] = newIndex\n return el # no need to permute operation sequence\n\n def create_subtree(parentIndices, numFinal, fullEvalOrder, sliceIntoParentsFinalArray, parentTree):\n \"\"\"\n Creates a subtree given requisite information:\n\n Parameters\n ----------\n parentIndices : list\n The ordered list of (parent-tree) indices to be included in\n the created subtree.\n\n numFinal : int\n The number of \"final\" elements, i.e. those that are used to\n construct the final array of results and not just an intermediate.\n The first numFinal elemements of parentIndices are \"final\", and\n 'sliceIntoParentsFinalArray' tells you which final indices of\n the parent they map to.\n\n fullEvalOrder : list\n A list of the integers between 0 and len(parentIndices)-1 which\n gives the evaluation order of the subtree *including* evaluation\n of any initial elements.\n\n sliceIntoParentsFinalArray : slice\n Described above - map between to-be-created subtree's final\n elements and parent-tree indices.\n\n parentTree : EvalTree\n The parent tree itself.\n \"\"\"\n subTree = TermEvalTree()\n subTree.myFinalToParentFinalMap = sliceIntoParentsFinalArray\n subTree.num_final_strs = numFinal\n subTree[:] = [None] * len(parentIndices)\n subTree.p_polys = {}\n subTree.dp_polys = {}\n subTree.hp_polys = {}\n subTree.repcache = {}\n\n for ik in fullEvalOrder: # includes any initial indices\n k = parentIndices[ik] # original tree index\n circuit = self[k] # original tree data\n subTree.eval_order.append(ik)\n assert(subTree[ik] is None)\n subTree[ik] = circuit\n\n subTree.parentIndexMap = parentIndices # parent index of each subtree index\n subTree.simplified_circuit_spamTuples = [self.simplified_circuit_spamTuples[kk]\n for kk in _slct.indices(subTree.myFinalToParentFinalMap)]\n #subTree._compute_finalStringToEls() #depends on simplified_circuit_spamTuples\n\n final_el_startstops = []; i = 0\n for spamTuples in parentTree.simplified_circuit_spamTuples:\n final_el_startstops.append((i, i + len(spamTuples)))\n i += len(spamTuples)\n subTree.myFinalElsToParentFinalElsMap = _np.concatenate(\n [_np.arange(*final_el_startstops[kk])\n for kk in _slct.indices(subTree.myFinalToParentFinalMap)])\n #Note: myFinalToParentFinalMap maps only between *final* elements\n # (which are what is held in simplified_circuit_spamTuples)\n\n subTree.num_final_els = sum([len(v) for v in subTree.simplified_circuit_spamTuples])\n subTree.recompute_spamtuple_indices(bLocal=False)\n\n subTree.opLabels = self._get_opLabels(subTree.generate_circuit_list(permute=False))\n\n return subTree\n\n updated_elIndices = self._finish_split(elIndicesDict, subTreeSetList,\n permute_parent_element, create_subtree)\n printer.log(\"EvalTree.split done second pass in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n return updated_elIndices",
"def kb_train_test_split(test_size, random_state):\n\n cnxn_path = \"/polyaxon-data/goldenretriever/db_cnxn_str.txt\"\n conn = pyodbc.connect(open(cnxn_path, 'r').read())\n\n SQL_Query = pd.read_sql_query('''SELECT dbo.query_labels.id, dbo.query_db.query_string, \\\n dbo.kb_clauses.processed_string, dbo.kb_raw.kb_name, dbo.kb_raw.type FROM dbo.query_labels \\\n JOIN dbo.query_db ON dbo.query_labels.query_id = dbo.query_db.id \\\n JOIN dbo.kb_clauses ON dbo.query_labels.clause_id = dbo.kb_clauses.id \\\n JOIN dbo.kb_raw ON dbo.kb_clauses.raw_id = dbo.kb_raw.id''', conn)\n\n df = pd.DataFrame(SQL_Query).set_index('id')\n kb_names = df['kb_name'].unique()\n\n train_dict = dict()\n test_dict = dict()\n\n train_idx_all = []\n test_idx_all = []\n\n for kb_name in kb_names:\n kb_id = df[df['kb_name'] == kb_name].index.values\n train_idx, test_idx = train_test_split(kb_id, test_size=test_size,\n random_state=random_state)\n \n train_dict[kb_name] = train_idx\n test_dict[kb_name] = test_idx\n \n for k,v in train_dict.items():\n for idx in v:\n train_idx_all.append(idx)\n \n for k,v in test_dict.items():\n for idx in v:\n test_idx_all.append(idx)\n \n return df, train_dict, test_dict, train_idx_all, test_idx_all",
"def _resize_state(self):\n # self.n_estimators is the number of additional est to fit\n total_n_estimators = self.n_estimators\n if total_n_estimators < self.estimators_.shape[0]:\n raise ValueError('resize with smaller n_estimators %d < %d' %\n (total_n_estimators, self.estimators_[0]))\n\n self.estimators_.resize((total_n_estimators, self.loss_.K))\n self.train_score_.resize(total_n_estimators)\n if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):\n # if do oob resize arrays or create new if not available\n if hasattr(self, 'oob_improvement_'):\n self.oob_improvement_.resize(total_n_estimators)\n else:\n self.oob_improvement_ = np.zeros((total_n_estimators,),\n dtype=np.float64)",
"def rechunking_plan(\n dim_sizes: Mapping[str, int],\n source_chunks: Mapping[str, int],\n target_chunks: Mapping[str, int],\n itemsize: int,\n max_mem: int,\n) -> List[Dict[str, int]]:\n plan_shapes = algorithm.rechunking_plan(\n shape=tuple(dim_sizes.values()),\n source_chunks=tuple(source_chunks[dim] for dim in dim_sizes),\n target_chunks=tuple(target_chunks[dim] for dim in dim_sizes),\n itemsize=itemsize,\n max_mem=max_mem,\n )\n return [dict(zip(dim_sizes.keys(), shapes)) for shapes in plan_shapes]",
"def test_partition_on_large_vertex_than_has_to_be_split(self):\n self.setup()\n large_vertex = TestVertex(300, \"Large vertex\")\n self.graph = ApplicationGraph(\n \"Graph with large vertex\", [large_vertex], [])\n graph, mapper = self.bp.partition(self.graph, self.machine)\n self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)\n self.assertGreater(len(graph.vertices), 1)",
"def _calculate_step_sizes(x_size, y_size, num_chunks):\n # First we try to split only along fast x axis\n xstep = max(1, int(x_size / num_chunks))\n\n # More chunks are needed only if xstep gives us fewer chunks than\n # requested.\n x_chunks = int(x_size / xstep)\n\n if x_chunks >= num_chunks:\n ystep = y_size\n else:\n # The x and y loops are nested, so the number of chunks\n # is multiplicative, not additive. Calculate the number\n # of y chunks we need to get at num_chunks.\n y_chunks = int(num_chunks / x_chunks) + 1\n ystep = max(1, int(y_size / y_chunks))\n\n return xstep, ystep",
"def init_states(self, batch_size: int) -> NestedMap:\n raise NotImplementedError('Abstract method')",
"def state_size(self):\n return [tf.TensorShape([self.dmodel]),tf.TensorShape([self.dmodel]),tf.TensorShape([self.dmodel])]",
"def __init__(self,\n total_kv_pooling,\n n_heads=1,\n dropout=0.0,\n n_raw_tokens_generated=1,\n max_inference_length=3072,\n chunk_len=None,\n chunk_offset=None,\n mode='train'):\n super().__init__(n_in=7, n_out=2)\n self._total_kv_pooling = total_kv_pooling\n self._n_heads = n_heads\n self._dropout = dropout\n self._n_raw_tokens_generated = n_raw_tokens_generated\n self._max_len = max_inference_length\n self._chunk_len = chunk_len\n self._chunk_offset = chunk_offset\n self._mode = mode",
"def get_optimal_submesh_assignments(\n best_n_stages, F_argmin, n_devices, n_ops, submesh_sizes\n):\n current_s = best_n_stages\n current_layer = 0\n current_devices = n_devices\n\n optimal_layer_submesh_assignments = []\n while current_s > 0 and current_layer < n_ops and current_devices > 0:\n next_start_layer, submesh_shape_idx, sharding_config_idx = F_argmin[\n current_s, current_layer, current_devices\n ]\n assert next_start_layer != -1 and current_devices != -1\n optimal_layer_submesh_assignments.append(\n ((current_layer, next_start_layer), submesh_shape_idx, sharding_config_idx)\n )\n current_s -= 1\n current_layer = next_start_layer\n current_devices -= submesh_sizes[submesh_shape_idx]\n\n assert current_s == 0 and current_layer == n_ops and current_devices == 0\n\n return optimal_layer_submesh_assignments",
"def split_train_eval(\n self,\n eval_size: Union[int, float] = 0.25,\n random_state: Optional[int] = None,\n ) -> None:\n (\n self.X_tr,\n self.X_ev,\n self.y_tr,\n self.y_ev,\n _,\n self.y_full_ev,\n ) = train_test_split(\n self.X, self.y, self.y_full, test_size=eval_size, random_state=random_state\n )\n self.n_rounds_ev = self.X_ev.shape[0]",
"def test_partition_on_very_large_vertex_than_has_to_be_split(self):\n self.setup()\n large_vertex = TestVertex(500, \"Large vertex\")\n self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)\n self.graph = ApplicationGraph(\n \"Graph with large vertex\", [large_vertex], [])\n graph, mapper = self.bp.partition(self.graph, self.machine)\n self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256)\n self.assertGreater(len(graph.vertices), 1)",
"def _get_state_sizes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n \n islot = 0\n parent_oslot_pair = 0\n oslot = 0\n an_ishape = an_inode.oshapes[self.node_list_tuples[islot][parent_oslot_pair][oslot]]\n \n return [[an_ishape[-1]]]",
"def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes",
"def _assert_like_multi_cell_state(x, layer_sizes, cell_type):\n if cell_type == 'Basic' or cell_type == 'GRU': # Basic and GRU states have same shape\n try:\n shapes = [_get_shape(layer) for layer in x]\n except:\n raise ValueError('State did not have expected form for Basic or GRU rnn state. Got:\\n{}'.format(x))\n batch_size = shapes[0][0]\n for (i, s) in enumerate(shapes):\n if s[0] != batch_size:\n raise ValueError('Inconsistent batch sizes. Expected {} based on 0th layer, but found {} in layer {}.'\n .format(batch_size, s[0], i))\n if s[1] != layer_sizes[i]:\n raise ValueError('State size at layer {} was {}, but layer size is {}.'.format(i, s[1], layer_sizes[i]))\n return\n elif cell_type == 'LSTM':\n try:\n shapes = [[_get_shape(xx) for xx in layer] for layer in x]\n except:\n raise ValueError('State did not have expected form for LSTM state. Got:\\n{}'.format(x))\n batch_size = shapes[0][0][0]\n for (i, s) in enumerate(shapes):\n if s[0][0] != batch_size:\n raise ValueError('Inconsistent batch sizes. Expected {} based on 0th layer, but found {} in c in layer {}.'\n .format(batch_size, s[0][0], i))\n if s[1][0] != batch_size:\n raise ValueError('Inconsistent batch sizes. Expected {} based on 0th layer, but found {} in h in layer {}.'\n .format(batch_size, s[1][0], i))\n if s[0][1] != layer_sizes[i]:\n raise ValueError('State size in c at layer {} was {}, but layer size is {}.'.format(i, s[0][1], layer_sizes[i]))\n if s[1][1] != layer_sizes[i]:\n raise ValueError('State size in h at layer {} was {}, but layer size is {}.'.format(i, s[1][1], layer_sizes[i]))\n return\n else:\n raise ValueError('Allowed cell types are \"Basic\", \"LSTM\" and \"GRU\". Got {}.'.format(x))",
"def pipeline_splitting_rule(val_size = 0.2, test_size = 0.2, random_state = 13):\n custom_val_size,custom_size,custom_random_state = val_size, test_size, random_state\n return(custom_val_size,custom_size,custom_random_state)",
"def dfs_maximizing(state) :\n #print state.describe_previous_move()\n global state_evals, path, _path, _score, level, _state;\n\n level+=1\n path.append(state)\n for stt in state.generate_next_states():\n score=0\n agenda.append((stt, level))\n \n if stt.is_game_over():\n state_evals+=1\n score=stt.get_endgame_score()\n if score>_score:\n _score=score\n _path = path[0:]\n _state = stt\n if not agenda:\n\n _path.append(_state)\n return [_path, _score, state_evals];\n else:\n new_state, level=agenda.pop()\n path=path[0:level]\n level-=1\n return dfs_maximizing(new_state)",
"def state_size(self):\n\t\treturn (\n\t\t\ttf.TensorShape([self.args[\"kb_node_max_len\"], self.args[\"mp_state_width\"]]),\n\t\t)",
"def _get_state_sizes(self):\n ds = self.builder.nodes[self.ds_inputs[0]]\n return [[ds.xdim]]"
] |
[
"0.5447984",
"0.5420202",
"0.5262327",
"0.5197391",
"0.518604",
"0.5176101",
"0.5175833",
"0.5174734",
"0.5156535",
"0.5135529",
"0.51065505",
"0.5106479",
"0.5078083",
"0.5054104",
"0.50533545",
"0.5044372",
"0.50167805",
"0.4993698",
"0.49864617",
"0.49733406",
"0.4969168",
"0.49465075",
"0.49429157",
"0.49384713",
"0.4917634",
"0.49135038",
"0.49053577",
"0.4902489",
"0.48704135",
"0.4863747"
] |
0.6677971
|
0
|
r""" Cast the floatingpoint `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full halfprecision training or to save weights in bfloat16 for inference in order to save memory and improve speed.
|
def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
return self._cast_floating_to(params, jnp.bfloat16, mask)
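
A minimal usage sketch for the method above (the `FlaxBertModel` class and checkpoint name are illustrative assumptions; `mask` marks which parameter leaves should be cast, matching the semantics of the `_cast_floating_to` helper shown among the negatives):

from flax import traverse_util
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("bert-base-cased")  # params start in float32
# Option 1: cast every floating-point leaf to bfloat16 (returns a new tree, so reassign)
params_bf16 = model.to_bf16(model.params)
# Option 2: keep LayerNorm parameters in float32 by passing a boolean mask (True = cast this leaf)
flat = traverse_util.flatten_dict(model.params)
mask = {path: path[-2:] not in (("LayerNorm", "bias"), ("LayerNorm", "scale")) for path in flat}
model.params = model.to_bf16(model.params, traverse_util.unflatten_dict(mask))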
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float16, mask)",
"def cast_parameters_to_bf16(place, program, scope=None, to_bf16_var_names=None):\n all_parameters = []\n for block in program.blocks:\n all_parameters.extend(block.all_parameters())\n\n bf16_var_names = to_bf16_var_names if to_bf16_var_names else set()\n var_scope = scope if scope else global_scope()\n for param in all_parameters:\n if param.name in bf16_var_names:\n _logger.debug(f\"---- cast {param.name} to bf16 dtype ----\")\n param_t = var_scope.find_var(param.name).get_tensor()\n data = np.array(param_t)\n param_t.set(convert_float_to_uint16(data), place)",
"def _update_use_bfloat16(configs, use_bfloat16):\n configs[\"train_config\"].use_bfloat16 = use_bfloat16",
"def cast_model_to_bf16(\n program, startup_prog=None, amp_lists=None, use_bf16_guard=True\n):\n\n if amp_lists is None:\n amp_lists = AutoMixedPrecisionListsBF16()\n global_block = program.global_block()\n keep_fp32_ops = set()\n to_bf16_var_names = set()\n to_bf16_pre_cast_ops = set()\n origin_ops = []\n for block in program.blocks:\n origin_ops.extend(block.ops)\n\n for block in program.blocks:\n ops = block.ops\n for op in ops:\n if op.type == 'create_py_reader' or op.type == 'read':\n continue\n if _need_keep_fp32(op, amp_lists.unsupported_list, use_bf16_guard):\n keep_fp32_ops.add(op)\n continue # processed below\n for in_name in op.input_names:\n if op.type in {\n 'batch_norm',\n 'fused_bn_add_activation',\n 'layer_norm',\n } and in_name not in {'X', 'Z'}:\n continue\n for in_var_name in op.input(in_name):\n in_var = None\n try:\n in_var = block.var(in_var_name)\n except ValueError as e:\n _logger.debug(\n \"-- {}, try to get it in the global block --\".format(\n e\n )\n )\n in_var = global_block.var(in_var_name)\n if in_var is not None:\n _logger.debug(\n \"-- var {} is got in the global block --\".format(\n in_var_name\n )\n )\n\n if in_var is None or in_var.type not in _valid_types:\n continue\n\n if in_var.dtype == core.VarDesc.VarType.FP32:\n in_var.desc.set_dtype(core.VarDesc.VarType.BF16)\n to_bf16_var_names.add(in_var_name)\n\n _logger.debug(\n \"-- op type: {}, in var name: {}, in var dtype: {} --\".format(\n op.type, in_var_name, in_var.dtype\n )\n )\n\n for out_name in op.output_names:\n if (\n op.type\n in {'batch_norm', 'fused_bn_add_activation', 'layer_norm'}\n and out_name != 'Y'\n ):\n continue\n for out_var_name in op.output(out_name):\n out_var = None\n try:\n out_var = block.var(out_var_name)\n except ValueError as e:\n _logger.debug(\n \"-- {}, try to get it in the global block --\".format(\n e\n )\n )\n out_var = global_block.var(out_var_name)\n if out_var is not None:\n _logger.debug(\n \"-- var {} is got in the global block --\".format(\n out_var_name\n )\n )\n\n if out_var is None or out_var.type not in _valid_types:\n continue\n\n if out_var.dtype == core.VarDesc.VarType.FP32:\n out_var.desc.set_dtype(core.VarDesc.VarType.BF16)\n\n _logger.debug(\n \"-- op type: {}, out var name: {}, out var dtype: {} --\".format(\n op.type, out_var_name, out_var.dtype\n )\n )\n for attr_name in ['in_dtype', 'out_dtype', 'dtype']:\n if (\n op.has_attr(attr_name)\n and op.attr(attr_name) == core.VarDesc.VarType.FP32\n ):\n op._set_attr(attr_name, core.VarDesc.VarType.BF16)\n if op.has_attr('use_mkldnn'):\n op._set_attr('use_mkldnn', True)\n if op.has_attr('mkldnn_data_type'):\n op._set_attr('mkldnn_data_type', 'bfloat16')\n\n if startup_prog is not None:\n cast_initializers_to_bf16(\n startup_prog,\n amp_lists,\n global_block,\n ops,\n keep_fp32_ops,\n to_bf16_var_names,\n )\n\n # process ops in keep_fp32_ops\n op_var_rename_map = [\n collections.OrderedDict() for _ in range(len(program.blocks))\n ]\n for block in program.blocks:\n ops = block.ops\n idx = 0\n while idx < len(ops):\n op = ops[idx]\n num_cast_ops = 0\n if op not in keep_fp32_ops:\n if op in to_bf16_pre_cast_ops:\n in_var_cast_num = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n )\n num_cast_ops += in_var_cast_num\n else:\n pre_cast_num = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.BF16,\n core.VarDesc.VarType.FP32,\n )\n num_cast_ops += pre_cast_num\n for out_var_name in op.output_arg_names:\n out_var = block.vars.get(out_var_name)\n if 
out_var is None or out_var.type not in _valid_types:\n continue\n if out_var.dtype == core.VarDesc.VarType.BF16:\n out_var.desc.set_dtype(core.VarDesc.VarType.FP32)\n post_ops = find_true_post_op(ops, op, out_var_name)\n for post_op in post_ops:\n if post_op in keep_fp32_ops:\n continue\n post_cast_num = _insert_cast_post_op(\n block,\n op,\n idx + pre_cast_num + 1,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n out_var_name,\n op_var_rename_map,\n )\n num_cast_ops += post_cast_num\n idx += num_cast_ops + 1\n\n _rename_op_input(program, op_var_rename_map, origin_ops, keep_fp32_ops)\n return to_bf16_var_names",
"def bfloat16_to_float32(tensor):\n if tensor.dtype == tf.bfloat16:\n return tf.cast(tensor, dtype=tf.float32)\n else:\n return tensor",
"def _convert_example(example, use_bfloat16):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val",
"def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n\n # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27\n def conditional_cast(param):\n if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):\n param = param.astype(dtype)\n return param\n\n if mask is None:\n return jax.tree_util.tree_map(conditional_cast, params)\n\n flat_params = flatten_dict(params)\n flat_mask, _ = jax.tree_util.tree_flatten(mask)\n\n for masked, key in zip(flat_mask, flat_params.keys()):\n if masked:\n param = flat_params[key]\n flat_params[key] = conditional_cast(param)\n\n return unflatten_dict(flat_params)",
"def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float32, mask)",
"def bfloat16_to_float32_nested(bfloat16_tensor_dict):\n float32_tensor_dict = {}\n for k, v in bfloat16_tensor_dict.items():\n if isinstance(v, tf.Tensor):\n float32_tensor_dict[k] = bfloat16_to_float32(v)\n elif isinstance(v, (list, tuple)):\n float32_tensor_dict[k] = [bfloat16_to_float32(t) for t in v]\n return float32_tensor_dict",
"def convert_example(example, use_bfloat16=False):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val",
"def _shared_params_fp16(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50_v1.5',\n batch_size=256,\n distortions=False,\n use_fp16=True,\n optimizer='momentum',\n loss_type_to_report='base_loss',\n compute_lr_on_cpu=True,\n single_l2_loss_op=True\n )",
"def get_data_type(params):\n return tf.float16 if params.use_fp16 else tf.float32",
"def _shared_params_fp16(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50',\n batch_size=256,\n distortions=False,\n use_fp16=True,\n optimizer='momentum',\n loss_type_to_report='base_loss',\n compute_lr_on_cpu=True,\n single_l2_loss_op=True\n )",
"def convert_model(self, target_dtype=\"float16\", **kwargs):\n if self._model_format == 'pb':\n self._quantizer.quantize_model(target_dtype=target_dtype, **kwargs)\n return None\n elif self._model_format == 'subclass':\n logger.warning('This function does not support subclass model.')\n return None\n\n ## need fold BN firstly as BN not support float16\n available_type = [\"float32\", \"float16\", \"float64\", \"bfloat16\"]\n if target_dtype not in available_type:\n logger.error('Unknown target data type: {}, expecte one of supported ' \\\n 'data type {}.'.format(target_dtype, available_type))\n self.optimize_model(remove_dropout=False, include_cle=False)\n\n model_config = self._optimized_model.get_config()\n for l in model_config[\"layers\"]:\n l[\"config\"][\"dtype\"] = target_dtype\n converted_model = keras.Model.from_config(\n model_config, custom_objects=self.custom_objects)\n weights = self._optimized_model.get_weights()\n weights = [w.astype(target_dtype) for w in weights]\n converted_model.set_weights(weights)\n logger.info(\"Convert model data type to {}\".format(target_dtype))\n return converted_model",
"def build_model(\n self,\n cfg: Config,\n fp16: bool = False,\n **kwargs,\n ) -> torch.nn.Module:\n model_builder = getattr(self, \"model_builder\", build_segmentor)\n model = model_builder(cfg, **kwargs)\n if bool(fp16):\n wrap_fp16_model(model)\n return model",
"def save_float16_npy(data, path):\n np.save(path, data.astype(np.float16))",
"def _(_: FloatType, value: float) -> bytes:\n return _FLOAT_STRUCT.pack(value)",
"def convert_uint16_to_float_ifneed(self, actual_np, expect_np):\n if actual_np.dtype == np.uint16:\n if expect_np.dtype in [np.float32, np.float64]:\n actual_np = convert_uint16_to_float(actual_np)\n self.rtol = 1.0e-2\n elif actual_np.dtype == np.float16:\n self.rtol = 1.0e-3\n else:\n self.rtol = 1.0e-5\n if (\n expect_np.dtype == np.uint16\n and actual_np.dtype == np.uint16\n ):\n nonlocal atol\n expect_np = convert_uint16_to_float(expect_np)\n actual_np = convert_uint16_to_float(actual_np)\n atol = max(atol, 0.03)\n return actual_np, expect_np",
"def rewrite_program_bf16(main_prog, amp_lists=None):\n if amp_lists is None:\n amp_lists = AutoMixedPrecisionListsBF16()\n block = main_prog.global_block()\n ops = block.ops\n bf16_op_set = set()\n fp32_op_set = set()\n for op in ops:\n # NOTE(zhiqiu): 'create_py_reader' and 'read' is used in non-iterable DataLoder,\n # we don't need to handle reader op and the input of 'create_py_reader' is not\n # in block, which may result in errors.\n # See GeneratorLoader._init_non_iterable() for details.\n if op.type == 'create_py_reader' or op.type == 'read':\n continue\n\n if amp_lists.fp32_varnames is not None and _is_in_fp32_varnames(\n op, amp_lists\n ):\n fp32_op_set.add(op)\n continue\n\n if op.type in amp_lists.fp32_list:\n fp32_op_set.add(op)\n elif op.type in amp_lists.bf16_list:\n bf16_op_set.add(op)\n elif op.type in amp_lists.gray_list:\n is_fp32_op = False\n is_bf16_op = False\n for in_name in op.input_names:\n # if this op has inputs\n if in_name:\n for in_var_name in op.input(in_name):\n in_var = block.var(in_var_name)\n # this in_var isn't the output of other op\n if in_var.op is None:\n continue\n elif in_var.op is op:\n prev_op = find_true_prev_op(ops, op, in_var_name)\n if prev_op is None:\n continue\n else:\n prev_op = in_var.op\n # if it's one of inputs\n if (\n prev_op in fp32_op_set\n or prev_op.type in amp_lists.fp32_list\n ):\n is_fp32_op = True\n elif (\n prev_op in bf16_op_set\n or prev_op.type in amp_lists.bf16_list\n ):\n is_bf16_op = True\n if is_fp32_op:\n fp32_op_set.add(op)\n elif is_bf16_op:\n bf16_op_set.add(op)\n else:\n pass\n else:\n # For numerical safe, we apply fp32 computation on ops that\n # are not determined which list they should stay.\n fp32_op_set.add(op)\n\n idx = 0\n while idx < len(ops):\n op = ops[idx]\n num_cast_ops = 0\n if op in fp32_op_set:\n num_cast_ops = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.BF16,\n core.VarDesc.VarType.FP32,\n )\n elif op in bf16_op_set:\n if op.has_attr('use_mkldnn'):\n op._set_attr('use_mkldnn', True)\n op._set_attr('mkldnn_data_type', 'bfloat16')\n elif (\n op.has_attr('dtype')\n and op.attr('dtype') == core.VarDesc.VarType.FP32\n ):\n op._set_attr('dtype', core.VarDesc.VarType.BF16)\n\n num_cast_ops = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n )\n else:\n pass\n\n idx += num_cast_ops + 1",
"def convertToFloat(boolean: bool) -> float:\n ...",
"def dtype_float(dtype: DType):\n return promote_dtypes(dtype, np.float16)",
"def _cast_grad_to_param_dtype(\n self,\n grad: torch.Tensor,\n param: FlatParameter,\n ):\n self._assert_state(TrainingState_.BACKWARD_POST)\n if (\n not self._low_precision_hook_enabled()\n and (\n self._mixed_precision_enabled_for_params()\n or self._mixed_precision_enabled_for_reduce()\n )\n ):\n low_prec_grad_data = grad.data\n grad.data = grad.data.to(dtype=param.dtype)\n # Do not let the low precision gradient memory get reused until\n # the cast to full parameter precision completes\n low_prec_grad_data.record_stream(torch.cuda.current_stream())",
"def set_params(self, params):\n self._W = np.reshape(params[0:self._W.size], self._W.shape)\n end = self._W.size + self._b.size\n self._b = np.reshape(params[self._W.size:end], self._b.shape)",
"def _shorts2float(lo_byte_pair, hi_byte_pair):\n\tba = bytearray(struct.pack(\"HH\", lo_byte_pair, hi_byte_pair))\n\t[f] = struct.unpack('f', ba)\n\treturn f",
"def get_b16_config():\n config = get_base_config()\n config.update(dict(patches=(16, 16)))\n return config",
"def tuning_cfg_to_fw(self, tuning_cfg):\n self.quantize_config['calib_iteration'] = tuning_cfg['calib_iteration']\n self.quantize_config['device'] = self.device\n fp32_ops = []\n bf16_ops = []\n for each_op_info in tuning_cfg['op']:\n op_name = each_op_info[0]\n\n if tuning_cfg['op'][each_op_info]['activation']['dtype'] in ['fp32', 'bf16']:\n if op_name in self.quantize_config['op_wise_config']:\n self.quantize_config['op_wise_config'].pop(op_name)\n if tuning_cfg['op'][each_op_info]['activation']['dtype'] == 'fp32':\n fp32_ops.append(op_name)\n if tuning_cfg['op'][each_op_info]['activation']['dtype'] == 'bf16':\n bf16_ops.append(op_name)\n continue\n\n is_perchannel = False\n if 'weight' in tuning_cfg['op'][each_op_info]:\n is_perchannel = tuning_cfg['op'][each_op_info]['weight'][\n 'granularity'] == 'per_channel'\n algorithm = tuning_cfg['op'][each_op_info]['activation']['algorithm']\n\n is_asymmetric = False\n if 'activation' in tuning_cfg['op'][each_op_info]:\n is_asymmetric = tuning_cfg['op'][each_op_info]['activation']['scheme'] == 'asym'\n self.quantize_config['op_wise_config'][op_name] = (is_perchannel,\n algorithm,\n is_asymmetric)\n self.fp32_ops = fp32_ops\n self.bf16_ops = bf16_ops\n int8_sum_count = 0\n bf16_sum_count = 0\n log_length = 50\n print('|', 'Mixed Precision Statistics'.center(log_length, \"*\"), \"|\")\n for i in self._init_op_stat:\n if len(self._init_op_stat[i]) == 0:\n continue\n count = 0\n for j in self.quantize_config['op_wise_config'].keys():\n if j in self._init_op_stat[i]:\n count += 1\n int8_sum_count += count\n print('|', 'INT8 {}: {} '.format(i, count).ljust(log_length), \"|\")\n bf16_count = 0\n for k in self.bf16_ops:\n if k in self._init_op_stat[i]:\n bf16_count += 1\n if bf16_count > 0:\n print('|', 'BF16 {}: {}'.format(i, bf16_count).ljust(log_length), \"|\")\n bf16_sum_count += bf16_count\n overall_ops_count = sum([len(v) for _, v in self._init_op_stat.items()])\n if overall_ops_count > 0:\n int8_percent = float(int8_sum_count / overall_ops_count)\n bf16_percent = float(bf16_sum_count / overall_ops_count)\n print('|', 'Overall: INT8 {:.2%} ({}/{}) BF16 {:.2%} ({}/{})'.format(int8_percent,\n int8_sum_count,\n overall_ops_count,\n bf16_percent,\n bf16_sum_count,\n overall_ops_count)\n .ljust(log_length),\n \"|\")\n print('|', '*' * log_length, \"|\")",
"def auto_fp16(apply_to=None, out_fp32=False):\n warnings.warn('auto_fp16 in mmpose will be deprecated in the next release.Please use mmcv.runner.auto_fp16 instead (mmcv>=1.3.1).', DeprecationWarning)\n\n def auto_fp16_wrapper(old_func):\n\n @functools.wraps(old_func)\n def new_func(*args, **kwargs):\n if not isinstance(args[0], torch.nn.Module):\n raise TypeError('@auto_fp16 can only be used to decorate the method of nn.Module')\n if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):\n return old_func(*args, **kwargs)\n args_info = getfullargspec(old_func)\n args_to_cast = args_info.args if apply_to is None else apply_to\n new_args = []\n if args:\n arg_names = args_info.args[:len(args)]\n for i, arg_name in enumerate(arg_names):\n if arg_name in args_to_cast:\n new_args.append(cast_tensor_type(args[i], torch.float, torch.half))\n else:\n new_args.append(args[i])\n new_kwargs = {}\n if kwargs:\n for arg_name, arg_value in kwargs.items():\n if arg_name in args_to_cast:\n new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, torch.half)\n else:\n new_kwargs[arg_name] = arg_value\n output = old_func(*new_args, **new_kwargs)\n if out_fp32:\n output = cast_tensor_type(output, torch.half, torch.float)\n return output\n return new_func\n return auto_fp16_wrapper",
"def vgg16_bn(*args):\n return _VGGWrapper(models.vgg16_bn(*args))",
"def update_params(self, update_weights, update_bias):\n\n if not self.trainable:\n return\n\n update_weights = np.squeeze(update_weights)\n update_bias = np.squeeze(update_bias)\n\n # some have non-trainable parameters, in addition to the\n # weights and biases\n if len(self.params) == 2:\n self.params = (self.params[0] + update_weights, self.params[1] + update_bias)\n else:\n self.params = (self.params[0] + update_weights, self.params[1] + update_bias) + self.params[2:]\n\n # create updated function\n self.f = self.gen_f(self.params, self.output_shape)",
"def benchmark_fp16_synth_forward_batch16(self):\n params = self._shared_params()._replace(batch_size=16, use_fp16=True)\n self._run_benchmark(params)"
] |
[
"0.7070078",
"0.69352823",
"0.65822524",
"0.5615649",
"0.5578388",
"0.5462372",
"0.53054917",
"0.530453",
"0.52218384",
"0.51704764",
"0.5153915",
"0.51356757",
"0.51211435",
"0.50357515",
"0.5008127",
"0.4879392",
"0.485458",
"0.48361912",
"0.48303774",
"0.47836018",
"0.4773793",
"0.4760504",
"0.466456",
"0.46452013",
"0.46069628",
"0.45837954",
"0.4563658",
"0.4558076",
"0.45576093",
"0.45506346"
] |
0.75039154
|
0
|
r""" Cast the floatingpoint `parmas` to `jax.numpy.float32`. This method can be used to explicitly convert the model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.
|
def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
return self._cast_floating_to(params, jnp.float32, mask)
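
A short follow-up sketch, assuming a Flax model whose parameters were previously cast to half precision: `to_fp32` restores full precision (again returning a new tree) before the weights are written out.

# `model` is assumed to be a Flax model holding half-precision params from earlier training
model.params = model.to_fp32(model.params)   # new float32 tree; nothing is cast in place
model.save_pretrained("./checkpoint-fp32")   # illustrative output path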
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n\n # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27\n def conditional_cast(param):\n if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):\n param = param.astype(dtype)\n return param\n\n if mask is None:\n return jax.tree_util.tree_map(conditional_cast, params)\n\n flat_params = flatten_dict(params)\n flat_mask, _ = jax.tree_util.tree_flatten(mask)\n\n for masked, key in zip(flat_mask, flat_params.keys()):\n if masked:\n param = flat_params[key]\n flat_params[key] = conditional_cast(param)\n\n return unflatten_dict(flat_params)",
"def to_float32(elem):\n return elem.astype(np.float32)",
"def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data",
"def to_float32(n):\n return np.cast[\"float32\"](n)",
"def ts_float32(val):\n return np.float64(val)",
"def convert_to_fp32(tensor):\n\n def _convert_to_fp32(tensor):\n return tensor.float()\n\n def _is_fp16_bf16_tensor(tensor):\n return hasattr(tensor, \"dtype\") and tensor.dtype in (torch.float16, torch.bfloat16)\n\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)",
"def _cast_forward_inputs(self, *args, **kwargs):\n # TODO: Do not use the side stream for tensor copies for now;\n # investigate the perf with/without it\n # TODO: For mixed precision, move the inputs to the compute device and\n # cast to reduced-precision in a single `to()` call\n args, kwargs = _to_kwargs(args, kwargs, self.compute_device.index, False)\n args = args[0]\n kwargs = kwargs[0]\n if self._mixed_precision_enabled_for_params():\n input_dtype = self.mixed_precision.param_dtype\n args, kwargs = self._cast_fp_inputs_to_dtype(\n input_dtype, *args, **kwargs,\n )\n return args, kwargs",
"def _cast_floats_tensors(dtype: torch.dtype, *args: Any,\n **kwargs: Any) -> Tuple[Any, Any]:\n\n def fn(t):\n if t.dtype != dtype and torch.is_floating_point(t):\n t = t.to(dtype)\n return t\n\n return apply_to_tensors(fn, args), apply_to_tensors(fn, kwargs)",
"def test_float32(self):\r\n start, stop, step = fscalars('start', 'stop', 'step')\r\n out = arange(start, stop, step)\r\n f = function([start, stop, step], out)\r\n\r\n if config.cast_policy == 'custom':\r\n assert out.dtype == start.type.dtype\r\n elif config.cast_policy == 'numpy':\r\n numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),\r\n numpy.array(1, dtype=stop.dtype),\r\n numpy.array(1, dtype=step.dtype)).dtype\r\n assert out.dtype == numpy_dtype\r\n elif config.cast_policy == 'numpy+floatX':\r\n assert out.dtype == config.floatX\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n arg_vals = [(0, 5, 1), (2, 11, 4), (-5, 1.1, 1.2), (1.3, 2,\r\n -2.1), (10, 2, 2)]\r\n for arg_v in arg_vals:\r\n start_v, stop_v, step_v = arg_v\r\n start_v_, stop_v_, step_v_ = numpy.asarray(arg_v,\r\n dtype=start.type.dtype)\r\n f_val = f(start_v_, stop_v_, step_v_)\r\n if config.cast_policy == 'custom':\r\n expected_val = numpy.arange(start_v, stop_v, step_v,\r\n dtype=start.type.dtype)\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n expected_val = numpy.arange(start_v_, stop_v_, step_v_,\r\n dtype=out.dtype)\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n assert numpy.all(f_val == expected_val)",
"def _cast_fp_inputs_to_dtype(\n dtype: torch.dtype,\n *args: Any,\n **kwargs: Any,\n) -> Tuple[Any, Any]:\n\n def cast_fn(x: torch.Tensor) -> torch.Tensor:\n if not torch.is_floating_point(x):\n return x\n y = x.to(dtype)\n # Explicitly copy over `requires_grad` since this runs inside\n # `torch.no_grad()`\n if x.is_leaf:\n y.requires_grad = x.requires_grad\n return y\n\n with torch.no_grad():\n return (_apply_to_tensors(cast_fn, args), _apply_to_tensors(cast_fn, kwargs))",
"def _cast_fp_inputs_to_dtype(\n self, dtype: torch.dtype, *args: Any, **kwargs: Any\n ) -> Tuple[Any, Any]:\n def cast_fn(x: torch.Tensor) -> torch.Tensor:\n if not torch.is_floating_point(x):\n return x\n y = x.to(dtype)\n # Explicitly copy over `requires_grad` since this runs inside\n # `torch.no_grad()`\n if x.is_leaf:\n y.requires_grad = x.requires_grad\n return y\n\n with torch.no_grad():\n return (\n _apply_to_tensors(cast_fn, args),\n _apply_to_tensors(cast_fn, kwargs)\n )",
"def bfloat16_to_float32(tensor):\n if tensor.dtype == tf.bfloat16:\n return tf.cast(tensor, dtype=tf.float32)\n else:\n return tensor",
"def _convert_samples_to_float32(samples):\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= 1.0 / 2 ** (bits - 1)\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples",
"def _convert_samples_to_float32(samples):\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= (1. / 2 ** (bits - 1))\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples",
"def asfloat(value):\n float_type = 'float32'\n\n if isinstance(value, (np.matrix, np.ndarray)):\n if value.dtype != np.dtype(float_type):\n return value.astype(float_type)\n\n return value\n\n elif isinstance(value, (tf.Tensor, tf.SparseTensor)):\n return tf.cast(value, tf.float32)\n\n elif issparse(value):\n return value\n\n float_x_type = np.cast[float_type]\n return float_x_type(value)",
"def read_float32(self):\n return self.read(BitTypes.FLOAT_LE_32.value)",
"def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float16, mask)",
"def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args",
"def make_fp32_optimiser(optimiser_class):\n class Fp32Optimiser(optimiser_class):\n def __init__(self, *args, **kwargs, ):\n super().__init__(*args, **kwargs)\n\n def _create_slots(self, var_list):\n new_var_list = []\n for v in var_list:\n if v.dtype.base_dtype != dtypes.float32:\n new_var_list.append(self._get_or_make_slot(v,\n math_ops.cast(v.initialized_value(),\n dtypes.float32),\n \"fp32\", \"fp32\"))\n else:\n new_var_list.append(v)\n return super()._create_slots(new_var_list)\n\n def _apply_weight_update(self, grad, var):\n if var.dtype.base_dtype == dtypes.float32:\n return super()._apply_weight_update(grad, var)\n else:\n orig_var = var\n var = self.get_slot(var, \"fp32\")\n updated_var = super()._apply_weight_update(\n math_ops.cast(grad, dtypes.float32), var)\n apply_fp32 = var.assign(updated_var)\n with tf.control_dependencies([apply_fp32]):\n return math_ops.cast(updated_var, orig_var.dtype.base_dtype)\n\n return Fp32Optimiser",
"def cast_if_floating_dtype(x, dtype=None):\n return nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype),\n x)",
"def _cast_grad_to_param_dtype(\n self,\n grad: torch.Tensor,\n param: FlatParameter,\n ):\n self._assert_state(TrainingState_.BACKWARD_POST)\n if (\n not self._low_precision_hook_enabled()\n and (\n self._mixed_precision_enabled_for_params()\n or self._mixed_precision_enabled_for_reduce()\n )\n ):\n low_prec_grad_data = grad.data\n grad.data = grad.data.to(dtype=param.dtype)\n # Do not let the low precision gradient memory get reused until\n # the cast to full parameter precision completes\n low_prec_grad_data.record_stream(torch.cuda.current_stream())",
"def floatval(space, w_obj):\n return space.newfloat(w_obj.float_w(space))",
"def get_params(self, params):\n mapping = OrderedDict(\n (key, params[x]) if isinstance(x, str) else (key, float(x))\n for key, x in self.transformations.items()\n )\n return Params(**mapping)",
"def _float_ones_like(x):\r\n\r\n rval = tensor.ones_like(x)\r\n\r\n if rval.type.dtype.find('float') != -1:\r\n return rval\r\n\r\n return rval.astype(theano.config.floatX)",
"def convertParametersToLocals(self, *args):\n return _libsbml.Model_convertParametersToLocals(self, *args)",
"def test_convert_float16_to_float32(in_dtype):\n check_type_supported(in_dtype)\n\n f16_input = torch.tensor(range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=torch.int16).view(in_dtype)\n f32_output = convert_float_to_float32(f16_input)\n\n nan = f16_input.isnan()\n assert torch.all(f32_output[nan].isnan())\n inf = f16_input.isinf()\n assert torch.all(f32_output[inf].isinf())\n other = torch.logical_not(torch.logical_or(nan, inf))\n assert torch.all(f16_input[other] == f32_output[other])",
"def _cast_unsupported_dtypes(tensor):\n\n if tensor.dtype.__eq__(dtypes.int64):\n # outside-compilation doesn't support int64 input yet.\n return math_ops.cast(tensor, dtypes.int32)\n if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(\n dtypes.float16):\n # Since host can't handle bf16, convert tensor to f32.\n return math_ops.cast(tensor, dtypes.float32)\n return tensor",
"def _preprocess_float(values: Sequence) -> Tuple[Union[float, NullValue]]:\n\n processed = [float(x)\n if isinstance(x, numbers.Number)\n else x\n for x in values]\n\n return tuple(processed)",
"def convert_cast(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n dtype = attrs[\"dtype\"]\n\n # dtype can be mapped only with types from TensorProto\n # float32 is mapped to float and float64 to double in onnx\n # following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py\n if dtype == 'float32':\n dtype = 'float'\n elif dtype == 'float64':\n dtype = 'double'\n\n node = onnx.helper.make_node(\n \"Cast\",\n input_nodes,\n [name],\n to=getattr(onnx.TensorProto, dtype.upper()),\n name=name,\n )\n return [node]",
"def _cast_to_float64(matrix):\n return matrix.astype(np.float64) if matrix.dtype != np.float64 else matrix"
] |
[
"0.5991893",
"0.597532",
"0.58963186",
"0.5659163",
"0.5620524",
"0.55966693",
"0.55932355",
"0.54186344",
"0.53701866",
"0.53546476",
"0.53410137",
"0.531362",
"0.52842003",
"0.527955",
"0.5197702",
"0.5188624",
"0.51253825",
"0.51162386",
"0.5039107",
"0.50123215",
"0.499994",
"0.49881482",
"0.49766502",
"0.49746016",
"0.49365443",
"0.48955718",
"0.4892205",
"0.48846382",
"0.4853895",
"0.48423484"
] |
0.73091394
|
0
|
r""" Cast the floatingpoint `parmas` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full halfprecision training or to save weights in float16 for inference in order to save memory and improve speed.
|
def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
return self._cast_floating_to(params, jnp.float16, mask)
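A minimal usage sketch for the `to_fp16` record above, assuming a Flax model from the transformers library; the checkpoint name is purely illustrative and not part of the record:

from transformers import FlaxBertModel

# load a model whose parameters are stored in float32 by default
model = FlaxBertModel.from_pretrained("bert-base-cased")
# cast every floating-point leaf of the parameter tree to float16;
# the original tree is not modified in place, so rebind model.params
model.params = model.to_fp16(model.params)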
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.bfloat16, mask)",
"def cast_parameters_to_bf16(place, program, scope=None, to_bf16_var_names=None):\n all_parameters = []\n for block in program.blocks:\n all_parameters.extend(block.all_parameters())\n\n bf16_var_names = to_bf16_var_names if to_bf16_var_names else set()\n var_scope = scope if scope else global_scope()\n for param in all_parameters:\n if param.name in bf16_var_names:\n _logger.debug(f\"---- cast {param.name} to bf16 dtype ----\")\n param_t = var_scope.find_var(param.name).get_tensor()\n data = np.array(param_t)\n param_t.set(convert_float_to_uint16(data), place)",
"def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float32, mask)",
"def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n\n # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27\n def conditional_cast(param):\n if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):\n param = param.astype(dtype)\n return param\n\n if mask is None:\n return jax.tree_util.tree_map(conditional_cast, params)\n\n flat_params = flatten_dict(params)\n flat_mask, _ = jax.tree_util.tree_flatten(mask)\n\n for masked, key in zip(flat_mask, flat_params.keys()):\n if masked:\n param = flat_params[key]\n flat_params[key] = conditional_cast(param)\n\n return unflatten_dict(flat_params)",
"def _update_use_bfloat16(configs, use_bfloat16):\n configs[\"train_config\"].use_bfloat16 = use_bfloat16",
"def _shared_params_fp16(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50_v1.5',\n batch_size=256,\n distortions=False,\n use_fp16=True,\n optimizer='momentum',\n loss_type_to_report='base_loss',\n compute_lr_on_cpu=True,\n single_l2_loss_op=True\n )",
"def _shared_params_fp16(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50',\n batch_size=256,\n distortions=False,\n use_fp16=True,\n optimizer='momentum',\n loss_type_to_report='base_loss',\n compute_lr_on_cpu=True,\n single_l2_loss_op=True\n )",
"def bfloat16_to_float32(tensor):\n if tensor.dtype == tf.bfloat16:\n return tf.cast(tensor, dtype=tf.float32)\n else:\n return tensor",
"def dtype_float(dtype: DType):\n return promote_dtypes(dtype, np.float16)",
"def convert_uint16_to_float_ifneed(self, actual_np, expect_np):\n if actual_np.dtype == np.uint16:\n if expect_np.dtype in [np.float32, np.float64]:\n actual_np = convert_uint16_to_float(actual_np)\n self.rtol = 1.0e-2\n elif actual_np.dtype == np.float16:\n self.rtol = 1.0e-3\n else:\n self.rtol = 1.0e-5\n if (\n expect_np.dtype == np.uint16\n and actual_np.dtype == np.uint16\n ):\n nonlocal atol\n expect_np = convert_uint16_to_float(expect_np)\n actual_np = convert_uint16_to_float(actual_np)\n atol = max(atol, 0.03)\n return actual_np, expect_np",
"def get_data_type(params):\n return tf.float16 if params.use_fp16 else tf.float32",
"def build_model(\n self,\n cfg: Config,\n fp16: bool = False,\n **kwargs,\n ) -> torch.nn.Module:\n model_builder = getattr(self, \"model_builder\", build_segmentor)\n model = model_builder(cfg, **kwargs)\n if bool(fp16):\n wrap_fp16_model(model)\n return model",
"def test_convert_float16_to_float32(in_dtype):\n check_type_supported(in_dtype)\n\n f16_input = torch.tensor(range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=torch.int16).view(in_dtype)\n f32_output = convert_float_to_float32(f16_input)\n\n nan = f16_input.isnan()\n assert torch.all(f32_output[nan].isnan())\n inf = f16_input.isinf()\n assert torch.all(f32_output[inf].isinf())\n other = torch.logical_not(torch.logical_or(nan, inf))\n assert torch.all(f16_input[other] == f32_output[other])",
"def save_float16_npy(data, path):\n np.save(path, data.astype(np.float16))",
"def benchmark_fp16_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True, use_fp16=True)\n self._run_benchmark(params)",
"def benchmark_fp16_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True, use_fp16=True)\n self._run_benchmark(params)",
"def benchmark_fp16_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True, use_fp16=True)\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, use_fp16=True, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, use_fp16=True, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, use_fp16=True, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_xla_synth_forward_batch16(self):\n params = self._shared_params()._replace(\n batch_size=16, use_fp16=True, xla=True)\n self._run_benchmark(params)",
"def convertToFloat(boolean: bool) -> float:\n ...",
"def _convert_example(example, use_bfloat16):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val",
"def auto_fp16(apply_to=None, out_fp32=False):\n warnings.warn('auto_fp16 in mmpose will be deprecated in the next release.Please use mmcv.runner.auto_fp16 instead (mmcv>=1.3.1).', DeprecationWarning)\n\n def auto_fp16_wrapper(old_func):\n\n @functools.wraps(old_func)\n def new_func(*args, **kwargs):\n if not isinstance(args[0], torch.nn.Module):\n raise TypeError('@auto_fp16 can only be used to decorate the method of nn.Module')\n if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):\n return old_func(*args, **kwargs)\n args_info = getfullargspec(old_func)\n args_to_cast = args_info.args if apply_to is None else apply_to\n new_args = []\n if args:\n arg_names = args_info.args[:len(args)]\n for i, arg_name in enumerate(arg_names):\n if arg_name in args_to_cast:\n new_args.append(cast_tensor_type(args[i], torch.float, torch.half))\n else:\n new_args.append(args[i])\n new_kwargs = {}\n if kwargs:\n for arg_name, arg_value in kwargs.items():\n if arg_name in args_to_cast:\n new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, torch.half)\n else:\n new_kwargs[arg_name] = arg_value\n output = old_func(*new_args, **new_kwargs)\n if out_fp32:\n output = cast_tensor_type(output, torch.half, torch.float)\n return output\n return new_func\n return auto_fp16_wrapper",
"def _shorts2float(lo_byte_pair, hi_byte_pair):\n\tba = bytearray(struct.pack(\"HH\", lo_byte_pair, hi_byte_pair))\n\t[f] = struct.unpack('f', ba)\n\treturn f",
"def _cast_grad_to_param_dtype(\n self,\n grad: torch.Tensor,\n param: FlatParameter,\n ):\n self._assert_state(TrainingState_.BACKWARD_POST)\n if (\n not self._low_precision_hook_enabled()\n and (\n self._mixed_precision_enabled_for_params()\n or self._mixed_precision_enabled_for_reduce()\n )\n ):\n low_prec_grad_data = grad.data\n grad.data = grad.data.to(dtype=param.dtype)\n # Do not let the low precision gradient memory get reused until\n # the cast to full parameter precision completes\n low_prec_grad_data.record_stream(torch.cuda.current_stream())",
"def benchmark_fp16_synth_forward_batch16(self):\n params = self._shared_params()._replace(batch_size=16, use_fp16=True)\n self._run_benchmark(params)",
"def convert_fp32_or_fp16(\n input_model_dir, output_model_dir, batch_size, precision_mode):\n trt.create_inference_graph(\n input_graph_def=None,\n outputs=None,\n max_batch_size=batch_size,\n input_saved_model_dir=input_model_dir,\n output_saved_model_dir=output_model_dir,\n precision_mode=precision_mode)",
"def cast_model_to_bf16(\n program, startup_prog=None, amp_lists=None, use_bf16_guard=True\n):\n\n if amp_lists is None:\n amp_lists = AutoMixedPrecisionListsBF16()\n global_block = program.global_block()\n keep_fp32_ops = set()\n to_bf16_var_names = set()\n to_bf16_pre_cast_ops = set()\n origin_ops = []\n for block in program.blocks:\n origin_ops.extend(block.ops)\n\n for block in program.blocks:\n ops = block.ops\n for op in ops:\n if op.type == 'create_py_reader' or op.type == 'read':\n continue\n if _need_keep_fp32(op, amp_lists.unsupported_list, use_bf16_guard):\n keep_fp32_ops.add(op)\n continue # processed below\n for in_name in op.input_names:\n if op.type in {\n 'batch_norm',\n 'fused_bn_add_activation',\n 'layer_norm',\n } and in_name not in {'X', 'Z'}:\n continue\n for in_var_name in op.input(in_name):\n in_var = None\n try:\n in_var = block.var(in_var_name)\n except ValueError as e:\n _logger.debug(\n \"-- {}, try to get it in the global block --\".format(\n e\n )\n )\n in_var = global_block.var(in_var_name)\n if in_var is not None:\n _logger.debug(\n \"-- var {} is got in the global block --\".format(\n in_var_name\n )\n )\n\n if in_var is None or in_var.type not in _valid_types:\n continue\n\n if in_var.dtype == core.VarDesc.VarType.FP32:\n in_var.desc.set_dtype(core.VarDesc.VarType.BF16)\n to_bf16_var_names.add(in_var_name)\n\n _logger.debug(\n \"-- op type: {}, in var name: {}, in var dtype: {} --\".format(\n op.type, in_var_name, in_var.dtype\n )\n )\n\n for out_name in op.output_names:\n if (\n op.type\n in {'batch_norm', 'fused_bn_add_activation', 'layer_norm'}\n and out_name != 'Y'\n ):\n continue\n for out_var_name in op.output(out_name):\n out_var = None\n try:\n out_var = block.var(out_var_name)\n except ValueError as e:\n _logger.debug(\n \"-- {}, try to get it in the global block --\".format(\n e\n )\n )\n out_var = global_block.var(out_var_name)\n if out_var is not None:\n _logger.debug(\n \"-- var {} is got in the global block --\".format(\n out_var_name\n )\n )\n\n if out_var is None or out_var.type not in _valid_types:\n continue\n\n if out_var.dtype == core.VarDesc.VarType.FP32:\n out_var.desc.set_dtype(core.VarDesc.VarType.BF16)\n\n _logger.debug(\n \"-- op type: {}, out var name: {}, out var dtype: {} --\".format(\n op.type, out_var_name, out_var.dtype\n )\n )\n for attr_name in ['in_dtype', 'out_dtype', 'dtype']:\n if (\n op.has_attr(attr_name)\n and op.attr(attr_name) == core.VarDesc.VarType.FP32\n ):\n op._set_attr(attr_name, core.VarDesc.VarType.BF16)\n if op.has_attr('use_mkldnn'):\n op._set_attr('use_mkldnn', True)\n if op.has_attr('mkldnn_data_type'):\n op._set_attr('mkldnn_data_type', 'bfloat16')\n\n if startup_prog is not None:\n cast_initializers_to_bf16(\n startup_prog,\n amp_lists,\n global_block,\n ops,\n keep_fp32_ops,\n to_bf16_var_names,\n )\n\n # process ops in keep_fp32_ops\n op_var_rename_map = [\n collections.OrderedDict() for _ in range(len(program.blocks))\n ]\n for block in program.blocks:\n ops = block.ops\n idx = 0\n while idx < len(ops):\n op = ops[idx]\n num_cast_ops = 0\n if op not in keep_fp32_ops:\n if op in to_bf16_pre_cast_ops:\n in_var_cast_num = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n )\n num_cast_ops += in_var_cast_num\n else:\n pre_cast_num = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.BF16,\n core.VarDesc.VarType.FP32,\n )\n num_cast_ops += pre_cast_num\n for out_var_name in op.output_arg_names:\n out_var = block.vars.get(out_var_name)\n if 
out_var is None or out_var.type not in _valid_types:\n continue\n if out_var.dtype == core.VarDesc.VarType.BF16:\n out_var.desc.set_dtype(core.VarDesc.VarType.FP32)\n post_ops = find_true_post_op(ops, op, out_var_name)\n for post_op in post_ops:\n if post_op in keep_fp32_ops:\n continue\n post_cast_num = _insert_cast_post_op(\n block,\n op,\n idx + pre_cast_num + 1,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n out_var_name,\n op_var_rename_map,\n )\n num_cast_ops += post_cast_num\n idx += num_cast_ops + 1\n\n _rename_op_input(program, op_var_rename_map, origin_ops, keep_fp32_ops)\n return to_bf16_var_names",
"def new_float(*args, **kwargs):\n return array.array(FLOAT_TYPECODE, *args, **kwargs)"
] |
[
"0.64355737",
"0.62667245",
"0.56928766",
"0.5585037",
"0.5519305",
"0.5508482",
"0.54957175",
"0.5233368",
"0.5187701",
"0.5149924",
"0.50876534",
"0.5062732",
"0.5032946",
"0.49982625",
"0.49486592",
"0.49486592",
"0.49486592",
"0.49480826",
"0.49480826",
"0.49480826",
"0.49295795",
"0.4916173",
"0.4906052",
"0.48764545",
"0.48763758",
"0.48690844",
"0.4796039",
"0.47686917",
"0.4761809",
"0.47297698"
] |
0.71142125
|
0
|
Register this class with a given auto class. This should only be used for custom models, as the ones in the library are already mapped with an auto class. This API is experimental and may have slight breaking changes in the next releases.
|
def register_for_auto_class(cls, auto_class="FlaxAutoModel"):
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
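An illustrative sketch of how `register_for_auto_class` is typically called, assuming the standard transformers custom-model workflow; the subclass below is hypothetical and not taken from the record:

from transformers import FlaxPreTrainedModel

class MyCustomFlaxModel(FlaxPreTrainedModel):  # hypothetical custom model class
    pass

# tag the class with the auto class it should be mapped to, so that
# auto-class loading of the custom code can later resolve it
MyCustomFlaxModel.register_for_auto_class("FlaxAutoModel")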
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1",
"def register(cls):\n register(cls, cls.provided_class)",
"def register(cls, class_):\n cls._registered[class_.tag()] = class_",
"def register_class(obj):\r\n try:\r\n KnownClass.objects.get(module_name=obj.__module__, class_name=obj.__class__.__name__)\r\n except DoesNotExist:\r\n # Create it\r\n KnownClass(module_name = obj.__module__, class_name = obj.__class__.__name__).save()",
"def register_app_class(self, cls):\n assert isinstance(cls, type) and issubclass(cls, Model)\n name = cls.__name__\n if not valid_app_name(name):\n raise ValueError('Given app does not have a valid name %r' % name)\n pending, connected = [], []\n if name in self._proxies and cls is not self._proxies[name][0]:\n oldCls, pending, connected = self._proxies[name]\n logger.warn('Re-registering app class %r' % name)\n #raise ValueError('App with name %r already registered' % name)\n self._proxies[name] = cls, pending, connected",
"def register_class(self, cls, *, name=None):\n cls_name = self.host.cache_class(cls, name)\n self.register_constant(cls, cls_name)",
"def register(self, cls):\r\n\r\n # Do all checks and complain before changing any state.\r\n if len(cls.tags) == 0:\r\n raise ValueError(\"No tags specified for class {0}\".format(cls.__name__))\r\n\r\n for t in cls.tags:\r\n if t in self._mapping:\r\n other_cls = self._mapping[t]\r\n if cls == other_cls:\r\n # registering the same class multiple times seems silly, but ok\r\n continue\r\n raise ValueError(\"Tag {0} already registered by class {1}.\"\r\n \" Can't register for class {2}\"\r\n .format(t, other_cls.__name__, cls.__name__))\r\n\r\n # Ok, should be good to change state now.\r\n for t in cls.tags:\r\n self._mapping[t] = cls\r\n\r\n # Returning the cls means we can use this as a decorator.\r\n return cls",
"def register(cls, class_to_register):\n cls.registered_loaders.append(class_to_register)\n return class_to_register",
"def register(cls, model):\n cls.models[model] = True",
"def register_class(self, entity_class):\n key = entity_class.__collection_name__\n\n if key not in self._registered_types:\n self._registered_types[key] = entity_class",
"def register_model_once(cls, ModelClass, **kwargs):\n if cls._static_registry.get_for_model(ModelClass) is None:\n logger.warn(\"Model is already registered with {0}: '{1}'\"\n .format(cls, ModelClass))\n else:\n cls.register_model.register(ModelClass, **kwargs)",
"def register_keras_custom_object(cls):\n tf.keras.utils.get_custom_objects()[cls.__name__] = cls\n return cls",
"def register_instance(cls):\n\n @functools.wraps(cls)\n def wrapper_decorator(*args, **kwargs):\n\n instance = cls(*args, **kwargs)\n\n Register[cls.__name__] = instance\n\n return instance\n\n return wrapper_decorator",
"def _register(registry, cls):\n assert issubclass(cls, Registrable)\n\n reg_attr = f\"_{cls.__name__}_registered\"\n if getattr(cls, reg_attr, False):\n return cls\n\n name = cls.__fieldtype__()\n assert (\n name not in registry\n ), f\"{cls!r} cannot be registered as {name!r}: already used by {registry[name]!r}\"\n\n registry[name] = cls\n setattr(cls, reg_attr, True)\n return cls",
"def register_model(name):\n\n def register_model_cls(cls):\n if name in MODEL_REGISTRY:\n raise ValueError('Cannot register duplicate model ({})'.format(name))\n MODEL_REGISTRY[name] = cls\n return cls\n\n return register_model_cls",
"def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)",
"def register_model(self, model):\n\n self._model = model",
"def register(self, model_or_iterable, handler_class, **kwargs):\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model in self._registry:\n try:\n model_name = model._meta.model_name\n except AttributeError:\n # Django < 1.6\n model_name = model._meta.module_name\n raise ModelAlreadyRegistered(\n \"The model {} is already registered.\".format(model_name))\n handler = get_handler_instance(model, handler_class, kwargs)\n self._registry[model] = handler\n contribute_to_class(model)",
"def register(self, cls, force=False):\n if not issubclass(cls, self.type):\n raise InvalidRegistryItemType(\n \"Invalid item type `{0}` for registry \"\n \"`{1}`\".format(cls, self.__class__)\n )\n\n # If item has not been forced yet, add/replace its' value in the\n # registry.\n if force:\n\n if cls.uid not in self._forced:\n self._registry[cls.integrate_with][cls.uid] = cls\n self._forced[cls.integrate_with].append(cls.uid)\n return True\n else:\n return False\n\n else:\n\n if cls.uid in self._registry[cls.integrate_with]:\n return False\n else:\n self._registry[cls.integrate_with][cls.uid] = cls\n return True",
"def register_auto(cls, func=None):\n RecoverableError.register(cls, func)\n AutoRecoverableError.register(cls, func)",
"def _register(cls):\n clsid_path = \"Software\\\\Classes\\\\CLSID\\\\\" + cls._reg_clsid_\n progid_path = \"Software\\\\Classes\\\\\" + cls._reg_progid_\n spec = cls.__module__ + \".\" + cls.__name__\n\n # register the class information\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\ProgID\", win32con.REG_SZ, cls._reg_progid_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\PythonCOM\", win32con.REG_SZ, spec)\n hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\InprocServer32\")\n win32api.RegSetValueEx(hkey, None, None, win32con.REG_SZ, pythoncom.__file__)\n win32api.RegSetValueEx(hkey, \"ThreadingModel\", None, win32con.REG_SZ, \"Both\")\n\n # and add the progid\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path + \"\\\\CLSID\", win32con.REG_SZ, cls._reg_clsid_)",
"def _class(self, _class):\n\n self.__class = _class",
"def _class(self, _class):\n\n self.__class = _class",
"def register_model(self, model: Type[Model]):\n\n if not model in self._registered_models:\n self._registered_models.add(model)",
"def register(self, cls, force=False):\n if not issubclass(cls, self.type):\n raise InvalidRegistryItemType(\n \"Invalid item type `{0}` for registry \"\n \"`{1}`\".format(cls, self.__class__)\n )\n\n # If item has not been forced yet, add/replace its' value in the\n # registry.\n if force:\n\n if cls.uid not in self._forced:\n self._registry[cls.uid] = cls\n self._forced.append(cls.uid)\n return True\n else:\n return False\n\n else:\n\n if cls.uid in self._registry:\n return False\n else:\n self._registry[cls.uid] = cls\n return True",
"def register(cls, instance_class: Type, name: str = None):\n if name is None:\n name = instance_class.__name__\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'registering: {instance_class} for {cls} -> {name}')\n cls.INSTANCE_CLASSES[name] = instance_class",
"def register_config(cls):\n _configs[cls.__name__] = cls",
"def register_driver(self, key, cls):\n self.drivers.update({key: cls})",
"def on_register(cls):",
"def register_corrector(cls=None, *, name=None):\n\n def _register(cls):\n if name is None:\n local_name = cls.__name__\n else:\n local_name = name\n if local_name in _CORRECTORS:\n raise ValueError(f'Already registered models with name: {local_name}')\n _CORRECTORS[local_name] = cls\n return cls\n\n if cls is None:\n return _register\n else:\n return _register(cls)"
] |
[
"0.6776159",
"0.6657153",
"0.66237974",
"0.63050866",
"0.62078553",
"0.6145712",
"0.61293656",
"0.60562444",
"0.6045435",
"0.60446596",
"0.5937903",
"0.58827955",
"0.58161414",
"0.57446134",
"0.5714061",
"0.567607",
"0.5643422",
"0.5598895",
"0.5584236",
"0.55817175",
"0.5572318",
"0.5519869",
"0.5519869",
"0.551637",
"0.55138767",
"0.5496559",
"0.54689276",
"0.54651415",
"0.5462785",
"0.5445685"
] |
0.8423522
|
0
|
`values` is a dictionary and therefore not hashable, so it cannot be used directly in the explored/visited set. This function converts the values dict into a unique hashable string which can be used in the explored set. You may or may not use this.
|
def convertStateToHash(values):
l = list(sorted(values.items()))
modl = [a+b for (a, b) in l]
return ''.join(modl)
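A quick sketch of using the hash in a visited set; the example dict assumes string keys and values, as required by the `a+b` concatenation:

values = {"B": "2", "A": "1"}
state_key = convertStateToHash(values)   # items are sorted first -> 'A1B2'

explored = set()
if state_key not in explored:
    explored.add(state_key)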
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def HashValue(self) -> _n_0_t_3[_n_0_t_9]:",
"def __hash__(self):\n return hash(self.value)",
"def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n # values[peer] = values[peer].replace(digit, '')\n new_value = values[peer].replace(digit, '')\n assign_value(values, peer, new_value)\n return values",
"def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values",
"def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values",
"def hash_value(board_state):\n res = \"\"\n for i in range(1,10):\n res = res + board_state[i]\n return res",
"def _findUniqueMappingValues(mapping):\n uniqueMappingValues = set()\n for entries in viewvalues(mapping):\n if len(entries) == 1:\n uniqueMappingValues.update(entries)\n return uniqueMappingValues",
"def stringify_values(dictionary):\n\n dict_copy = copy.deepcopy(dictionary)\n\n for key, value in dict_copy.iteritems():\n if isinstance(value, dict):\n dict_copy[key] = stringify_values(value)\n else:\n dict_copy[key] = str(value)\n return dict_copy",
"def __hash__(self):\n return self.value.__hash__()",
"def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))",
"def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))",
"def hash_value(self, value):\n h = hashlib.sha256()\n h.update(str(value))\n return h.hexdigest()",
"def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d",
"def replace_dict_value(d, bad_values): \n for key, value in d.copy().items(): \n for n in bad_values: \n if n == value: \n del d[key]",
"def hashColors(colors):\n return sum(HASH_COLORS[col] for col in set(colors))",
"def _hash_value(value):\n return hashlib.md5(value.encode('utf-8')).hexdigest()[:9]",
"def __hash__(self):\n return hash(('genes', tuple(self.genes), self.environment))",
"def __hash__(self):\n return hash((self._start, self._end, self._name, self._value))",
"def __repr__(self):\n return \"Set: \" + str(self.dict.keys())",
"def hash_key(self):",
"def get_encoded_values(self):\n stack = Stack()\n visit_order = []\n node = self.get_root().get_value()[1]\n visit_order.append(node)\n state = State(node)\n stack.push(state)\n count = 0\n encoded_value = \"\"\n ecv = {}\n\n while(node):\n count += 1\n\n if node.has_left_child() and not state.get_visited_left():\n encoded_value += \"0\"\n state.set_visited_left()\n node = node.get_left_child()[1]\n if type(node) == str:\n node = Node(node)\n visit_order.append(node.get_value())\n state = State(node)\n stack.push(state)\n ecv[node] = encoded_value\n\n elif node.has_right_child() and not state.get_visited_right():\n encoded_value += \"1\"\n state.set_visited_right()\n node = node.get_right_child()[1]\n if type(node) == str:\n node = Node(node)\n visit_order.append(node.get_value())\n state = State(node)\n stack.push(state)\n ecv[node] = encoded_value\n\n else:\n stack.pop()\n if not stack.is_empty():\n state = stack.top()\n node = state.get_node()\n else:\n node = None\n if ecv.get(node):\n encoded_value = ecv[node]\n else:\n encoded_value = \"\"\n\n return ecv",
"def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))",
"def det_hash(x):\n\n def det_list(l): return '[%s]' % ','.join(map(det, sorted(l)))\n\n def det_dict(x):\n list_=map(lambda p: det(p[0]) + ':' + det(p[1]), sorted(x.items()))\n return '{%s}' % ','.join(list_)\n\n def det(x): return {list: det_list, dict: det_dict}.get(type(x), str)(x)\n\n return hash_(det(unpackage(package(x))))",
"def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()",
"def dict_hash(dictionary: Dict[str, Any]) -> str:\n d_hash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n d_hash.update(encoded)\n return d_hash.hexdigest()",
"def _hash(self, value, get_val, get_child):\n hasher = getattr(hashlib, self.hash_func)\n children = get_child(value)\n\n # If leaf node\n if len(children) < 1:\n return hasher(get_val(value)).hexdigest()\n\n h = hasher()\n for child in children:\n # Tree is created recursively\n n = Node(child, get_val, get_child,\n self.hash_func)\n self.c.append(n)\n h.update(n.h.encode(\"utf-8\"))\n return h.hexdigest()",
"def get_hash_string(self) -> str:\n\t\ts = ''\n\t\tfor i in range(self.size):\n\t\t\ts += ''.join(map(str,self.tiles[i]))\n\t\treturn s",
"def __hash__(self):\n hash_content = []\n hash_content.extend(self.analyzer_options)\n hash_content.append(str(self.analyzer_type))\n hash_content.append(self.target[self.lang])\n hash_content.append(self.source)\n return hash(''.join(hash_content))",
"def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())",
"def get_hash(dictionary):\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()"
] |
[
"0.5454918",
"0.54015154",
"0.5385653",
"0.53502357",
"0.53502357",
"0.53162235",
"0.52739483",
"0.52719384",
"0.52631074",
"0.5217652",
"0.5215425",
"0.51902264",
"0.51642615",
"0.51546514",
"0.51301754",
"0.5089109",
"0.5087942",
"0.5077861",
"0.50689274",
"0.5043173",
"0.5031087",
"0.5023084",
"0.50024164",
"0.5001977",
"0.5000421",
"0.49978736",
"0.4996764",
"0.49964005",
"0.49909994",
"0.4984575"
] |
0.6645296
|
1
|
This function is used by '/book/search/'; it checks whether the input is a book keyword or an ISBN.
|
def is_isbn_or_keyword(inputs):
isbn_or_keyword='keyword'
if len(inputs)==13 and inputs.isdigit():
isbn_or_keyword='isbn'
short_inputs=inputs.strip('-')
if '-' in inputs and short_inputs.isdigit() and len(short_inputs)==10:
isbn_or_keyword='isbn'
return isbn_or_keyword
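A small check of both branches with made-up inputs:

print(is_isbn_or_keyword("9781234567897"))     # 13 digits -> 'isbn'
print(is_isbn_or_keyword("machine learning"))  # no ISBN pattern -> 'keyword'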
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False",
"def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])",
"def searchbook(isbn):\r\n print(\"Searching for isbn \" + isbn + \" in googlebooks...\")\r\n result = _search_by_isbn(isbn)\r\n \r\n if result[\"totalItems\"] == 0:\r\n return None\r\n \r\n b = _item2book(result[\"items\"][0])\r\n return b",
"def search_by_type(book_search):\n\n book_type_translator = {\n '1':'fiction',\n '2':'crime',\n '3':'adventure'\n }\n\n print(\"What type of book are you looking for? Enter a number\")\n print(\n \"\\n\".join(f\"{num}.{genre}\" for num, genre in book_type_translator.items()))\n\n book_type_number = 0\n\n while True:\n book_type_number = input('> ')\n\n if book_type_number in book_type_translator:\n book_type = book_type_translator[book_type_number]\n book_printer(book_type)\n elif book_type_number == 'X':\n return\n else:\n print(\"Book type invalid, try again or [X] to exit\")",
"def supports_book_search(self):\n return False",
"def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)",
"def search_for_books(main_page): # Add information to the printout if the book is rented\n\n type_of_search = 0\n\n header = \"\"\"\n Do you want to search for books by the first letter of the title\n or by the type?\n \"\"\"\n search_choices= (\n (\"To search by letter\", search_by_letter),\n (\"To search by type\", search_by_type),\n (\"To exit\",exit.exit_to_main)\n )\n\n book_search = Screen(header,search_choices,\n main_page.login, main_page.password)\n book_search.activate()",
"def user_input_book_search(data: list) -> list:\n while True:\n try:\n search_point = input(\n \"Search by - \\n Enter (1) Author (2) Title (3) Publisher (4) Shelf (5) Category (6) Subject:\\n\")\n info = input(\"Search string: \").lower()\n for index, value in enumerate(ATTRIBUTE(), 1):\n if int(search_point) == index:\n return book_search(value, info, data)\n if int(search_point) > 6:\n raise IndexError(\"Please enter the number from 1 to 6.\")\n except ValueError:\n print(\"Please enter the number.\")",
"def search(self, title=\"\", author=\"\", year=\"\", isbn=\"\"):\n self.cursor.execute(\"SELECT * FROM Book WHERE Title = ? OR Author = ? \\\n OR Year = ? OR ISBN = ?\", (title, author, year, isbn))\n rows = self.cursor.fetchall()\n return rows",
"def library_searched():\n\n searched_result = []\n \n updated_books = duplicated_code()\n\n if request.method == 'POST':\n if request.form['type_search'] == 'book':\n book_title = request.form['search']\n for book in updated_books:\n if book['title'] == book_title:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'genre':\n book_genre = request.form['search']\n for book in updated_books:\n if book['genre'] == book_genre:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'author':\n book_author = request.form['search']\n for book in updated_books:\n if book['author_name'] == book_author:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n else:\n return render_template(\"library_searched.html\")",
"def genSearch(request):\n \n assert isinstance(request, HttpRequest)\n booklist=[]\n form = request.GET.copy();\n searchvalue =form['query']\n for k,v in get_valid_Books().items():\n if searchvalue.lower() in v.title.lower() or searchvalue.lower() in v.desc.lower() or searchvalue.lower() in v.a_id.name.lower():\n booklist.append(v)\n if booklist is None:\n clearfilter=\"False\"\n else:\n clearfilter=\"True\"\n\n return render(\n request,\n 'app/about.html',\n {\n 'title':'Books',\n 'books':booklist,\n 'clearfilter':clearfilter,\n 'year':datetime.now().year,\n }\n )",
"def search():\n\n # TO DO: refine with wildcard to curb superfluous results\n \n # logged in users can search for books\n # via 'isbn', 'author', or 'title'\n query = request.form.get(\"search\")\n if not query:\n return render_template(\"home.html\", result=0, name=session[\"name\"],result_head=\"Results\")\n \n # query 'isbn'\n if query.isdigit():\n res = db.execute(\"SELECT * FROM books WHERE isbn LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n else:\n # query 'author'\n res = db.execute(\"SELECT * FROM books WHERE author LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n # If no result from author, query 'title'\n if len(res) == 0:\n res = db.execute(\"SELECT * FROM books WHERE title LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n if len(res) == 0:\n res = 0\n return render_template(\"home.html\", result=res, name=session[\"name\"], result_head=\"Results\")",
"def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)",
"def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)",
"def search(title = \"\", author = \"\", year = \"\", isbn = \"\"):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"SELECT * \"\n \"FROM book \"\n \"WHERE title = %s OR author = %s OR year = %s OR isbn = %s\", \n (title, author, year, isbn))\n rows = cur_obj.fetchall()\n conn_obj.close()\n return rows",
"def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0",
"def is_valid_book(current_author, inputed_name, availale_books):\n\tbook_info = []\n\tauthor_book = {}\n\n\tfor book in availale_books:\n\t\tauthor = book.author.username\n\t\tauthor_book[author] = book.book_name\n\t\tbook_info.append(author_book)\n\t\tauthor_book = {}\n\n\tfor book in book_info:\n\t\tfor author, book_name in book.items():\n\t\t\tif book_name == inputed_name and author == current_author:\n\t\t\t\treturn False\n\n\treturn True",
"def search_book():\n\n title = request.form.get(\"search\")\n books = book_search_results(GR_KEY, title)\n acct = get_current_account(session['acct'])\n search = True\n\n return render_template(\"index.html\", books=books, acct=acct, search=search)",
"def find_books(self, query, filters, dates, order, descending, semantics, loginID):\n if int(semantics):\n # OR semantics\n conjunction = ' UNION '\n else:\n # AND semantics\n conjunction = ' INTERSECT '\n results = {}\n query_sections = ''\n args = []\n # we don't want all filters off, because that would throw a SQL error. So if user does not select\n # any filters, we will assume they want all results.\n if not filters:\n filters['title_filt'] = 'on'\n filters['author_filt'] = 'on'\n filters['lang_filt'] = 'on'\n filters['publisher_filt'] = 'on'\n\n # go through each active filter and do a query based on that filter, then append results to the final\n # return value\n if 'title_filt' in filters and query[0]:\n query_sections += \"SELECT * FROM book WHERE title LIKE %s\"\n args.append('%' + query[0] + '%')\n\n if 'author_filt' in filters and query[1]:\n if query_sections:\n query_sections += conjunction\n query_sections += \"\"\"SELECT B.ISBN, title, publisher, B.lang, publicationDate, pageCount, \n stock, B.price, B.subject, avg_rating, total_rating_score, num_ratings FROM book B, author A, wrote W \n WHERE W.ISBN = B.ISBN AND W.authorID = A.ID AND A.name LIKE %s\"\"\"\n args.append('%' + query[1] + '%')\n\n if 'lang_filt' in filters and query[2]:\n if query_sections:\n query_sections += conjunction\n query_sections += \"SELECT * FROM book WHERE lang LIKE %s\"\n args.append('%' + query[2] + '%')\n\n if 'publisher_filt' in filters and query[3]:\n if query_sections:\n query_sections += conjunction\n query_sections += \"SELECT * FROM book WHERE publisher LIKE %s\"\n args.append('%' + query[3] + '%')\n\n # if the query is empty, that means they did not fill out any of the forms for filters they wanted.\n if not query_sections:\n return results\n # determine ordering method\n if order == '0':\n query_sections += \" ORDER BY publicationDate\"\n # if descending is true, add descending specification\n if int(descending):\n query_sections += \" DESC\"\n elif order == '1':\n query_sections += \"ORDER BY avg_rating\"\n # if descending is true, add descending specification\n if int(descending):\n query_sections += \" DESC\"\n\n # execute final constructed query and store results in a dict\n self.cursor.execute(query_sections, args)\n books = self.cursor.fetchall()\n\n for book in books:\n if str(book[0]) not in results:\n cur_authors = []\n results[str(book[0])] = book\n # now we need to find all the authors of this book so we can display them\n self.cursor.execute(\"\"\"SELECT name FROM author A, wrote W, book B WHERE A.ID = W.authorID AND\n W.ISBN = B.ISBN AND B.ISBN = %s\"\"\", (book[0],))\n for author in self.cursor.fetchall():\n cur_authors.append(author[0])\n results[str(book[0])] = [results[str(book[0])], cur_authors]\n # filter results so only trusted comments are included in average rating without changing database\n if order == '2':\n actual_ratings = []\n for book in books:\n if not any(str(book[0]) in sub for sub in actual_ratings):\n self.cursor.execute(\"\"\"SELECT score FROM trusts T, comment C WHERE T.loginID = %s AND\n T.otherLoginID = C.loginID AND T.trustStatus = 'TRUSTED' AND \n C.ISBN = %s\"\"\", (loginID, str(book[0])))\n current_sum = 0\n current_num_users = 0\n for score in self.cursor.fetchall():\n current_num_users = current_num_users+1\n current_sum = current_sum+score[0]\n final_score = None\n if current_num_users:\n final_score = current_sum/current_num_users\n else:\n final_score = None\n actual_ratings.append([str(book[0]), final_score])\n if int(descending):\n 
is_reverse = True\n else:\n is_reverse = False\n\n actual_ratings = sorted(actual_ratings, key=lambda l:-1*float('inf') if l[1] is None else l[1],\n reverse=is_reverse)\n sorted_results = {}\n for [book, score] in actual_ratings:\n unfiltered_data = results[book]\n t = unfiltered_data[0]\n new_data = [(t[0],t[1],t[2],t[3],t[4],t[5],t[6],t[7],t[8],round(score,2) if score is not None else score,\n t[9],t[10]), unfiltered_data[1]]\n sorted_results[book] = new_data\n results = sorted_results\n return results",
"def book_search(self, term):\n\n try:\n cur = self._db.cursor()\n search = f'%{term.upper()}%'\n cur.execute('SELECT rowid, * FROM books WHERE UPPER(title) like ? OR UPPER(author) like ?', (search, search))\n return self._cursor_to_booklist(cur)\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e",
"def find_books(self):\n search_query = unicode(self.search_input.data)\n q = u'%{}%'.format(search_query)\n\n # used for dummy emulation of caseinsensetive search\n qC = u'%{}%'.format(capfirst(search_query))\n\n books = Book.query.filter(db.or_(\n Book.authors.any(db.or_(\n Author.name.like(q),\n Author.name.like(qC))),\n Book.title.like(q),\n Book.title.like(qC)),)\n\n return books",
"def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]",
"def search(collection_of_books: tuple, search_tag: str, search_keyword: str) -> list:\r\n found_books = []\r\n\r\n if search_tag == \"Shelf\" and search_keyword.isnumeric():\r\n found_books = [book for book in collection_of_books if search_keyword == book[\"Shelf\"]]\r\n\r\n else:\r\n for book in collection_of_books:\r\n if search_keyword.lower() in book[search_tag].lower():\r\n found_books.append(book)\r\n\r\n return found_books",
"def filter_books():\n if request.method != \"POST\":\n return render_template(\"error.html\", message=\"First Login with your username.\")\n else:\n \n #according to the selected field, we did a search\n book_field = request.form.get(\"book_field\")\n book_field = book_field.lower()\n field_value = request.form.get(\"field_value\")\n field_value = '%' + field_value + '%'\n\n stmt = \"SELECT * FROM book WHERE \" + book_field +\" LIKE :field_value\"\n\n filter_books = db.execute(stmt, {\"book_field\":book_field , \"field_value\":field_value}).fetchall()\n \n\n # Get the all the books values\n books = db.execute(\"SELECT * FROM book\").fetchall()\n search_options = [\"ISBN\",\"Title\", \"Author\"]\n \n return render_template(\"search.html\", books=books, filter_books = filter_books,search_options=search_options)",
"def search_books_body(collection_of_books: tuple) -> list:\r\n search_tag, search_keyword = search_info()\r\n try:\r\n found_books = search(collection_of_books, search_tag, search_keyword)\r\n except KeyError as err:\r\n print(f\"Invalid input: {err}\\n\"\r\n f\"{err} raised KeyError. Please follow the instruction carefully.\\n\")\r\n else:\r\n print_as_ordered_list(found_books)\r\n\r\n return found_books",
"def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks",
"def validBookObject(bookObject):\n return (\"name\" in bookObject and\n \"price\" in bookObject and\n \"isbn\" in bookObject)",
"def iskeyword(self, arg: str):\n if arg.upper() in self._keywords:\n return True\n return False",
"def search(words):\n\tglobal books\n\tglobal word_count\n\tglobal titles\n\n\ttry:\n\t\t#index for titles and dictionary of books\n\t\ti = 0\n\t\t#condition for word not found\n\t\tif word_count.get(words) == None:\n\t\t\tprint(\"The word {} does not appear in any books in the library.\".format(words))\n\t\t\treturn\n\n\t\tfor vals in word_count[words]:\n\t\t\t# #if book index was empty - case where url was not active - index was removed - increase index\n\t\t\t# while(books.get(j) == None):\n\t\t\t# \tj += 1\n\t\t\t#print the count\n\t\t\tif(vals == 1):\n\t\t\t\tprint(\"The word {} appears {} time in {} (link: {})\".format(words,vals,titles[i],books[i]))\n\t\t\telif(vals == 0):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint(\"The word {} appears {} times in {} (link: {})\".format(words,vals,titles[i],books[i]))\n\t\t\t#increase index values\n\t\t\ti += 1\n\texcept:\n\t\tprint(\"Error while searching for word.\")",
"def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn"
] |
[
"0.65093267",
"0.63722867",
"0.6290724",
"0.625288",
"0.61700827",
"0.6067454",
"0.6060485",
"0.6046413",
"0.5957303",
"0.5878592",
"0.58614373",
"0.58510476",
"0.58190084",
"0.5804851",
"0.5802345",
"0.57644963",
"0.57568294",
"0.5722268",
"0.57160085",
"0.56934005",
"0.5646759",
"0.5646404",
"0.56427073",
"0.5632434",
"0.5616818",
"0.5609681",
"0.5608614",
"0.55353653",
"0.5515475",
"0.55146563"
] |
0.75670373
|
0
|
Removes rows where 'DESC' == 'RECOVR AUD' (not 'REGULAR'). These caused duplicate entries when grouping by turnstile & datetime. Removes DESC column. Fixes EXITS column name.
|
def clean_data(df_turnstiles):
    # sort values in such a way that the duplicate values sit directly below the originals, so they will be removed.
df_turnstiles.sort_values(
["C/A", "UNIT", "SCP", "STATION", "DATE_TIME"],
inplace=True,
ascending=False,
)
# keeps top row, deletes others
df_turnstiles.drop_duplicates(
subset=["C/A", "UNIT", "SCP", "STATION", "DATE_TIME"], inplace=True
)
# remove DESC column
df_turnstiles = df_turnstiles.drop(["DESC"], axis=1, errors="ignore")
# remove the many spaces in the EXITS column name
df_turnstiles.rename(
columns={
"EXITS ": "EXITS"
},
inplace=True,
)
return df_turnstiles
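A minimal sketch of running `clean_data` on a toy frame; the columns follow the MTA turnstile layout this record assumes, and the trailing whitespace in the raw 'EXITS' header is abbreviated here:

import pandas as pd

raw = pd.DataFrame({
    "C/A": ["A002", "A002"],
    "UNIT": ["R051", "R051"],
    "SCP": ["02-00-00", "02-00-00"],
    "STATION": ["59 ST", "59 ST"],
    "DATE_TIME": pd.to_datetime(["2021-01-01 03:00:00", "2021-01-01 03:00:00"]),
    "DESC": ["REGULAR", "RECOVR AUD"],   # the audit row duplicates the regular row
    "EXITS ": [1000, 1000],
})

cleaned = clean_data(raw)   # one row per turnstile/time, DESC dropped, EXITS renamed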
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def filter_unique_ticker(state: State):\n if state.events.extract_company_list + state.events.load_company_list == 200:\n try:\n state.files.combined_exchanges.columns = map(str.lower, state.files.combined_exchanges.columns)\n\n # Following line is dropping duplicates but there's not?\n state.output = state.files.combined_exchanges[[\"symbol\", 'name', 'lastsale', 'marketcap', 'ipoyear', 'sector', 'industry']].drop_duplicates()\n state.output.to_csv(f\"{PATH}/data/combined_exchanges.csv\")\n state.events.transform_company_list = 100\n except Exception as e:\n state.output = None\n LOGGER.warning(f\"Could not transform company data , error: {e}\")\n\n else:\n state.output = pd.read_csv(f\"{PATH}/data/combined_exchanges_sample.csv\")\n LOGGER.warning(f\"Using old company ticker file\")",
"def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_False=np.where(dup==False)\n\t\n no_dup=data.loc[dup_False]\n\n len_no_dup=no_dup.shape[0]\n len_dup_False_indx=len(dup_False[0])\n\n try:\n assert len_no_dup == len_dup_False_indx\n except AssertionError:\n print(\"Removal of duplicates and creation of new output failed.\")\n print(\"Length of no duplicated indices does not match the subsampled main dataframe... function failiure :(\")\n\n\t\n if header_rows !=0: \n frames = [header, no_dup]\n no_dup = pd.concat(frames)\n\n if sep_type==\"\":\n no_dup.to_csv(out_file, sep=\"\\t\", header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))\n else:\n no_dup.to_csv(out_file, sep=sep_type, header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))",
"def _remove_tech_rep_negatives(self):\n\n # For each row in the post_med_df, find the mapping key that is a substring\n # Should be only one, check this.\n # Then once you have found the one, check all samples in the post_med df to see if it matches any other\n # if you return multiple matches, then keep only the one with the biggest number of contigs,\n # and all others to a drop list. Keep a checked list so that we don't have to check readsets twice.\n # Also up date a dictionary as you go that is the full readset to the sample-id that it needs to become.\n # Once this has been done for the post-med do it for the pre-med.\n # For the pre-med, use the dictionary we created while doing the post-med\n\n # Get the post med df. Read it in with index as false and set index manually without dropping\n # this way we can work with the index, but then we can not write it out later so as not\n # to disturb the column orders.\n post_med_count_path = os.path.join(self.negative_output_dir_path, 'post_med_seqs', [_ for _ in os.listdir(\n os.path.join(self.negative_output_dir_path, 'post_med_seqs')) if 'abund' in _][0])\n post_med_df = pd.read_csv(post_med_count_path, index_col=False)\n post_med_df = post_med_df.set_index('sample-id', drop=False)\n\n # Same for the pre_med\n pre_med_count_path = os.path.join(self.negative_output_dir_path, 'pre_med_seqs', [_ for _ in os.listdir(\n os.path.join(self.negative_output_dir_path, 'pre_med_seqs')) if 'abund' in _][0])\n pre_med_df = pd.read_csv(pre_med_count_path, index_col=False)\n pre_med_df = pre_med_df.set_index('sample-id', drop=False)\n\n # First check to see if the sample-ids have already been fixed\n if 'TARA' in pre_med_df.index[0] and 'TARA' in post_med_df.index[0]:\n return\n if 'TARA' in pre_med_df.index[0] and 'TARA' not in post_med_df.index[0]:\n raise RuntimeError\n if 'TARA' not in pre_med_df.index[0] and 'TARA' in post_med_df.index[0]:\n raise RuntimeError\n\n # The dictionary df that Setphane produced\n mapping_df = pd.read_csv(self.negative_mapping_file_path, index_col=0)\n # Make the mapping dictionary from the Stephane df\n raw_mapping_dict = {}\n for df_ind in mapping_df.index:\n raw_mapping_dict[df_ind] = mapping_df.at[df_ind, 'sample-id_source']\n\n # This is the dictionary we are going to populate that had the full genoscope readset\n # as the key and the equivalent TARA sample-id as the value\n curated_mapping_dict = {}\n\n # Check that the assumption holds that both of the indeces are identifcal except for order.\n # NB the post med df has an annoying row at the end.\n assert(set(post_med_df.index[:-1]) == set(pre_med_df.index))\n contig_dict = {readset: contig for readset, contig in zip(post_med_df['sample-id'][:-1], post_med_df['raw_contigs'][:-1])}\n\n to_drop_list = []\n checked_list = []\n for pm_ind in post_med_df.index[:-1]:\n if pm_ind in checked_list:\n continue\n match = []\n for map_ind in mapping_df.index:\n if map_ind in pm_ind:\n match.append(map_ind)\n if len(match) == 0:\n print(f'pm_ind: {pm_ind} found 0 matches. 
This sample will be dropped.')\n to_drop_list.append(pm_ind)\n continue\n elif len(match) > 1:\n raise RuntimeError\n\n # Now we have the mapping indice that matches\n match = match[0]\n pm_matches = []\n for pm_ind_again in post_med_df.index[:-1]:\n if match in pm_ind_again:\n pm_matches.append(pm_ind_again)\n assert(len(pm_matches) > 0)\n if len(pm_matches) > 1:\n # Then we have technical replicates and we only want to keep the largest\n contig_match_dict = {pm_match: contig_dict[pm_match] for pm_match in pm_matches}\n sorted_keys = sorted(contig_match_dict, key=contig_match_dict.get, reverse=True)\n # Add all of the matches to the check_list\n checked_list.extend(sorted_keys)\n curated_mapping_dict[sorted_keys[0]] = raw_mapping_dict[match]\n to_drop_list.extend(sorted_keys[1:])\n else:\n checked_list.append(pm_matches[0])\n curated_mapping_dict[pm_matches[0]] = raw_mapping_dict[match]\n\n # drop the rows\n post_med_df.drop(index=to_drop_list, inplace=True)\n # We now need to get rid of any sequence count columns that only have 0s after dropping the samples\n # The last meta column is post_med_unique\n cols = list(post_med_df)\n c_ind = cols.index('post_med_unique') + 1\n cols_to_check = cols[c_ind:]\n cols_to_drop = []\n for col in cols_to_check:\n if (post_med_df[col][:-1] == 0).all():\n cols_to_drop.append(col)\n\n # drop the cols\n post_med_df.drop(columns=cols_to_drop, inplace=True)\n\n # rename\n for ind in post_med_df.index[:-1]:\n current = post_med_df.at[ind, 'sample-id']\n post_med_df.at[ind, 'sample-id'] = curated_mapping_dict[current]\n\n # Here we have the curated mapping dict popualted and we can now use this to\n # process the pre_med df\n pre_med_df.drop(index=to_drop_list, inplace=True)\n # We now need to get rid of any sequence count columns that only have 0s after dropping the samples\n # The last meta column is post_med_unique\n cols = list(pre_med_df)\n c_ind = cols.index('sample-id') + 1\n cols_to_check = cols[c_ind:]\n cols_to_drop = []\n for col in cols_to_check:\n if (pre_med_df[col][:-1] == 0).all():\n cols_to_drop.append(col)\n\n # drop the cols\n pre_med_df.drop(columns=cols_to_drop, inplace=True)\n\n # rename\n for ind in pre_med_df.index:\n current = pre_med_df.at[ind, 'sample-id']\n pre_med_df.at[ind, 'sample-id'] = curated_mapping_dict[current]\n\n # Now convert the columns to int32\n d_type_dict = {col_name : pd.Int32Dtype() for col_name in list(post_med_df)[2:]}\n post_med_df = post_med_df.astype(d_type_dict)\n d_type_dict = {col_name : pd.Int32Dtype() for col_name in list(pre_med_df)[2:]}\n pre_med_df = pre_med_df.astype(d_type_dict)\n\n # Important to write out with index as false\n post_med_df.to_csv(post_med_count_path, index=False, header=True)\n pre_med_df.to_csv(pre_med_count_path, index=False, header=True)",
"def clean_and_enhance_dataframe(grouped, due_date_cutoff, euctr_url):\n grouped.replace('nan', np.nan, inplace=True)\n grouped['full_title'] = grouped.full_title.str.replace(r'\\r','')\n grouped['full_title'] = grouped.full_title.str.replace(r'\\n','')\n\n grouped.rename(columns={'eudract_number':'trial_id'}, inplace=True)\n grouped['min_end_date'] = pd.to_datetime(grouped['min_end_date'])\n grouped['max_end_date'] = pd.to_datetime(grouped['max_end_date'])\n grouped['has_results'] = (grouped.has_results == grouped.number_of_countries).astype(int)\n grouped['includes_pip'] = (grouped.includes_pip > 0).astype(int)\n grouped['exempt'] = ((grouped.includes_pip == 0) & (grouped.phase_1 == grouped.number_of_countries)).astype(int)\n\n sb_cond = [\n (grouped.single_blind == grouped.number_of_countries),\n (grouped.not_single_blind == grouped.number_of_countries)] \n sb_vals = [1,0]\n grouped['single_blind'] = np.select(sb_cond,sb_vals, default = 2)\n\n rd_cond = [\n (grouped.rare_disease == grouped.number_of_countries),\n (grouped.not_rare_disease == grouped.number_of_countries),\n (grouped.rare_disease_blank == grouped.number_of_countries)]\n rd_vals = [1,0,3]\n grouped['rare_disease'] = np.select(rd_cond,rd_vals, default = 2)\n\n ph_cond = [\n (grouped.phase_1 == grouped.number_of_countries),\n (grouped.phase_2 == grouped.number_of_countries),\n (grouped.phase_3 == grouped.number_of_countries),\n (grouped.phase_4 == grouped.number_of_countries)]\n ph_vals = [1,2,3,4]\n grouped['phase'] = np.select(ph_cond,ph_vals, default = 0)\n\n be_cond = [\n (grouped.bioequivalence == grouped.number_of_countries),\n (grouped.not_bioequivalence == grouped.number_of_countries)]\n be_vals = [1,0]\n grouped['bioequivalence_study'] = np.select(be_cond,be_vals, default = 2)\n\n hv_cond = [\n (grouped.healthy_volunteers == grouped.number_of_countries),\n (grouped.not_healthy_volunteers == grouped.number_of_countries)]\n hv_vals = [1,0]\n grouped['health_volunteers'] = np.select(hv_cond,hv_vals, default = 2)\n\n ts_cond = [\n (grouped.ongoing == grouped.number_of_countries),\n ((grouped.completed) + (grouped.terminated) == grouped.number_of_countries),\n (((grouped.completed) + (grouped.terminated)) > 0) & (((grouped.completed) + (grouped.terminated)) < grouped.number_of_countries),\n (grouped.no_status == grouped.number_of_countries)]\n ts_vals = [0,1,2,4]\n grouped['trial_status'] = np.select(ts_cond,ts_vals, default = 3)\n\n grouped['any_terminated'] = (grouped.terminated > 0).astype(int)\n grouped['all_terminated'] = (grouped.terminated == grouped.number_of_countries).astype(int)\n grouped['results_expected'] = (((grouped.completed) + (grouped.terminated) == grouped.number_of_countries) & \n (grouped.comp_date > 0) &\n (grouped.max_end_date < due_date_cutoff) &\n ~((grouped.includes_pip == 0) & (grouped.phase_1 == grouped.number_of_countries))).astype(int)\n grouped['all_completed_no_comp_date'] = (((grouped.completed) + (grouped.terminated) == grouped.number_of_countries) &\n (grouped.comp_date == 0)).astype(int)\n title_cond = [\n ((pd.isnull(grouped.full_title)) & (pd.notnull(grouped.abbreviated_title))),\n ((pd.isnull(grouped.full_title)) & (pd.isnull(grouped.abbreviated_title))),\n ((pd.notnull(grouped.full_title)) & (grouped.full_title.str.len() > 200))]\n title_vals = [grouped.abbreviated_title, 'No Title', grouped.full_title.str.slice(stop=200) + '...']\n grouped['trial_title'] = np.select(title_cond, title_vals, grouped.full_title)\n\n grouped['trial_url'] = euctr_url + grouped.trial_id\n 
grouped['comp_date_while_ongoing'] = ((grouped.comp_date > 0) & \n (((grouped.completed) + (grouped.terminated)) > 0) & \n (((grouped.completed) + (grouped.terminated)) < grouped.number_of_countries)).astype(int)\n grouped['contains_non_eu'] = (grouped.non_eu > 0).astype(int)\n grouped['only_non_eu'] = (grouped.non_eu == grouped.number_of_countries).astype(int)",
"def experience_clean_row(row_of_data):\n experience = row_of_data.get('experience')\n z = list(set(remove_filler_words(experience)))\n return z",
"def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()",
"def clean(df):",
"def extract_description_features(df, desc_col_name, remove_var=False):\n df['isAcctNo'] = df[desc_col_name].str.contains('$ACCT_NO', regex=False).astype('int')\n df['isForeignCurr'] = df[desc_col_name].str.contains('$CURRENCY', regex=False).astype('int')\n # df['isForeignCountry'] = df[desc_col_name].str.contains('$FOREIGN_COUNTRY', regex=False).astype('int')\n\n if remove_var:\n regex = '\\$ACCT_NO|\\$CURRENCY|\\$FOREIGN_COUNTRY'\n df[desc_col_name] = df[desc_col_name].str.replace(regex, '', regex=True)\n return df",
"def remove_dup_mzs(df):\n return (df\n .sort_values(by='intensity', ascending=False)\n .drop_duplicates(subset=['inchi', 'mz'], keep='first'))",
"def _clean_results(self):\n\t\tif self.file_type == \"Automobile\":\n\t\t\tcols = [\"Year\", \"Mileage\", \"Price\"]\n\t\t\tself.data.Mileage.replace([',', 'mi.', 'nan', ' '], '', regex=True, inplace=True) # Fix mileage column\n\t\t\tself.data.Price.replace([',', '\\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)\n\t\t\tself.data[cols] = self.data[cols].apply(pd.to_numeric, errors='coerce') # Coerces errors into NaN values\n\t\t\tself.data.drop(self.data[self.data.Year < 2000].index, inplace=True) # Remove cars made before 2000\n\t\t\tself.data.drop(self.data[self.data.Price > 30000].index, inplace=True) # Remove cars over $30,000\n\t\t\tself.data.drop(self.data[(self.data.Mileage < 1000) | (self.data.Mileage > 300000)].index, inplace=True) # Remove cars with over 300,000 miles\n\t\t\tself.data['Age'] = 2018 - self.data['Year'] # Change years to Age\n\t\telif self.file_type == \"Apartment\":\n\t\t\tself.data.Area.replace(['ft2'], '', regex=True, inplace=True) # Remove ft2 from square footage column\n\t\t\tself.data.Price.replace([',', '\\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)\n\t\telse:\n\t\t\tself.data['Street'], self.data['City'], self.data['State'] = self.data['Address'].str.split(',', 2).str\n\t\t\tdel self.data.Address\n\t\t\tself.data.drop(self.data[self.data.Price > 1000000].index, inplace=True) # Remove houses worth more than $1 million\n\n\t\tself.data.replace('^\\s*$', np.nan, regex=True, inplace=True) # Replace all empty values with np.NaN\n\t\tself.data = self.data.dropna(axis=1, how='all') # Remove Null Columns\n\t\tself.data = self.data.apply(pd.to_numeric, errors='ignore') # Coerces errors into NaN values",
"def date_cleaner(dataset):\n dataset['document_last_edition'] = dataset['meta_lastEdition']\n dataset = dataset.drop(['meta_lastEdition'], axis=1)\n \n \n \"\"\"\n Get column to correct date format\n \"\"\"\n dataset['document_last_edition'] = dataset['document_last_edition'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n \"\"\"\n meta_lastPublication renaming\n \"\"\"\n dataset['document_last_publication'] = dataset['meta_lastPublication']\n dataset = dataset.drop(['meta_lastPublication'], axis=1)\n\n # DROP HOURS/M/S\n dataset['document_last_publication'] = dataset['document_last_publication'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n # META CREATED DATE\n dataset['meta_created_date'] = dataset['meta_created_date'].str.replace('_', '-')\n dataset['meta_created_date'] = dataset['meta_created_date'].apply(lambda x: str(unify_date_format(x))[:10])\n dataset['document_created_at'] = dataset['meta_created_date']\n dataset = dataset.drop(['meta_created_date'], axis=1)\n\n # META_REVISED_MODIFIED\n dataset['document_revised_modified'] = dataset['meta_revised_modified']\n dataset = dataset.drop(['meta_revised_modified'], axis=1) \n \n \n date_column_list = ['document_created_at','document_last_edition', 'document_last_publication', 'document_revised_modified']\n \n \"\"\"\n \n THE PLAN IS TO FIRST REPLACE EMPTY SPOTS IN META_CREATED_DATE WITH CREATED_AT\n THEN WE DROP CREATED_AT\n THEN WE REPLACE EMPTY SPOTS IN OTHER COLUMNS WITH document_created_at\n \"\"\" \n \n dataset[date_column_list] = dataset[date_column_list].replace('Not Specified', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('Not Specif', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('nan', np.nan) \n dataset['document_created_at'].fillna(dataset['created_at'], inplace=True) \n dataset = dataset.drop(['created_at'], axis=1)\n \n dataset['document_last_edition'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_last_publication'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_revised_modified'].fillna(dataset['document_created_at'], inplace=True)\n \n \n\n \n \"\"\"\n FIXING NON-EXISTING DATES IN DATASET\n \"\"\"\n \n dataset = dataset.replace(['2020-1-29'], ['2020-01-29'])\n \n \n \n created_at_unique = list(dataset['document_created_at'].unique())\n last_edition_unique = list(dataset['document_last_edition'].unique())\n last_publication_unique = list(dataset['document_last_publication'].unique())\n revised_modified_unique = list(dataset['document_revised_modified'].unique())\n \n \n # IF LIST NEED TO GET UPDATED\n invalid_created_at = is_valid_date(created_at_unique)\n invalid_last_edition_unique = is_valid_date(last_edition_unique)\n invalid_last_publication_unique = is_valid_date(last_publication_unique)\n invalid_revised_modified_unique = is_valid_date(revised_modified_unique) \n invalid_dates = list(set(itertools.chain(invalid_created_at, invalid_last_edition_unique, invalid_last_publication_unique, invalid_revised_modified_unique)))\n \n \n \n \n # Non-existing dates from the list\n dataset = dataset.replace(['2019-04-31', '2016-11-31', '2019-09-31', '2015-02-31', '2017-04-31', '2015-11-31', '2015-09-31', '2017-02-29', '2018-09-31', '2017-06-31', '2018-04-31', '2015-04-31', '2018-11-31', '2017-09-31', '2015-02-29', '2019-02-29', '2019-06-31', '2018-02-29', '2016-02-30', '2016-06-31', '2016-09-31', '2018-06-31', '2019-18-03', '2020-02-31', '9999-12-31'], \n ['2019-04-30', '2016-11-30', '2019-09-30', 
'2015-02-28', '2017-04-30', '2015-11-30', '2015-09-30', '2017-02-28', '2018-09-30', '2017-06-30', '2018-04-30', '2015-04-30', '2018-11-30', '2017-09-30', '2015-02-28', '2019-02-28', '2019-06-30', '2018-02-28', '2016-02-28', '2016-06-30', '2016-09-30', '2018-06-30', '2019-03-18', '2020-02-28', '1999-12-31'])\n\n\n \n \n\n\n return dataset",
"def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw",
"def clean_metadata(metaobj):\n if len(metaobj) == 1 and 'href' in metaobj[0] and '/api/programs/' in metaobj[0]['href']:\n metaobj = metaobj # Keep lastUpdated for program\n else:\n metaobj = remove_subset_from_set(metaobj, 'lastUpdated')\n metaobj = remove_subset_from_set(metaobj, 'lastUpdatedBy')\n metaobj = remove_subset_from_set(metaobj, 'created')\n metaobj = remove_subset_from_set(metaobj, 'createdBy')\n metaobj = remove_subset_from_set(metaobj, 'href')\n metaobj = remove_subset_from_set(metaobj, 'access')\n metaobj = remove_subset_from_set(metaobj, 'favorites')\n metaobj = remove_subset_from_set(metaobj, 'allItems')\n metaobj = remove_subset_from_set(metaobj, 'displayName')\n metaobj = remove_subset_from_set(metaobj, 'displayFormName')\n metaobj = remove_subset_from_set(metaobj, 'displayShortName')\n metaobj = remove_subset_from_set(metaobj, 'displayDenominatorDescription')\n metaobj = remove_subset_from_set(metaobj, 'displayNumeratorDescription')\n metaobj = remove_subset_from_set(metaobj, 'displayDescription')\n metaobj = remove_subset_from_set(metaobj, 'interpretations')\n if len(metaobj) > 0:\n for subtag in ['dashboardItems', 'analyticsPeriodBoundaries', 'mapViews', 'user', 'userGroupAccesses',\n 'programStageDataElements', 'programTrackedEntityAttributes',\n 'trackedEntityTypeAttributes', 'userCredentials', 'legends', 'greyedFields']:\n for i in range(0, len(metaobj)):\n if subtag in metaobj[i]:\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'lastUpdated')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'lastUpdatedBy')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'created')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'createdBy')\n # There is access : { read: true, delete: false ... } dictionary\n # and there is access : \"rw----\"... Make sure we only delete the dictionary version\n if subtag not in ['user', 'userGroupAccesses']:\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'access')\n\n if subtag == 'programTrackedEntityAttributes':\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'name')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'displayName')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'displayFormName')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'displayShortName')\n\n return metaobj",
"def create_df(filename=r'.\\data\\default of credit card clients.xls', remove_pay0=True, resample=False):\n\n filename = filename\n nanDict = {}\n\n df = pd.read_excel(filename, header=1, skiprows=0, index_col=0, na_values=nanDict)\n df.rename(index=str, columns={\"default payment next month\":\"defaultPaymentNextMonth\"}, inplace=True)\n\n # Remove instances with zeros only for past bill statements or paid amounts\n # and not or, remove only when true in all columns\n print('before removing instances where all bill statements or paid amount is zero:', df.shape)\n \n df = df.drop(df[(df.BILL_AMT1 == 0) &\n (df.BILL_AMT2 == 0) &\n (df.BILL_AMT3 == 0) &\n (df.BILL_AMT4 == 0) &\n (df.BILL_AMT5 == 0) &\n (df.BILL_AMT6 == 0)].index, axis=0)\n \n df = df.drop(df[(df.PAY_AMT1 == 0) &\n (df.PAY_AMT2 == 0) &\n (df.PAY_AMT3 == 0) &\n (df.PAY_AMT4 == 0) &\n (df.PAY_AMT5 == 0) &\n (df.PAY_AMT6 == 0)].index, axis=0)\n \n print('after removing instances where all bill statements or paid amount is zero:', df.shape)\n\n \n \n print('df shape before illegal values removed:',df.shape)\n print('df after removing illegals:')\n\n df = pay_remove_value(df,-2)\n print(' remove pay=-2', df.shape)\n\n df = bill_amt_remove_negative(df, 0)\n print(' remove Pay_amt, bill_amt <0:', df.shape)\n\n\n df = edu_marr_remove_value(df)\n print(' remove edy=0,5,6, marriage=0:', df.shape)\n\n if remove_pay0:# over 80 % of data lost\n\n df = pay_remove_value(df,0)\n print(' remove pay=0:',df.shape)\n\n\n\n # features and targets\n X = df.loc[:, df.columns !='defaultPaymentNextMonth'].values\n y = df.loc[:, df.columns =='defaultPaymentNextMonth'].values\n\n # categorical variables to one-hot's\n onehotencoder = OneHotEncoder(categories='auto')\n #print(df.iloc[0:, 3])\n \n # transform cat. var. columns into cat. variables.\n # new columns are added at the start, columns before col 1 put behind new columns\n \n X = ColumnTransformer(\n [(\"\",onehotencoder, [1,2,3, 5,6,7,8,9,10]),],\n remainder='passthrough'\n ).fit_transform(X)\n print(' shape of dataset without resampling', X.shape,y.shape)\n\n if resample:\n sm = SMOTE(random_state=seed)\n X, y = sm.fit_resample(X, y.ravel())\n y = y.reshape(-1,1)\n print(' shape of dataset after resampling', X.shape,y.shape)\n #sys.exit()\n return X, y",
"def alter_details(self, parsed_details_df):\n\n parsed_details_df = parsed_details_df[~pandas.isnull(parsed_details_df.key)]\n parsed_details_df[\"key\"] = parsed_details_df[\"key\"].apply(lambda key: key.replace(\":\", \"\").strip().upper())\n parsed_details_df[\"key\"] = parsed_details_df[\"key\"].apply(\n lambda key: self.details_mapping[key] if key in self.details_mapping.keys() else key)\n parsed_details_df.drop_duplicates(subset =\"key\", inplace = True)\n return parsed_details_df",
"def remove_duplicated_lines():\n\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n unique_lines = []\n # compare line be line\n with open(os.path.join(work_folder, \"tempfile.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\")) as infile:\n for line in infile:\n if line not in unique_lines:\n outfile.write(line)\n unique_lines.append(line)\n # replace files\n shutil.copyfile(os.path.join(work_folder, 'tempfile.csv'), os.path.join(\n work_folder, \"filtered_merged_history_KMDW.csv\"))\n # remove temp file\n os.remove(os.path.join(work_folder, \"tempfile.csv\"))",
"def clean_df(dfin, top=10):\n\n dfin['crop'] = dfin['crop'].astype('str')\n dfin['crop'] = dfin.crop.str.lower()\n\n dfin[\"created_on\"] = dfin[\"created_on\"].astype(\"datetime64\")\n dfin['latitude'] = np.round(dfin.latitude.apply(pd.to_numeric),2)\n dfin['longitude'] = np.round(dfin.longitude.apply(pd.to_numeric),2)\n dfin['query_type'] = dfin['query_type'].astype('str')\n dfin['query_type'] = dfin.query_type.apply(str.lower)\n\n dfin['hits'] = 1\n\n dfin = dfin[pd.notnull(dfin.kcc_answer_raw)]\n dfin = dfin[pd.notnull(dfin['query_text_raw'])]\n\n dfin['query_text_raw'] = dfin.query_text_raw.str.lower()\n dfin['kcc_answer_raw'] = dfin.kcc_answer_raw.str.lower()\n\n dfin['state_name'] = dfin.state_name.str.lower()\n dfin['district_name'] = dfin.district_name.str.lower()\n\n dfin['crop_full'] = dfin.crop\n dfin['crop'] = [i.split()[0] if len(i.split())>1 else i for i in dfin.crop]\n dfin.dropna(how='all',inplace=True)\n\n #topcrop = dfin.crop.value_counts().head(top).index.tolist()\n topcrop = ['paddy', 'wheat', 'cotton', 'chillies', 'onion', 'brinjal', 'sugarcane', 'tomato', 'bengal', 'groundnut', 'soybean', 'potato','maize']\n dfin = dfin[dfin.crop.isin(topcrop)]\n print(dfin.crop.unique())\n\n dfin = dfin[['crop','created_on','latitude','longitude','query_type','query_text_raw','kcc_answer_raw','state_name','district_name','crop_full']]\n return dfin",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)",
"def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z",
"def clean_file(df):\n df_clean = df.drop_duplicates()\n df_no_zeros = df_clean[df_clean[2] != 0]\n df_sorted = df_no_zeros.sort()\n\n return df_sorted",
"def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped",
"def remove_high_gi(df):\n df = df[df['2h-iAUC'] <= 75]\n writer = pd.ExcelWriter('final_dataset_with_median.xlsx', engine='xlsxwriter')\n df.to_excel(writer, sheet_name='Sheet1')\n writer.save()",
"def clean_duplicate_documents(self):\n title_k = lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")",
"def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]",
"def removeLegacy(self, path=None):\n\n df = pd.read_csv(path, compression='gzip')\n print(df.shape)\n gamelist = pd.read_csv('Resources/Genres.csv.gz', usecols=['appid'])\n gamelist = pd.DataFrame(gamelist.appid.unique(), columns=['appid'])\n print(gamelist)\n filter_df = pd.merge(df, gamelist, on='appid', how='inner')\n filter_df = filter_df.dropna()\n filter_df = filter_df.sort_values(['steamid', 'appid'], ascending=[True, True])\n print('done')\n print(filter_df.shape)\n print(filter_df)\n print(np.setdiff1d(df['appid'].unique(), filter_df['appid'].unique()))\n filter_df.to_csv(path, compression='gzip', columns=['steamid', 'appid', 'rating'], index=None)",
"def strip_sql(data, sap_stat=True):\r\n tab_fixs = [[\"PCOGIS.SDE.\", ''],\r\n [\"Auxiliary Equipment\", \"AUXILLARYEQUIPMENT\"]]\r\n for old_str, new_str in tab_fixs:\r\n data['TABLE'] = data['TABLE'].str.replace(old_str, new_str)\r\n data = data.dropna(subset=['COLUMN'])\r\n bad_atts = [\" \", \"SHAPE_Length\", \"HVFUSES\", \"LVFUSES\", \"SHAPE_Area\",\r\n \"None\", \"ACTUALLENGTH\", \"DECOMMISSIONINGDATE\",\r\n \"DECOMMISSIONINGREASON\", 'LOTS YET TO ADD']\r\n data = data[~data['COLUMN'].isin(bad_atts)]\r\n bad_tabs = ['LocationAttributes', 'CustomerConnections', 'TBD']\r\n data = data[~data['TABLE'].isin(bad_tabs)]\r\n bad_tab_atts = [['SWITCHUNIT$', 'INTERRUPTINGMEDIUM$'],\r\n ['DistributionMain$', 'CROSSINGID$'],\r\n ['DistributionMain$', 'MOUNTINGTYPE$'],\r\n ['DistributionMain$', 'MOUNTINGPOSITION$']]\r\n for tab_str, att_str in bad_tab_atts:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str))]\r\n bad_doubles = [['Regulator$', 'SUBTYPECD$', 'y'],\r\n ['RegulatorStation$', 'EQUIPMENTID$', 'N'],\r\n ['SurfaceStructure$', 'APPLICATION$', 'N'],\r\n ['SurfaceStructure$', 'ENTRY$', 'N'],\r\n ['SurfaceStructure$', 'FACILITYID$', 'N'],\r\n ['SurfaceStructure$', 'MANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', 'MATERIAL$', 'N'],\r\n ['SurfaceStructure$', 'MODEL$', 'N'],\r\n ['SurfaceStructure$', 'STRUCTURESIZE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYAMPERAGEHOURS$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYCOUNT$', 'N'],\r\n ['PillarPoint$', 'DATEMANUFACTURED$', 'TBC'],\r\n ['PillarPoint$', 'FACILITYID$', 'TBC'],\r\n ['PillarPoint$', 'FEEDERID$', 'TBC'],\r\n ['PillarPoint$', 'NUMBEROFUSEDCIRCUITS$', 'TBC'],\r\n ['PillarPoint$', 'SUBTYPECD$', 'N'],\r\n ['PillarPoint$', 'TOTALNUMBEROFCIRCUITS$', 'TBC'],\r\n ['PillarPoint$', 'TRUENZMGPOS$', 'N'],\r\n ['SupportStructure$', 'HIGHESTVOLTAGE$', 'N'],\r\n ['SurfaceStructure$', 'ASSETFUNCTION$', 'N'],\r\n ['SurfaceStructure$', 'ENCLOSUREMANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', 'ENCLOSURETYPE$', 'N'],\r\n ['SurfaceStructure$', 'GLOBALID$', 'N'],\r\n ['SurfaceStructure$', 'STREETNAME$', 'N'],\r\n ['SurfaceStructure$', 'STREETNO$', 'N'],\r\n ['SurfaceStructure$', 'SUBURB$', 'N'],\r\n ['SurfaceStructure$', 'SYMBOLROTATION$', 'N'],\r\n ['SurfaceStructure$', 'TOWN$', 'N'],\r\n ['Switch$', 'FACILITYID$', 'N'],\r\n ['Switch$', 'FEEDERID$', 'N'],\r\n ['Switch$', 'FEEDERID2$', 'N'],\r\n ['Switch$', 'GEONETFEEDERCODE$', 'N'],\r\n ['Switch$', 'GLOBALID$', 'N'],\r\n ['Switch$', 'GROUNDEDINDICATOR$', 'N'],\r\n ['Switch$', 'INSTALLATIONDATE$', 'N'],\r\n ['Switch$', 'MOUNTING$', 'N'],\r\n ['Switch$', 'NORMALPOSITION$', 'N'],\r\n ['Switch$', 'NUMPHASES$', 'N'],\r\n ['Switch$', 'OPERATINGVOLTAGE$', 'N'],\r\n ['Switch$', 'OUTOFORDERINDICATOR$', 'N'],\r\n ['Switch$', 'REFERENCE$', 'N'],\r\n ['Switch$', 'REMOTECONTROLLED$', 'N'],\r\n ['Switch$', 'REMOTEINDICATION$', 'N'],\r\n ['Switch$', 'RETICULATION$', 'N'],\r\n ['Switch$', 'SITEID$', 'N'],\r\n ['Switch$', 'STREETNAME$', 'N'],\r\n ['Switch$', 'STREETNO$', 'N'],\r\n ['Switch$', 'SUBTYPECD$', 'N'],\r\n ['Switch$', 'SUBURB$', 'N'],\r\n ['Switch$', 'SYMBOLROTATION$', 'N'],\r\n ['Switch$', 'TOWN$', 'N'],\r\n ['Switch$', 'WORKORDERID$', 'N'],\r\n ['SWITCHUNIT$', 'ARCQUENCHING$', 'N'],\r\n ['SWITCHUNIT$', 'C_INTJDEID$', 'N'],\r\n ['SWITCHUNIT$', 'COMMENTS$', 'N'],\r\n ['SWITCHUNIT$', 'DATEMANUFACTURED$', 'N'],\r\n ['SWITCHUNIT$', 'DATEPURCHASED$', 'N'],\r\n ['SWITCHUNIT$', 'INSTALLATIONDATE$', 'N'],\r\n ['SWITCHUNIT$', 'INSULATIONMEDIUM$', 'N'],\r\n ['SWITCHUNIT$', 
'LOADBREAKINGCAPACITY$', 'N'],\r\n ['SWITCHUNIT$', 'MANUFACTURER$', 'N'],\r\n ['SWITCHUNIT$', 'MODEL$', 'N'],\r\n ['SWITCHUNIT$', 'NORMALCURRENTRATING$', 'N'],\r\n ['SWITCHUNIT$', 'NUMPHASES$', 'N'],\r\n ['SWITCHUNIT$', 'OWNER$', 'N'],\r\n ['SWITCHUNIT$', 'REFERENCE$', 'N'],\r\n ['SWITCHUNIT$', 'SERIALNUMBER$', 'N'],\r\n ['SWITCHUNIT$', 'VISUALEARTHINDICATOR$', 'N'],\r\n ['SWITCHUNIT$', 'VOLTAGERATING$', 'N'],\r\n ['SWITCHUNIT$', 'WORKORDERID$', 'N'],\r\n ['UndergroundStructure$', 'C_INTJDEID$', 'N'],\r\n ['UndergroundStructure$', 'COMMENTS$', 'N'],\r\n ['UndergroundStructure$', 'FACILITYID$', 'N'],\r\n ['UndergroundStructure$', 'FEEDERID$', 'N'],\r\n ['UndergroundStructure$', 'GLOBALID$', 'N'],\r\n ['UndergroundStructure$', 'HIGHESTVOLTAGE$', 'N'],\r\n ['UndergroundStructure$', 'INSTALLATIONDATE$', 'N'],\r\n ['UndergroundStructure$', 'OUTOFORDERINDICATOR$', 'N'],\r\n ['UndergroundStructure$', 'OWNER$', 'N'],\r\n ['UndergroundStructure$', 'REFERENCE$', 'N'],\r\n ['UndergroundStructure$', 'STREETNAME$', 'N'],\r\n ['UndergroundStructure$', 'STREETNO$', 'N'],\r\n ['UndergroundStructure$', 'SUBURB$', 'N'],\r\n ['UndergroundStructure$', 'SYMBOLROTATION$', 'N'],\r\n ['UndergroundStructure$', 'TOWN$', 'N'],\r\n ['UndergroundStructure$', 'WORKORDERID$', 'N'],\r\n ['Fuse$', 'INSTALLATIONDATE$', 'N'],\r\n ['Ground$', 'BELOWGROUNDCONNECTION$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE2$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE3$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'CTBURDENVA$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTCLASS$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTQUANTITY$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTRATIO$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCE2$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCE3$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCEZ0$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA2$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA3$', 'N'],\r\n ['AUXILLARYEQUIPMENT$', 'MANUFACTURER$', 'N'],\r\n ['AUXILLARYEQUIPMENT$', 'MODEL$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYTYPE$', 'N'],\r\n ['SupportStructure$', 'FUNCTION_$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'GENERATORFUELTYPE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'HOURSOFSUPPLY$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'PARALELLCOUNT$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'PARALELLCOUNT$', 'TBD'],\r\n ['COMMSPOWERSUPPLY$', 'SYSTEMVOLTAGE$', 'TBD'],\r\n ['SurfaceStructure$', 'TRUENZMGPOS$', 'N'],\r\n ['SupportStructure$', 'ABSOLUTE$', 'N'],\r\n ['DISTTRANSFUSEUNIT$', 'VOLTAGERATING$', 'N'],\r\n ['DISTTRANSFUSEUNIT$', 'WORKORDERID$', 'N'],\r\n ['SupportStructure$', 'FEEDERID$', 'TBC'],\r\n ['SupportStructure$', 'SHAPE$', ' N'],\r\n ['SupportStructure$', 'SUBTYPECD$', 'TBD'],\r\n ['SupportStructure$', 'TREATMENTTYPE$', 'N'],\r\n ['SupportStructure$', 'TRUENZMG$', 'N'],\r\n ['SupportStructure$', 'TYPEOFTOP$', 'N'],\r\n ['SupportStructure$', 'USAGETYPE$', 'N']]\r\n if sap_stat is True:\r\n for tab_str, att_str, sap_str in bad_doubles:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].str.match(sap_str))]\r\n bad_null = [['SurfaceStructure$', 'ENCLOSURE$'],\r\n ['SurfaceStructure$', 'ENCLOSUREMANUFACTURER$'],\r\n ['SurfaceStructure$', 'ENCLOSURETYPE$'],\r\n ['Fuse$', 'ACCURACY$'],\r\n ['Fuse$', 'ANCILLARYROLE$'],\r\n ['Fuse$', 'ASSETFUNCTION$'],\r\n ['Fuse$', 'C_INTJDEID$'],\r\n ['Fuse$', 'COMMENTS$'],\r\n ['Fuse$', 'CREATIONUSER$'],\r\n ['Fuse$', 
'DATECREATED$'],\r\n ['Fuse$', 'DATEMODIFIED$'],\r\n ['Fuse$', 'DEVICETYPE$'],\r\n ['Fuse$', 'ELECTRICTRACEWEIGHT$'],\r\n ['Fuse$', 'ENABLED$'],\r\n ['Fuse$', 'FACILITYID$'],\r\n ['Fuse$', 'FEEDERID$'],\r\n ['Fuse$', 'FEEDERID2$'],\r\n ['Fuse$', 'FEEDERINFO$'],\r\n ['Fuse$', 'GEONETFEEDERCODE$'],\r\n ['Fuse$', 'GEONETFEEDERID$'],\r\n ['Fuse$', 'GEONETSUBSTATION$'],\r\n ['Fuse$', 'GLOBALID$'],\r\n ['Fuse$', 'INSTALLEDBY$'],\r\n ['Fuse$', 'LABELTEXT$'],\r\n ['Fuse$', 'LASTUSER$'],\r\n ['Fuse$', 'MANUFACTURER$'],\r\n ['Fuse$', 'MAXCONTINUOUSCURRENT$'],\r\n ['Fuse$', 'MAXINTERRUPTINGCURRENT$'],\r\n ['Fuse$', 'MAXOPERATINGVOLTAGE$'],\r\n ['Fuse$', 'MOUNTING$'],\r\n ['Fuse$', 'NOMINALVOLTAGE$'],\r\n ['Fuse$', 'NORMALPOSITION$'],\r\n ['Fuse$', 'NUMPHASES$'],\r\n ['Fuse$', 'OBJECTID$'],\r\n ['Fuse$', 'OPERATINGVOLTAGE$'],\r\n ['Fuse$', 'OUTOFORDERINDICATOR$'],\r\n ['Fuse$', 'OWNER$'],\r\n ['Fuse$', 'PARENTID$'],\r\n ['Fuse$', 'PHASEDESIGNATION$'],\r\n ['Fuse$', 'PREMISE$'],\r\n ['Fuse$', 'PRESENTPOSITION$'],\r\n ['Fuse$', 'RDB_UFID$'],\r\n ['Fuse$', 'REFERENCE$'],\r\n ['Fuse$', 'REMOTECONTROLLED$'],\r\n ['Fuse$', 'REMOTEINDICATION$'],\r\n ['Fuse$', 'RETICULATION$'],\r\n ['Fuse$', 'SCADACONTROLMECHANISM$'],\r\n ['Fuse$', 'SCADACONTROLTYPE$'],\r\n ['Fuse$', 'SCADAPTID$'],\r\n ['Fuse$', 'SHAPE$'],\r\n ['Fuse$', 'SITEID$'],\r\n ['Fuse$', 'STREETNAME$'],\r\n ['Fuse$', 'STREETNO$'],\r\n ['Fuse$', 'SUBTYPECD$'],\r\n ['Fuse$', 'SUBURB$'],\r\n ['Fuse$', 'SYMBOLROTATION$'],\r\n ['Fuse$', 'TIMESTAMP$'],\r\n ['Fuse$', 'TOWN$'],\r\n ['Fuse$', 'TYPE$'],\r\n ['Fuse$', 'WORKORDERID$'],\r\n ['Fuse$', 'ZONE$']]\r\n for tab_str, att_str in bad_null:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].isnull())]\r\n return data",
"def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]",
"def _remove_duplicates_(self):\n t = self.table_orig\n mask = []\n t_obs = np.unique(t['jdobs'])\n for t_ in t_obs:\n if np.sum(t['jdobs'] == t_) == 1:\n mask.append(True)\n else:\n mags = t['magpsf'][t['jdobs'] == t_]\n if len(np.unique(mags)) == 1:\n mask.append(True)\n for k in range(len(mags) - 1):\n mask.append(False)\n elif np.sum(np.unique(mags) < 90) == 1:\n done = False\n for m_ in mags:\n if m_ < 90. and not done:\n mask.append(True)\n done = True\n else:\n mask.append(False)\n else:\n mags_ = np.unique(mags)\n mags_ = np.array(mags_[mags_ < 90])\n\n done = [False for k in range(len(mags_))]\n for m_ in mags:\n if m_ < 90.:\n k = np.where(mags_ == m_)[0][0]\n if not done[k]:\n mask.append(True)\n done[k] = True\n else:\n mask.append(False)\n\n self.table = t[np.array(mask)]",
"def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n count_miss = df.isnull().sum(axis=0).values #find number of nans for each column\n count_miss = [val for val in count_miss]\n \n drop_cols = []\n\n for ind, val in enumerate(count_miss):\n if val > 200000:\n drop_cols.append(ind)\n \n df_drop_cols = list(azdias.columns[drop_cols])\n df = df.drop(df_drop_cols, axis=1)\n \n for col in range(df.shape[1]): #loop through columns\n column_name = df.columns[col] #get column name\n missing_list = feat_info.iloc[col,3] #get missing_or_unknown column from feature info\n missing_list = missing_list.replace('[','') #remove left bracket from string\n missing_list = missing_list.replace(']','') #remove right bracket from string\n missing_list = missing_list.split(',') #split into individual strings\n \n #find data that is natually missing and continue loop to omit\n if missing_list == ['']:\n continue\n \n else:\n for dat_type in missing_list: \n if df[column_name].dtype == 'object': #find values that contain x\n df.loc[df[column_name] == dat_type, column_name] = np.nan #replace x with nan\n \n else:\n dat_type = int(dat_type) #if no x, convert to integer and replace with nan\n df.loc[df[column_name] == dat_type, column_name] = np.nan\n \n # select, re-encode, and engineer column values.\n \n # encode OST_WEST_KZ\n df.loc[df['OST_WEST_KZ'] == 'W','OST_WEST_KZ'] = 0\n df.loc[df['OST_WEST_KZ'] == 'O','OST_WEST_KZ'] = 1\n \n # Re-encode categorical variable(s) to be kept in the analysis.\n \n \n #get list of attributes with type categorical\n feat_info[feat_info['type'] == 'categorical']\n \n cat_new_cols = [] #initialize\n for i in feat_info[feat_info['type'] == 'categorical']['attribute']:\n cat_new_cols.append(i)\n \n for cols in df.columns:\n if cols in cat_new_cols:\n if df[cols].nunique(dropna=True) > 2: #if the number of unique values is greater than 2 \n df = df.drop(cols, axis=1) #drop from the analysis\n print(\"more than 2 categories: {}\".format(cols))\n \n else:\n if not df[cols].unique()[0] > 0:\n #if not df[cols].unique()[0] > 0:\n dummies = pd.get_dummies(df[cols], prefix=cols)\n df = df.drop(cols, axis=1) #create dummy variable\n df = df.join(dummies)\n print(\"transformed to dummy variable: {}\".format(cols))\n \n # create variable: MOVEMENT\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]),'MOVEMENT'] = 1\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([2,4,6,7,9,11,13,15]),'MOVEMENT'] = 2\n \n #Capture Decade\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,2]), 'DECADE'] = 40\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([3,4]), 'DECADE'] = 50\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([5,6,7]), 'DECADE'] = 60\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([8,9]), 'DECADE'] = 70\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([10,11,12,13]), 'DECADE'] = 80\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([14,15]), 'DECADE'] = 90\n \n df['CAMEO_INTL_2015'] = df['CAMEO_INTL_2015'].astype(float)\n\n # create new variable: WEALTH\n df.loc[df['CAMEO_INTL_2015'].isin([51,52,53,54,55]), 'WEALTH'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([41,42,43,44,45]), 'WEALTH'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([31,32,33,34,35]), 'WEALTH'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([21,22,23,24,25]), 'WEALTH'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([11,12,13,14,15]), 'WEALTH'] = 5\n \n # create new variable: LIFE_STAGE\n df.loc[df['CAMEO_INTL_2015'].isin([11,21,31,41,51]),'LIFE_STAGE'] = 1\n 
df.loc[df['CAMEO_INTL_2015'].isin([12,22,32,42,52]),'LIFE_STAGE'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([13,23,33,43,53]),'LIFE_STAGE'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([14,24,34,44,54]),'LIFE_STAGE'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([15,25,35,45,55]),'LIFE_STAGE'] = 5\n \n # remove selected columns and rows, ...\n df = df.drop('PRAEGENDE_JUGENDJAHRE', axis=1)\n df = df.drop('CAMEO_INTL_2015',axis=1)\n \n # Return the cleaned dataframe.\n return df"
] |
[
"0.56350195",
"0.54732513",
"0.52934396",
"0.5236066",
"0.5183868",
"0.5166554",
"0.5122519",
"0.50612414",
"0.50369066",
"0.4998789",
"0.49871984",
"0.49319714",
"0.49076754",
"0.4899998",
"0.4882397",
"0.48713702",
"0.48684317",
"0.48679164",
"0.48649603",
"0.48645777",
"0.48321435",
"0.4828463",
"0.4827077",
"0.48238412",
"0.48223448",
"0.4810755",
"0.48107168",
"0.4785339",
"0.47680682",
"0.47516367"
] |
0.6617085
|
0
|
Adds 'AMPM' and 'DAY_NAME' columns to the dataFrame.
|
def add_dt_cols(df_turnstiles):
df_turnstiles["AMPM"] = (
pd.DatetimeIndex(df_turnstiles["TIME"]).strftime("%r").str[-2:]
)
df_turnstiles["DAY_NAME"] = pd.to_datetime(df_turnstiles["DATE"]).dt.day_name()
return df_turnstiles
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def add_columns(self, **columns):\n return self.as_dataframe(self.data.assign(**columns))",
"def add_cols_to_cleaned_df(df):\n\n core_cols = ['time','lat','lon','depth','year','month','week','dayofyear','float_id','cycle']\n template_cols = core_cols + bgc_data_columns\n template_df = pd.DataFrame(columns=template_cols)\n df = template_df.append(df)[template_cols]\n return df",
"def _addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata",
"def modify_phc_tabe(df):\n df[\"UID\"] = df[\"UID\"].map(str)\n df = df.rename(columns={\"DiagnosisDateIndex\": \"date\"})\n df[\"diag_cd\"] = df[\"Code\"].map(\n lambda x: str(x).strip().replace(\".\", \"\").upper())\n return df",
"def simpleColumnNames():\n global masterdf\n\n df = masterdf.copy()\n #df = df[:int(len(df)*percentdata*0.01)]\n # new collumn names otherwise create_indicators break\n # [OPEN-HIGH-LOW-CLOSE-TICKVOL-VOL]\n # O-H-L-C-T-V-S colum suffixes\n newnames = [ symbols[i]+'_'+masterdf.columns[j][0]\n for i in range(len(symbols)) for j in range(7) ]\n df.columns = newnames\n\n return df",
"def _add_columns_energy_levels(self):\n if \"Energy Level (MeV)\" not in self.df:\n return\n # add column of integer M giving the isomer level (0, 1, 2, ...)\n self.df[\"M\"] = [0] * len(self)\n # add string m giving the isomer level name (e.g., '' or 'm' or 'm2')\n self.df[\"m\"] = [\"\"] * len(self)\n # loop over each isotope in the dataframe\n A_Z = [(a, z) for a, z in zip(self[\"A\"], self[\"Z\"])]\n A_Z = set(A_Z)\n for a, z in A_Z:\n isotope = (self[\"A\"] == a) & (self[\"Z\"] == z)\n e_levels = []\n e_levels_nominal = []\n for e_level in self[\"Energy Level (MeV)\"][isotope]:\n if isinstance(e_level, uncertainties.core.Variable):\n e_level_nominal = e_level.nominal_value\n else:\n e_level_nominal = e_level\n if e_level_nominal not in e_levels_nominal:\n e_levels.append(e_level)\n e_levels_nominal.append(e_level_nominal)\n e_levels = sorted(e_levels)\n for M, e_level in enumerate(e_levels):\n isomer = isotope & (abs(self[\"Energy Level (MeV)\"] - e_level) < 1e-10)\n self.df.loc[isomer, \"M\"] = M\n if M > 0:\n if len(e_levels) > 2:\n self.df.loc[isomer, \"m\"] = f\"m{M}\"\n else:\n self.df.loc[isomer, \"m\"] = \"m\"",
"def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n\n dataframe['ema'] =dataframe['ema6']-dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n\n dataframe['ema']= dataframe['ema']*0.6 + dataframe['ema2']*0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n\n return dataframe",
"def construct_report_columns(self):\n return \"Date,Status\"",
"def _add_delta_times_to_df(self, route_df):\n\n \n\n route_df = route_df.assign(delta_times = self.delta_times)\n #route_df = route_df.assign(total_time = self.route_time)\n\n\n return route_df",
"def datetime_columns(df, feature):\r\n df['day'] = pd.to_datetime(df[feature]).dt.day\r\n df['month'] = pd.to_datetime(df[feature]).dt.month\r\n df['year'] = pd.to_datetime(df[feature]).dt.year\r\n return df",
"def cur_quotes_create_dataframe(self):\n self.cur_quotes_df = pandas.read_csv(self.cur_quotes_csvfile,header =None)\n self.cur_quotes_df.rename(columns={org: change.upper() for org, change\\\n in zip(self.cur_quotes_df.columns,self.cur_quotes_parm_headers)},\\\n inplace=True)",
"def add_date_features(data):\n data['member_day'] = data.became_member_on.dt.day\n data['member_weekday'] = data.became_member_on.dt.weekday\n data['member_year'] = data.became_member_on.dt.year\n data['member_month'] = data.became_member_on.dt.month\n\n return data",
"def macd(df, close_price_col_name=\"Close\"):\r\n\t# Add column to store value of MACD\r\n\tdf['Dif'] = df[close_price_col_name].ewm(span=12).mean() - df[close_price_col_name].ewm(span=26).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[\"Dif\"], label=\"Moving Average Convergence/Divergence (MACD)\")\r\n\tplt.title(\"Visualization of Moving Average Convergence/Divergence\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[\"Dif\"] # delete the WMA column for re-graphing\r",
"def add_column(values, df=pandas.DataFrame()):\n df['col_{}'.format(len(df.columns))] = values\n return df",
"def _format_meta_pre_merge(self):\n self.__col_name_map = {\n ColNameFormatter.fmt(c): c\n for c in self.data.solar_meta.columns.values\n }\n\n self._rename_cols(self.data.solar_meta, prefix=SOLAR_PREFIX)\n self._rename_cols(self.data.wind_meta, prefix=WIND_PREFIX)\n\n self._save_rep_prof_index_internally()",
"def data(self):\n dfdata = pd.concat([self.weights, self.returns, self.category], axis=1)\n dfdata.columns = ['weights', 'returns', self.category_name]\n if self.period is not None:\n dfdata['date'] = self.period\n return dfdata",
"def init_model_df(self):\n\n self.model_df = pd.DataFrame(columns=self.query_df[self.column_name].unique())\n\n # add _TIMESTAMP column to dataframe\n self.model_df[self.column_index] = self.min_increments\n\n # set row index to _TIMESTAMP\n self.model_df.set_index(self.column_index, inplace=True)",
"def _rearrange_columns(self, df):\n if self.all_columns is None:\n content_columns = [c for c in df.columns if not c.startswith(\"_\")]\n indicator_columns = [\"__in_{}\".format(t) for t in self.table_names\n ] if self.add_full_join_indicators else []\n fanout_columns = _get_fanout_columns(\n self.table_info) if self.add_full_join_fanouts else []\n self.all_columns = content_columns + indicator_columns + fanout_columns\n df = df[self.all_columns]\n if not self.disambiguate_column_names:\n df.columns = [\n c if c.startswith(\"_\") else c.split(\":\")[1] for c in df.columns\n ]\n return df",
"def setAllColumns(self, newAllColumns):\n \n pass",
"def add_column(values, df=None):\n if df is None:\n df=pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df",
"def run_add_tone_columns_to_csv():\n add_tone_columns_to_csv('test_data_for_tone.csv', 'test_data_for_tone_added.csv')",
"def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0",
"def set_col(self, *, d, colname: str, values):\n d2 = d.with_columns([pl.Series(values=values).alias(colname)])\n return d2",
"def add_columns_for_taps(full_data: DataFrame, tap_data: DataFrame):\n for tap_file in tap_file_names:\n tap_type = tap_file_to_feature_name[tap_file]\n data = tap_data[tap_data['Type'] == tap_type].reset_index(drop = True)\n\n lead_file = 'Accelerometer.csv'\n time_column_name = x_columns[lead_file]\n data_times = full_data[time_column_name]\n data_index = 0\n\n new_column = []\n\n for tap_index in range(data.shape[0]):\n try:\n while data_times[data_index] < (data['Start'][tap_index] * 1000000):\n new_column.append(0) # Not in the midst of a tap\n data_index += 1\n if data_index >= full_data.shape[0]: break\n if data_index >= full_data.shape[0]: break\n new_column.append(1) # At least one value in the midst of the tap\n data_index += 1\n if data_index >= full_data.shape[0]: break\n while data_times[data_index] < (data['End'][tap_index] * 1000000):\n new_column.append(1)\n data_index += 1\n if data_index >= full_data.shape[0]: break\n if data_index >= full_data.shape[0]: break\n except KeyError:\n print(\"Okay, here's that thing again\")\n return\n\n \n while data_index < full_data.shape[0]:\n new_column.append(0)\n data_index += 1\n\n full_data[tap_type] = new_column",
"def better_add_column(values, df=None):\n # Update the function to create a default DataFrame\n if df is None:\n df = pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df",
"def format_dataframe(df):\r\n\r\n # set data types\r\n df['WO Actual Finish Date'] = pd.to_datetime(df['WO Actual Finish Date'], format='%Y-%m-%d')\r\n df['Work Order Reported Date'] = pd.to_datetime(df['Work Order Reported Date'], format='%Y-%m-%d')\r\n df['WO Actual Finish Year'] = df['WO Actual Finish Year'].astype('int')\r\n\r\n # deal with missing values\r\n df.fillna('None Specified', inplace=True)\r\n\r\n # create new column YYYY-MM\r\n df['Actual Finish YYYY-MM'] = df['WO Actual Finish Date'].apply(lambda x: x.strftime('%Y-%m'))\r\n\r\n return df",
"def toSeasonAggFormat(self):\n df_a = self.toHomeAwayFormat().copy()\n df_b = df_a.copy()\n\n for col in list(df_a.columns).copy():\n if col[0] == \"H\":\n df_a = df_a.rename(columns={col:(col[1:])})\n\n if col[0] == \"A\":\n df_a = df_a.rename(columns={col:('Opp'+col[1:])})\n\n #df_a['Win'] = 1\n\n for col in list(df_b.columns).copy():\n if col[0] == \"H\":\n df_b = df_b.rename(columns={col:('Opp'+col[1:])})\n\n if col[0] == \"A\":\n df_b = df_b.rename(columns={col:(col[1:])})\n\n df_c = df_a.append(df_b, sort=True)\n\n return df_c",
"def reset_columns(self):\n\n reset_cols = [i for i in self.__cols if i in self.__df_timings.columns]\n self.__df_timings = self.__df_timings.loc[:, reset_cols]\n return"
] |
[
"0.5433198",
"0.54121774",
"0.5236415",
"0.52023053",
"0.5188201",
"0.50159407",
"0.5009321",
"0.4990669",
"0.49770692",
"0.49355897",
"0.49140552",
"0.4908264",
"0.48580837",
"0.48296228",
"0.48267817",
"0.4824319",
"0.48108315",
"0.47799486",
"0.47572893",
"0.47559062",
"0.47274792",
"0.4725381",
"0.47171232",
"0.4715791",
"0.47022504",
"0.47004455",
"0.46858865",
"0.46770215",
"0.46692488",
"0.46686894"
] |
0.6033092
|
0
|