| query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4–10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
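
The columns above describe triplet-style retrieval data: each row pairs a docstring query with its source implementation (`document`) and 30 mined hard negatives, scored by similarity in descending order, with the `metadata.objective.triplet` entry marking the (query, document, negatives) training objective. The sketch below shows one way a row might be unpacked into training triplets; the JSON-lines layout, the `train.jsonl` file name, and the `max_negatives` cutoff are illustrative assumptions, not part of the dataset itself.

```python
# Minimal sketch of turning one row of this dataset into contrastive
# training triplets. Assumptions (not part of the dataset): rows are
# available as JSON objects with the field names listed above, and
# "negatives" is ordered hardest-first (matching the descending scores).

import json
from typing import Iterator, Tuple


def row_to_triplets(row: dict, max_negatives: int = 5) -> Iterator[Tuple[str, str, str]]:
    """Yield (query, positive, negative) triplets from a single row.

    The row's "objective" metadata marks it as triplet-style: the query
    is anchored to its document (positive) against the mined negatives.
    """
    query = row["query"]
    positive = row["document"]
    # Keep only the first few negatives, which carry the highest scores.
    for negative in row["negatives"][:max_negatives]:
        yield query, positive, negative


if __name__ == "__main__":
    # Hypothetical file name; replace with an actual JSON-lines export.
    with open("train.jsonl", "r", encoding="utf-8") as fh:
        for line in fh:
            for anchor, pos, neg in row_to_triplets(json.loads(line)):
                print(anchor[:60], "->", pos[:40], "vs", neg[:40])
```
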
Reduce this Dataset's data by applying ``sum`` along some dimension(s). | def sum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
return self.reduce(
duck_array_ops.sum,
dim=dim,
skipna=skipna,
min_count=min_count,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)",
"def sum(self, axis=None, keepdims=False):\n return F.Sum.apply(self, axis, keepdims)",
"def sum(self, axis: int = 0):\r\n self.values = self.values.sum(axis=axis)\r\n self.layers = [None]\r\n return self.copy()",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(self, axis=None):\n if axis is None:\n return numpy.ma.sum(self.data)\n\n new_data = numpy.ma.sum(self.data, axis=axis)\n remaining_axes = numpy.setdiff1d(range(self.ndim), axis)\n remaining_edges = [self.bset.edges[ax] for ax in remaining_axes]\n\n # This is kind of a hack that breaks good OO design, but is there\n # a better solution?\n if len(remaining_edges) == 2:\n return IntensityMap2D(new_data, (remaining_edges,))\n else:\n return IntensityMap(new_data, (remaining_edges,))",
"def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)",
"def reduce_sum_d(x, y, axis=None, keepdims=None, kernel_name=\"reduce_sum_d\"):\n\n dtype = x[\"dtype\"]\n dtype_lower = dtype.lower()\n check_list = (\"float16\", \"float32\")\n check_dtype(dtype_lower, check_list, param_name=\"x\")\n\n with te.op.compute():\n shape = x[\"shape\"]\n shape_range = x[\"range\"]\n\n axes = []\n shape_len = len(shape)\n if not axis:\n for i, _ in enumerate(shape):\n axes.append(i)\n else:\n axes = list(axis)\n axes = cce_util.axis_check(shape_len, axes)\n\n shape_new, shape_range_new, axes_new, fused_rel_dic = \\\n fused_reduce_axis(shape, shape_range, axes)\n\n add_compile_info(\"fused_rel_dic\", fused_rel_dic)\n x[\"shape\"] = shape_new\n x[\"range\"] = shape_range_new\n shape_var_new = variable_shape([x])[0]\n\n data_input = tvm.placeholder(shape_var_new, name=\"data_input\",\n dtype=dtype_lower)\n res = reduce_sum_d_compute(data_input, y, axes_new, keepdims)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n # build\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res]}\n te.lang.dynamic.build(sch, config)",
"def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())",
"def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\r\n\r\n out = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def sum(self):\n return np.dot(self.data.T, self.weights)",
"def sum(self):\n return self.aggregate(np.sum)",
"def colsums (self):\n return self.values.sum (axis=1)",
"def conv_reduce_sum(x, result_shape, padding, strides):\n if len(result_shape) == 3:\n return conv2d_reduce_sum(x, result_shape[0], result_shape[1],\n padding, strides)\n elif len(result_shape) == 2:\n return conv1d_reduce_sum(x, result_shape[0], padding, strides[0])\n else:\n raise ValueError()",
"def sum(self, dim=None):\n if dim is None:\n x = self.flatten()\n else:\n x = self.transpose(0, dim)\n\n # Add all BinarySharedTensors\n while x.size(0) > 1:\n extra = None\n if x.size(0) % 2 == 1:\n extra = x[0]\n x = x[1:]\n x0 = x[: (x.size(0) // 2)]\n x1 = x[(x.size(0) // 2) :]\n x = x0 + x1\n if extra is not None:\n x.share = torch_cat([x.share, extra.share.unsqueeze(0)])\n\n if dim is None:\n x = x.squeeze()\n else:\n x = x.transpose(0, dim).squeeze(dim)\n return x",
"def reduce_sum(\n input_tensor: remote_blob_util.BlobDef,\n axis: Optional[Union[int, Sequence[int]]] = None,\n keepdims: bool = False,\n name: Optional[str] = None,\n) -> remote_blob_util.BlobDef:\n name = _gen_unique_name_if_need(name, \"ReduceSum_\")\n\n axis = _check_axis(axis, input_tensor.shape)\n if len(axis) == 0:\n return input_tensor\n\n op = (\n flow.user_op_builder(name)\n .Op(\"reduce_sum\")\n .Input(\"input_tensor\", [input_tensor])\n .Output(\"output_tensor\")\n .Attr(\"axis\", axis)\n .Attr(\"keepdims\", keepdims)\n .Build()\n )\n return op.InferAndTryRun().SoleOutputBlob()",
"def weighted_sum_ds(ds, dim=None, weights=None):\n if weights is None:\n warn('Computing sum using equal weights for all data points')\n return ds.sum(dim)\n else:\n ds.apply(weighted_sum_da, dim=dim, weights=weights)",
"def my_sum(a, axis, count):\n if a.shape[axis] == count:\n return a.sum(axis)\n elif a.shape[axis] == 1:\n return count * a.sum(axis)\n else:\n raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d' % (a.shape, axis, count))",
"def acumsum (a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if type(dimension) in [ListType, TupleType, N.ndarray]:\r\n dimension = list(dimension)\r\n dimension.sort()\r\n dimension.reverse()\r\n for d in dimension:\r\n a = N.add.accumulate(a,d)\r\n return a\r\n else:\r\n return N.add.accumulate(a,dimension)",
"def sum(tensor, axis=None):\n raise NotImplementedError",
"def dim_zero_sum(x: Tensor) ->Tensor:\n return torch.sum(x, dim=0)",
"def sum_to_0d(x):\n assert_equal(x.ndim, 1)\n return np.squeeze(np.sum(x, keepdims=True))",
"def reduce_summ(self, params):\n reduced_idxs = grid_tools.reduce_table_idx(self.params, params=params)\n return self.summ.iloc[reduced_idxs]",
"def sum_except_batch(x, num_dims=1):\n return x.reshape(*x.shape[:num_dims], -1).sum(-1)",
"def val_sum(self, axis = None):\n f = self\n if axis is None:\n axis = - np.arange(f.val_ndim) - 1\n axis = tuple(axis)\n else:\n ## make `axis` into a list\n try:\n axis = list(tuple(axis))\n except TypeError:\n axis = (axis, )\n \n ## force the `axis` to be positive\n axis = [i if i >= 0 else i + f.val_ndim for i in axis]\n assert all(0 <= i < f.val_ndim for i in axis )\n \n ##\n axis = np.array(axis) + f.batch_ndim + f.var_ndim\n axis = tuple(axis)\n \n #print(\"axis =\", axis)\n return Poly(\n coef = nptf.reduce_sum(\n f.coef, \n axis = axis\n ),\n batch_ndim = f.batch_ndim,\n var_ndim = f.var_ndim\n )"
] | [
"0.69223887",
"0.68643904",
"0.68584",
"0.68099666",
"0.6725604",
"0.6725604",
"0.66675615",
"0.6634007",
"0.6546494",
"0.6546494",
"0.6421243",
"0.64142853",
"0.63628495",
"0.63059735",
"0.62661374",
"0.62658197",
"0.6225546",
"0.6212655",
"0.61623865",
"0.6143209",
"0.6045762",
"0.6011392",
"0.60020375",
"0.59746635",
"0.5943627",
"0.589411",
"0.5870589",
"0.58691645",
"0.58563066",
"0.5853352"
] | 0.70593727 | 0 |
Reduce this DataArray's data by applying ``count`` along some dimension(s). | def count(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.count,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def __len__(self):\n return self.flatten_dim(self.shape[0])",
"def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)",
"def count(self):\r\n return self.data_array.size",
"def size(self, index):\n return self.d1.size(index)\n # FILTER BASED ON D1",
"def count_dims(da):\n return len(da.dims)",
"def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed",
"def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def my_sum(a, axis, count):\n if a.shape[axis] == count:\n return a.sum(axis)\n elif a.shape[axis] == 1:\n return count * a.sum(axis)\n else:\n raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d' % (a.shape, axis, count))",
"def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def dtype_element_count( dtype, name = None ):\n def count( descr ):\n if len( descr ) > 2:\n shape = descr[ 2 ]\n # multiply the shape\n return reduce( lambda x, y: x * y, shape )\n else:\n return 1\n\n if name:\n shape = dtype[ name ].shape\n else:\n shape = dtype.shape\n\n if len(shape) > 0:\n return reduce( lambda x, y: x * y, shape )\n else:\n descr = dtype.descr\n size = 0\n for type in descr:\n size += count( type )\n return size",
"def size(self):\n return reduce(mul, self.shape, 1)",
"def dimension_count(self):\n return self._dimensionCount",
"def counts_to_density(\n x: Union[_cpp.DataArray, _cpp.Dataset], dim: str\n) -> Union[_cpp.DataArray, _cpp.Dataset]:\n return _call_cpp_func(_cpp.counts_to_density, x, dim)",
"def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)",
"def dim_reduction( M ):\n tot_count_per_type = M.sum(axis = 1)\n tot_count = float(tot_count_per_type.sum())\n sorted_index = np.argsort(tot_count_per_type)\n threshold = 0.01\n accu = 0\n for i in range(len(sorted_index)):\n perc = float(tot_count_per_type[sorted_index[i]])/tot_count\n accu = accu + perc\n if accu > threshold:\n break;\n \n return sorted_index[0:i]",
"def ndarray_size(self) -> int:\n pass",
"def density_to_counts(\n x: Union[_cpp.DataArray, _cpp.Dataset], dim: str\n) -> Union[_cpp.DataArray, _cpp.Dataset]:\n return _call_cpp_func(_cpp.density_to_counts, x, dim)",
"def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])",
"def count(self, axis=None):\n return self.data.count(axis=axis)",
"def reduce_dimension(self, n_components=2):\n\n reducer = PCA(n_components=n_components)\n\n X = self.data.values.astype(np.float32)\n\n norm = Normalizer()\n Xnorm = norm.fit_transform(X)\n\n return reducer.fit_transform(Xnorm)",
"def __len__(self):\n return sum(f.count for f in self.filters)",
"def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return self.nbytes\n else:\n return self.nbytes",
"def __len__(self):\n ret = self.data.shape[0]\n return ret"
] | [
"0.6582146",
"0.6582146",
"0.65379834",
"0.6224404",
"0.6224404",
"0.59004956",
"0.5841828",
"0.5838448",
"0.57762665",
"0.573857",
"0.56812054",
"0.5657385",
"0.55505395",
"0.55487853",
"0.55471367",
"0.55208504",
"0.5506666",
"0.54985136",
"0.5492544",
"0.54757905",
"0.54540455",
"0.5439526",
"0.5413956",
"0.54107267",
"0.53990823",
"0.5394898",
"0.5386082",
"0.5362852",
"0.535802",
"0.5347619"
] | 0.6898398 | 0 |
Reduce this DataArray's data by applying ``mean`` along some dimension(s). | def mean(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.mean,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean(self):\n return self.data.mean(axis=-1, keepdims=True)",
"def mean(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.mean, **kwargs)",
"def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def mean(self):\r\n return np.mean(self.data_array)",
"def amean (inarray,dimension=None,keepdims=0):\r\n if inarray.dtype in [N.int_, N.short,N.ubyte]:\r\n inarray = inarray.astype(N.float_)\r\n if dimension == None:\r\n inarray = N.ravel(inarray)\r\n sum = N.add.reduce(inarray)\r\n denom = float(len(inarray))\r\n elif type(dimension) in [IntType,FloatType]:\r\n sum = asum(inarray,dimension)\r\n denom = float(inarray.shape[dimension])\r\n if keepdims == 1:\r\n shp = list(inarray.shape)\r\n shp[dimension] = 1\r\n sum = N.reshape(sum,shp)\r\n else: # must be a TUPLE of dims to average over\r\n dims = list(dimension)\r\n dims.sort()\r\n dims.reverse()\r\n sum = inarray *1.0\r\n for dim in dims:\r\n sum = N.add.reduce(sum,dim)\r\n denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)\r\n if keepdims == 1:\r\n shp = list(inarray.shape)\r\n for dim in dims:\r\n shp[dim] = 1\r\n sum = N.reshape(sum,shp)\r\n return sum/denom",
"def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def nanmean(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.nanmean, **kwargs)",
"def mean(self):\n mean = sum(self.data)/self.size\n return mean",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()",
"def getMean(self, windowSize=0):\r\n try:\r\n if self._data.size == 0:\r\n raise RuntimeError(\"Filter1D data is empty. Call Filter1D.addDataPoint() to add data prior calling Filter1D.getMean().\")\r\n if type(windowSize) is int:\r\n if windowSize <= 0 or windowSize > self._maxSize:\r\n windowSize = self._maxSize\r\n return np.mean(self._data[-windowSize:])\r\n else:\r\n raise TypeError(\"windowSize must be an integer\")\r\n except TypeError or RuntimeError:\r\n raise",
"def subMeanAll(data=None):\n datamean = data.mean(axis = 0)\n data[:,3:] = data[:,3:] - datamean[3:]\n return data",
"def mean(self, axis=None, keepdims=False, dtype=None, out=None):\n\n if axis is None:\n axis = tuple(range(self.ndim))\n elif not isinstance(axis, tuple):\n axis = (axis,)\n den = reduce(operator.mul, (self.shape[i] for i in axis), 1)\n\n if dtype is None:\n if issubclass(self.dtype.type, (np.integer, np.bool_)):\n dtype = inter_dtype = np.dtype(\"f8\")\n else:\n dtype = self.dtype\n inter_dtype = (\n np.dtype(\"f4\") if issubclass(dtype.type, np.float16) else dtype\n )\n else:\n inter_dtype = dtype\n\n num = self.sum(axis=axis, keepdims=keepdims, dtype=inter_dtype)\n\n if num.ndim:\n out = np.true_divide(num, den, casting=\"unsafe\")\n return out.astype(dtype) if out.dtype != dtype else out\n return np.divide(num, den, dtype=dtype, out=out)",
"def ageometricmean (inarray,dimension=None,keepdims=0):\r\n inarray = N.array(inarray,N.float_)\r\n if dimension == None:\r\n inarray = N.ravel(inarray)\r\n size = len(inarray)\r\n mult = N.power(inarray,1.0/size)\r\n mult = N.multiply.reduce(mult)\r\n elif type(dimension) in [IntType,FloatType]:\r\n size = inarray.shape[dimension]\r\n mult = N.power(inarray,1.0/size)\r\n mult = N.multiply.reduce(mult,dimension)\r\n if keepdims == 1:\r\n shp = list(inarray.shape)\r\n shp[dimension] = 1\r\n sum = N.reshape(sum,shp)\r\n else: # must be a SEQUENCE of dims to average over\r\n dims = list(dimension)\r\n dims.sort()\r\n dims.reverse()\r\n size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)\r\n mult = N.power(inarray,1.0/size)\r\n for dim in dims:\r\n mult = N.multiply.reduce(mult,dim)\r\n if keepdims == 1:\r\n shp = list(inarray.shape)\r\n for dim in dims:\r\n shp[dim] = 1\r\n mult = N.reshape(mult,shp)\r\n return mult",
"def average(self):\n return np.mean(self.buf[:self._size], axis=0)",
"def mean(self):\n mean=np.zeros(self.shape)\n if self.Fourier:\n ind=self.mean_index()\n for di in np.ndindex(*self.shape):\n mean[di]=np.real(self.val[di][ind])/self.fft_coef\n else:\n for di in np.ndindex(*self.shape):\n mean[di]=np.mean(self.val[di])\n return mean",
"def mean(self):\n return self.aggregate(np.mean)",
"def sparse_mean(x, axis=None):\n raise NotImplementedError()",
"def time_mean(self, width):\n import math\n\n for i in range(len(self.data)):\n for j in range(len(self.chans)):\n self.data[i,:,j,:] = self.data[i - width[j]/2 : i + int(math.ceil(width[j]/2.)), :, j, :].mean(axis=0)",
"def _arrays_mean(array_list):\n dims = array_list[0].shape[2]\n out = np.zeros(array_list[0].shape)\n var_out = out.copy()\n\n# i = 1\n for i in range(dims):\n temp = [j[:, :, i] for j in array_list]\n\n # calculate mean\n means_out = np.zeros(temp[0].shape)\n for k in temp:\n means_out += k # sum\n\n out[:, :, i] = means_out / len(array_list) # mean\n\n return(out)",
"def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n return normalized_dataset - np.mean(normalized_dataset)",
"def average(self, weights, axis=0, **kwargs) -> \"Dataset\":\n\n def func(x, axis, keepdims):\n if keepdims:\n raise NotImplementedError()\n\n return np.average(x, weights=weights, axis=axis)\n\n return self.aggregate(axis=axis, func=func, **kwargs)",
"def meanOf(classObj):\r\n return np.mean(classObj.dataSet, axis=0)",
"def with_sum_mean_reduction(self):\n return self.with_reduction(lambda x: x.sum(1).mean(0))",
"def numpy_mean(arr):\n return arr.mean()",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def nanmean(array_data, axis=0):\n\n mdat = np.ma.masked_array(array_data, np.isnan(array_data));\n retval = np.mean(mdat, axis=axis);\n \n return retval;"
] | [
"0.6932092",
"0.69198084",
"0.68324643",
"0.6829645",
"0.6829645",
"0.68155813",
"0.6670382",
"0.6551353",
"0.6551353",
"0.65419024",
"0.6500794",
"0.6467665",
"0.6430388",
"0.63965666",
"0.634229",
"0.6330614",
"0.63156646",
"0.6315549",
"0.62959886",
"0.62906164",
"0.62481505",
"0.624401",
"0.6229558",
"0.6217133",
"0.6206855",
"0.619712",
"0.61868316",
"0.61470175",
"0.61424166",
"0.6123737"
] | 0.7121785 | 0 |
Reduce this DataArray's data by applying ``prod`` along some dimension(s). | def prod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
min_count: int | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.prod,
dim=dim,
skipna=skipna,
min_count=min_count,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(tensor, axis=None):\n raise NotImplementedError",
"def convert_prod(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceProd',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceProd',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def prod(self):\n return self._summarize(lambda c: c.prod)",
"def prod(self, values):\n return self.aggregate(values, \"prod\")",
"def prod(self):\n r = 0\n for i in range(len(self)):\n r *= self[i]\n\n return r",
"def prod(self, x, y):\n return self.reduce(x + y)",
"def frob_prod(self, B):\n m, n = self.shape\n k, r = B.shape\n assert (m == k\n and n == r), (\"Distinct shapes ({}, {}) and ({}, {})\".format(\n m, n, k, r))\n sum_ = 0\n for i in range(m):\n for j in range(n):\n sum_ += self[(i, j)] * B[(i, j)]\n return sum_",
"def prod(x):\n return functools.reduce(lambda a, b: a * b, x, 1)",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prodSumNumpy(*arrays):\n return np.sum(np.prod(arrays,axis=0))",
"def prod(self):\n # skipna == True\n # only_numerical == True\n return self._lift(\"prod\")",
"def prod(iterable):\n \n return reduce(operator.mul, iterable, 1)",
"def prod(iterable):\n return reduce(operator.mul, iterable, 1)",
"def prod(iterable):\n return reduce(operator.mul, iterable, 1)",
"def prod(iterable):\n return reduce(operator.mul, iterable, 1)",
"def prod(self, args):\n assert len(args) > 0, \"Cannot compute an empty product in a semigroup\"\n return prod(args[1:], args[0])",
"def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.75356025",
"0.69897264",
"0.69897264",
"0.69731647",
"0.68985814",
"0.6810773",
"0.6810773",
"0.6472493",
"0.63539034",
"0.6327443",
"0.6250724",
"0.6143561",
"0.60524917",
"0.6012047",
"0.5895768",
"0.58667177",
"0.58586097",
"0.58586097",
"0.58586097",
"0.58337677",
"0.58119243",
"0.5803568",
"0.5800128",
"0.5800128",
"0.5800128",
"0.5733436",
"0.5710172",
"0.56996953",
"0.56996953",
"0.56996953"
] | 0.72356445 | 1 |
Reduce this Dataset's data by applying ``all`` along some dimension(s). | def all(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="all",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_all,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(tensor):\n raise NotImplementedError",
"def all(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.all(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.all(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.all(part.view(ndarray), *args, **kwargs))",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def all(self, func=bool):\n return all(map(func, self._))",
"def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)",
"def _reduce(self, name, skipna=True, **kwargs):\n if name == \"any\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return any_op(self.data, skipna=skipna)\n elif name == \"all\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return all_op(self.data, skipna=skipna)\n\n raise TypeError(\n \"cannot perform {name} with type {dtype}\".format(\n name=name, dtype=self.dtype\n )\n )",
"def all(c):\n states(c)\n etc(c)\n prune(c)",
"def all(self, boolean_only=None):\n # skipna == True\n return self._lift(\"all\")",
"def any(tensor, axis=None, keepdims=False, **kwargs):\n return tensor.any(axis=axis, keepdims=keepdims, **kwargs)",
"def all(x) -> bool:\n pass"
] | [
"0.69627005",
"0.69300705",
"0.69300705",
"0.68929625",
"0.68462247",
"0.6555234",
"0.6555234",
"0.64865315",
"0.64326197",
"0.6402593",
"0.6402593",
"0.6244518",
"0.62295026",
"0.6124534",
"0.6039926",
"0.59571075",
"0.5946202",
"0.5869544",
"0.56962043",
"0.5633015",
"0.5614969",
"0.5597291",
"0.55347407",
"0.55093485",
"0.54941285",
"0.5483847",
"0.54576844",
"0.5444656",
"0.54346555",
"0.5425927"
] | 0.70568603 | 1 |
Reduce this Dataset's data by applying ``any`` along some dimension(s). | def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="any",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_any,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(tensor, axis=None, keepdims=False, **kwargs):\n return tensor.any(axis=axis, keepdims=keepdims, **kwargs)",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def any(self):\n return self._summarize(lambda c: c.any)",
"def matrix_any(condition):\n return np.sum(np.sum(condition)) > 0",
"def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def any(self, name, codes):\n if self.is_array(name):\n logics = []\n for s in self.sources(name):\n logics.append({s: has_any(codes)})\n slicer = self.take(union(logics))\n else:\n slicer = self.take({name: has_any(codes)})\n return slicer",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims",
"def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def any(self, values):\n return self.aggregate(values, \"any\")",
"def any(self):\n return self._reduce_for_stat_function(\n lambda col: F.max(F.coalesce(col.cast('boolean'), F.lit(False))),\n only_numeric=False)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def has_any(self) -> bool:\n return any(\n BlockAccessor.for_block(b).num_rows() > 0 for b in self._buffer)"
] | [
"0.7110761",
"0.7086105",
"0.703172",
"0.703172",
"0.70229447",
"0.6403789",
"0.6393615",
"0.6261724",
"0.61942995",
"0.6074665",
"0.59774745",
"0.5968099",
"0.5967931",
"0.59597373",
"0.58590436",
"0.57441306",
"0.57049936",
"0.5693786",
"0.5693786",
"0.5675616",
"0.56408346",
"0.55823976",
"0.555122",
"0.555122",
"0.55342424",
"0.5480288",
"0.54769874",
"0.5461213",
"0.5453995",
"0.54532737"
] | 0.7161406 | 1 |
Reduce this Dataset's data by applying ``median`` along some dimension(s). | def median(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
return self.reduce(
duck_array_ops.median,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def median_filter(self):\n print \"Median-Filtering...\"\n D = self.D\n x = np.median(np.median(D,axis=1),axis=1)\n for i in xrange(len(x)):\n D[i,:,:] -= x[i]\n self.D = D\n print \"done.\"",
"def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def median(self):\n return self._summarize(lambda c: c.median)",
"def median(X,**kwargs):\n \n m = np.median(X,axis=0)\n m = np.array(m).reshape(-1)\n return m",
"def Median(data):\n return data.median()",
"def getMedian(self, windowSize=0):\r\n try:\r\n if self._data.size == 0:\r\n raise RuntimeError(\"Filter1D data is empty. Call Filter1D.addDataPoint() to add data prior calling Filter1D.getMedian().\")\r\n if windowSize <= 0:\r\n windowSize = self._maxSize\r\n if windowSize % 2 == 1 and windowSize <= self._maxSize:\r\n return np.median(self._data[-windowSize:])\r\n else:\r\n raise ValueError(\"windowSize must be an odd integer <= maxSize\")\r\n except ValueError:\r\n raise",
"def median1D(self):\n # TO DO\n pass",
"def median(self):\n # TO DO\n pass",
"def untruncatedMedian(self, x):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)",
"def median(self):\n return self._lift(\"median\")",
"def untruncatedMedian(self):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)",
"def untruncatedMedian(self):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)",
"def get_median(self):\n med_value= self.df[self.col_name].median()\n return med_value",
"def median(self, name, **kwargs):\n data = self.get(name,**kwargs)\n return np.percentile(data,[50])",
"def median(self) -> \"Stream[float]\":\n return self.agg(np.median).astype(\"float\")",
"def _median(self, in_arr, takeEvenMean):\n if takeEvenMean:\n return numpy.median(in_arr)\n else:\n return numpy.sort(in_arr, axis=None)[(in_arr.size-1)/2]",
"def apply_1d_median_filter(n, timage):\n image_shape = timage.shape\n ovrlay = int(n / 2)\n res_matrix = np.copy(timage)\n for i in np.arange(image_shape[0])[1:-1]:\n local_matrix = timage[i - ovrlay:i + ovrlay + 1] \n median = np.median(local_matrix)\n res_matrix[i] = median\n return res_matrix",
"def median_f(self, x):\n # TODO: the axis used in nanmean is different for U and Uf\n # calcs - change Uf dims to make consistent?\n return stats.nanmedian(x, axis=1)",
"def samples_median(samples):\n return [np.median(s) for s in samples.T]",
"def samples_median(samples):\n return [np.median(s) for s in samples.T]",
"def median(self):\n self.data.sort()\n\n if len(self.data) % 2 == 1:\n median = self.data[int(self.size/2)]\n else:\n median = (self.data[int(self.size/2 - 1)] + \n self.data[int(self.size/2)]) / 2\n return median",
"def scipy_nanmedian(x, axis=0):\n x, axis = _chk_asarray(x, axis)\n if x.ndim == 0:\n return float(x.item())\n shape = list(x.shape)\n shape.pop(axis)\n if 0 in shape:\n x = np.empty(shape)\n else:\n x = x.copy()\n x = np.apply_along_axis(_nanmedian, axis, x)\n if x.ndim == 0:\n x = float(x.item())\n return x",
"def median_matrix(datasets, axes = None, label = None):\n fn = lambda fd, axis: fd[axis].median()\n \n return fn_matrix(datasets, fn, axes, label)",
"def untruncatedMedian(self):\n return self._distribution.untrMedian()",
"def test_median(self):\r\n m = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\r\n expected = 6.5\r\n observed = median(m, axis=None)\r\n self.assertEqual(observed, expected)\r\n\r\n expected = array([5.5, 6.5, 7.5])\r\n observed = median(m, axis=0)\r\n self.assertEqual(observed, expected)\r\n\r\n expected = array([2.0, 5.0, 8.0, 11.0])\r\n observed = median(m, axis=1)\r\n self.assertEqual(observed, expected)\r\n\r\n self.assertRaises(ValueError, median, m, 10)",
"def mad(array, axis=None, keepdims=False):\n ad = np.abs(array - np.median(array, axis, keepdims=True))\n mad = np.median(ad, axis, keepdims=keepdims)\n return mad",
"def image_median(self, mask=np.ones((32, 32), dtype=bool)):\n return np.median(self.__image[mask])",
"def median(self) -> Union[int, float]:\n return self._data.median()"
] | [
"0.7723946",
"0.73474264",
"0.73474264",
"0.73474264",
"0.67132735",
"0.6709282",
"0.6704458",
"0.6682933",
"0.6635858",
"0.656867",
"0.65672284",
"0.6512514",
"0.64834887",
"0.64834887",
"0.6444135",
"0.63871884",
"0.6383317",
"0.6333734",
"0.62151307",
"0.61960506",
"0.6195796",
"0.6195796",
"0.6149201",
"0.61276996",
"0.61172026",
"0.6112575",
"0.60945493",
"0.60429245",
"0.60347074",
"0.6029336"
] | 0.738869 | 1 |
Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). | def cumsum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
return self.reduce(
duck_array_ops.cumsum,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(x, axis=None):\r\n return CumsumOp(axis=axis)(x)",
"def _cumsum(self) -> np.ndarray:\n\n if not hasattr(self, \"__cumsum\"):\n self.__cumsum = np.insert(np.cumsum(self.sizes), 0, 0)\n return self.__cumsum",
"def cumsum(tensor, axis=None):\n raise NotImplementedError",
"def multidim_cumsum(a):\n out = a.cumsum(-1)\n for i in range(2, a.ndim+1):\n np.cumsum(out, axis=-i, out=out)\n return out",
"def _strict_1d_cumsum(tensor, len_tensor):\n # Assumes tensor shape is fully defined.\n with ops.name_scope('strict_1d_cumsum', values=[tensor]):\n if len_tensor == 0:\n return constant_op.constant([])\n len_pad = len_tensor - 1\n x = array_ops.pad(tensor, [[len_pad, 0]])\n h = array_ops.ones_like(x)\n return _strict_conv1d(x, h)[:len_tensor]",
"def cumsum(self):\n return self._lift(lambda c: c.cumsum)",
"def _cumsum_einsum(x, precision=jax.lax.Precision.DEFAULT):\n mask = jnp.triu(jnp.ones(x.shape, dtype=jnp.bool_))\n return jnp.einsum(\"ij,jk\", x, mask, precision=precision)",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(module, y, dimension):\n _import_modules()\n if module in [np, ma, torch, jnp]:\n return module.cumsum(y, axis=dimension)\n elif module == tf:\n return tf.math.cumsum(y, dimension)",
"def exclusive_cumsum(x):\n return torch.cumsum(torch.cat([x.new_zeros(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cum_sum(self):\n\n # create cdo command and runit\n cdo_command = \"cdo -timcumsum\"\n run_this(cdo_command, self, output=\"ensemble\")",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsumr(x, axis=0):\n cums = x.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums*(x == 0), axis=axis)",
"def normalize_cumsum(self, x):\n\n x = self.normalize_global(x)\n\n if self.right_context is None and self.left_context is None:\n return x\n\n if self.left_context is None:\n left_context = x.shape[0]\n else:\n left_context = self.left_context\n\n if self.right_context is None:\n right_context = x.shape[0]\n else:\n right_context = self.right_context\n\n total_context = left_context + right_context + 1\n\n if x.shape[0] <= min(right_context, left_context)+1:\n # if context is larger than the signal we still return global normalization\n return x\n\n c_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n counts = np.zeros((x.shape[0]+total_context, 1,), dtype=float_cpu())\n \n c_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x, axis=0)\n c_x[left_context+x.shape[0]+1:] = c_x[left_context+x.shape[0]]\n counts[left_context+1:left_context+x.shape[0]+1] = np.arange(1, x.shape[0]+1, dtype=float_cpu())[:,None]\n counts[left_context+x.shape[0]+1:] = x.shape[0]\n\n if self.norm_var:\n c2_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n c2_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x*x, axis=0)\n c2_x[left_context+x.shape[0]+1:] = c2_x[left_context+x.shape[0]]\n\n counts = counts[total_context:] - counts[:-total_context]\n m_x = (c_x[total_context:] - c_x[:-total_context])/counts\n\n if self.norm_mean:\n x -= m_x\n\n if self.norm_var:\n m2_x = (c2_x[total_context:] - c2_x[:-total_context])/counts\n s2_x=m2_x - m_x**2\n s2_x[s2_x<1e-5]=1e-5\n s_x = np.sqrt(s2_x)\n x /= s_x\n\n return x",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(self, axis: int = 0):\r\n self.values = self.values.sum(axis=axis)\r\n self.layers = [None]\r\n return self.copy()",
"def local_sum(a,tshape, padval):\n\n # zero-padding\n a = ndpad(a,tshape, padval)\n\n # difference between shifted copies of an array along a given dimension\n def shiftdiff(a,tshape,shiftdim):\n ind1 = [slice(None,None),]*a.ndim\n ind2 = [slice(None,None),]*a.ndim\n ind1[shiftdim] = slice(tshape[shiftdim],a.shape[shiftdim]-1)\n ind2[shiftdim] = slice(0,a.shape[shiftdim]-tshape[shiftdim]-1)\n return a[ind1] - a[ind2]\n\n # take the cumsum along each dimension and subtracting a shifted version\n # from itself. this reduces the number of computations to 2*N additions\n # and 2*N subtractions for an N-dimensional array, independent of its\n # size.\n #\n # See:\n # <http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>\n for dd in xrange(a.ndim):\n a = np.cumsum(a,dd)\n a = shiftdiff(a,tshape,dd)\n return a",
"def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.75369805",
"0.75369805",
"0.75369805",
"0.6748461",
"0.67054296",
"0.66430676",
"0.63819027",
"0.63511914",
"0.63409925",
"0.6273229",
"0.62687963",
"0.62687963",
"0.62687963",
"0.62631625",
"0.62614137",
"0.624574",
"0.61842084",
"0.61842084",
"0.61842084",
"0.60996747",
"0.60682416",
"0.6043646",
"0.6031098",
"0.58713037",
"0.5851386",
"0.5851386",
"0.58149624",
"0.58013064",
"0.57354873",
"0.57262105"
] | 0.7699135 | 1 |
Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). | def cumprod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
return self.reduce(
duck_array_ops.cumprod,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(self):\n return self._lift(lambda c: c.cumprod)",
"def cumprod(x, axis=None):\r\n return CumprodOp(axis=axis)(x)",
"def cumprod(x, dim=-1, exclusive=False):\n if exclusive:\n length = x.size(dim)\n x = torch.narrow(F.pad(x, pad=(1, 0, 0, 0), value=1.0), dim, 0, length)\n return torch.cumprod(x, dim=dim)",
"def cumprod(a, axis=None, dtype=None, out=None):\n a = astensor(a)\n if dtype is None:\n dtype = np.empty((1,), dtype=a.dtype).cumprod().dtype\n op = TensorCumprod(axis=axis, dtype=dtype)\n return op(a, out=out)",
"def exclusive_cumprod(x):\n return torch.cumprod(torch.cat([x.new_ones(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def cumprod_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = cumprod_1d_nb(a[:, col])\n return out",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def exclusive_cumprod(x):\n batch_size, sequence_length = x.size()\n if torch.cuda.is_available():\n one_x = torch.cat([torch.ones(batch_size, 1).cuda(), x], dim=1)[:, :-1]\n else:\n one_x = torch.cat([torch.ones(batch_size, 1), x], dim=1)[:, :-1]\n return torch.cumprod(one_x, dim=1)",
"def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:\n return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n cumprod = 1\n for i in range(a.shape[0]):\n if ~np.isnan(a[i]):\n cumprod *= a[i]\n out[i] = cumprod\n else:\n out[i] = np.nan\n return out",
"def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def product(x: pd.Series, d: int or float) -> pd.Series:\n if isinstance(d, float):\n d = math.floor(d)\n\n def func(x):\n return np.nancumprod(x)[-1]\n\n if isinstance(x.index, pd.MultiIndex):\n return x.groupby(level=1).rolling(d).apply(func)\n else:\n return x.rolling(d).apply(func)",
"def row_reduce(self):\n res = self.row_echelon()\n for i in range(1, res.m):\n for j in range(res.n):\n if res[i, j] == 1:\n for k in range(i):\n constant = res[k, j]\n res.data[k] = [elem_k - elem_i * constant\n for elem_i, elem_k in\n zip(res.data[i], res.data[k])]\n break\n return res",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def safe_cumprod(x, eps):\n return torch.exp(exclusive_cumsum(torch.log(torch.clamp(x, min=eps, max=1.0))))"
] | [
"0.78994447",
"0.78994447",
"0.78994447",
"0.70834005",
"0.7032063",
"0.69815505",
"0.6964613",
"0.6532074",
"0.62200755",
"0.6205164",
"0.62014854",
"0.61457735",
"0.6142508",
"0.6083153",
"0.60304993",
"0.59804726",
"0.59804726",
"0.5861358",
"0.5861358",
"0.5861358",
"0.5843626",
"0.5843626",
"0.58435524",
"0.5799236",
"0.5583073",
"0.5568974",
"0.5559146",
"0.5559146",
"0.5559146",
"0.5542244"
] | 0.8054797 | 1 |
Reduce this Dataset's data by applying ``all`` along some dimension(s). | def all(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="all",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_all,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(tensor):\n raise NotImplementedError",
"def all(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.all(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.all(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.all(part.view(ndarray), *args, **kwargs))",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def all(self, func=bool):\n return all(map(func, self._))",
"def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)",
"def _reduce(self, name, skipna=True, **kwargs):\n if name == \"any\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return any_op(self.data, skipna=skipna)\n elif name == \"all\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return all_op(self.data, skipna=skipna)\n\n raise TypeError(\n \"cannot perform {name} with type {dtype}\".format(\n name=name, dtype=self.dtype\n )\n )",
"def all(c):\n states(c)\n etc(c)\n prune(c)",
"def all(self, boolean_only=None):\n # skipna == True\n return self._lift(\"all\")",
"def any(tensor, axis=None, keepdims=False, **kwargs):\n return tensor.any(axis=axis, keepdims=keepdims, **kwargs)",
"def all(x) -> bool:\n pass"
] | [
"0.69636464",
"0.69317317",
"0.69317317",
"0.68936807",
"0.6847603",
"0.65551496",
"0.65551496",
"0.6485944",
"0.64324665",
"0.64027697",
"0.64027697",
"0.6244525",
"0.62312984",
"0.6125453",
"0.60393447",
"0.59572035",
"0.5945839",
"0.58689076",
"0.5696258",
"0.5634013",
"0.5615411",
"0.5598613",
"0.55357015",
"0.5509906",
"0.5494616",
"0.5482695",
"0.5458789",
"0.544619",
"0.5435216",
"0.5425543"
] | 0.7058243 | 0 |
Reduce this Dataset's data by applying ``any`` along some dimension(s). | def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="any",
dim=dim,
numeric_only=False,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_any,
dim=dim,
numeric_only=False,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(tensor, axis=None, keepdims=False, **kwargs):\n return tensor.any(axis=axis, keepdims=keepdims, **kwargs)",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def any(self):\n return self._summarize(lambda c: c.any)",
"def matrix_any(condition):\n return np.sum(np.sum(condition)) > 0",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def any(self, name, codes):\n if self.is_array(name):\n logics = []\n for s in self.sources(name):\n logics.append({s: has_any(codes)})\n slicer = self.take(union(logics))\n else:\n slicer = self.take({name: has_any(codes)})\n return slicer",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims",
"def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def any(self, values):\n return self.aggregate(values, \"any\")",
"def any(self):\n return self._reduce_for_stat_function(\n lambda col: F.max(F.coalesce(col.cast('boolean'), F.lit(False))),\n only_numeric=False)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def has_any(self) -> bool:\n return any(\n BlockAccessor.for_block(b).num_rows() > 0 for b in self._buffer)"
] | [
"0.7112423",
"0.70864093",
"0.7033621",
"0.7033621",
"0.7024333",
"0.6406168",
"0.63930935",
"0.6261834",
"0.61942494",
"0.6075047",
"0.59765315",
"0.5969457",
"0.5967703",
"0.59609085",
"0.5858443",
"0.5745467",
"0.57042044",
"0.56952304",
"0.56952304",
"0.5676726",
"0.56422615",
"0.5582042",
"0.5552464",
"0.5552464",
"0.5532824",
"0.5478939",
"0.54758924",
"0.5461427",
"0.54549146",
"0.5453845"
] | 0.7163614 | 0 |
Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). | def cumsum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
return self.reduce(
duck_array_ops.cumsum,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(x, axis=None):\r\n return CumsumOp(axis=axis)(x)",
"def _cumsum(self) -> np.ndarray:\n\n if not hasattr(self, \"__cumsum\"):\n self.__cumsum = np.insert(np.cumsum(self.sizes), 0, 0)\n return self.__cumsum",
"def cumsum(tensor, axis=None):\n raise NotImplementedError",
"def multidim_cumsum(a):\n out = a.cumsum(-1)\n for i in range(2, a.ndim+1):\n np.cumsum(out, axis=-i, out=out)\n return out",
"def _strict_1d_cumsum(tensor, len_tensor):\n # Assumes tensor shape is fully defined.\n with ops.name_scope('strict_1d_cumsum', values=[tensor]):\n if len_tensor == 0:\n return constant_op.constant([])\n len_pad = len_tensor - 1\n x = array_ops.pad(tensor, [[len_pad, 0]])\n h = array_ops.ones_like(x)\n return _strict_conv1d(x, h)[:len_tensor]",
"def cumsum(self):\n return self._lift(lambda c: c.cumsum)",
"def _cumsum_einsum(x, precision=jax.lax.Precision.DEFAULT):\n mask = jnp.triu(jnp.ones(x.shape, dtype=jnp.bool_))\n return jnp.einsum(\"ij,jk\", x, mask, precision=precision)",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(module, y, dimension):\n _import_modules()\n if module in [np, ma, torch, jnp]:\n return module.cumsum(y, axis=dimension)\n elif module == tf:\n return tf.math.cumsum(y, dimension)",
"def exclusive_cumsum(x):\n return torch.cumsum(torch.cat([x.new_zeros(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cum_sum(self):\n\n # create cdo command and runit\n cdo_command = \"cdo -timcumsum\"\n run_this(cdo_command, self, output=\"ensemble\")",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsumr(x, axis=0):\n cums = x.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums*(x == 0), axis=axis)",
"def normalize_cumsum(self, x):\n\n x = self.normalize_global(x)\n\n if self.right_context is None and self.left_context is None:\n return x\n\n if self.left_context is None:\n left_context = x.shape[0]\n else:\n left_context = self.left_context\n\n if self.right_context is None:\n right_context = x.shape[0]\n else:\n right_context = self.right_context\n\n total_context = left_context + right_context + 1\n\n if x.shape[0] <= min(right_context, left_context)+1:\n # if context is larger than the signal we still return global normalization\n return x\n\n c_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n counts = np.zeros((x.shape[0]+total_context, 1,), dtype=float_cpu())\n \n c_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x, axis=0)\n c_x[left_context+x.shape[0]+1:] = c_x[left_context+x.shape[0]]\n counts[left_context+1:left_context+x.shape[0]+1] = np.arange(1, x.shape[0]+1, dtype=float_cpu())[:,None]\n counts[left_context+x.shape[0]+1:] = x.shape[0]\n\n if self.norm_var:\n c2_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n c2_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x*x, axis=0)\n c2_x[left_context+x.shape[0]+1:] = c2_x[left_context+x.shape[0]]\n\n counts = counts[total_context:] - counts[:-total_context]\n m_x = (c_x[total_context:] - c_x[:-total_context])/counts\n\n if self.norm_mean:\n x -= m_x\n\n if self.norm_var:\n m2_x = (c2_x[total_context:] - c2_x[:-total_context])/counts\n s2_x=m2_x - m_x**2\n s2_x[s2_x<1e-5]=1e-5\n s_x = np.sqrt(s2_x)\n x /= s_x\n\n return x",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(self, axis: int = 0):\r\n self.values = self.values.sum(axis=axis)\r\n self.layers = [None]\r\n return self.copy()",
"def local_sum(a,tshape, padval):\n\n # zero-padding\n a = ndpad(a,tshape, padval)\n\n # difference between shifted copies of an array along a given dimension\n def shiftdiff(a,tshape,shiftdim):\n ind1 = [slice(None,None),]*a.ndim\n ind2 = [slice(None,None),]*a.ndim\n ind1[shiftdim] = slice(tshape[shiftdim],a.shape[shiftdim]-1)\n ind2[shiftdim] = slice(0,a.shape[shiftdim]-tshape[shiftdim]-1)\n return a[ind1] - a[ind2]\n\n # take the cumsum along each dimension and subtracting a shifted version\n # from itself. this reduces the number of computations to 2*N additions\n # and 2*N subtractions for an N-dimensional array, independent of its\n # size.\n #\n # See:\n # <http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>\n for dd in xrange(a.ndim):\n a = np.cumsum(a,dd)\n a = shiftdiff(a,tshape,dd)\n return a",
"def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.7537239",
"0.7537239",
"0.7537239",
"0.67484677",
"0.67069674",
"0.66436154",
"0.63819385",
"0.6350921",
"0.6343282",
"0.62725073",
"0.6268484",
"0.6268484",
"0.6268484",
"0.6262665",
"0.6262446",
"0.62461925",
"0.61838704",
"0.61838704",
"0.61838704",
"0.61028945",
"0.6068384",
"0.60468733",
"0.60332865",
"0.58722097",
"0.5854521",
"0.5854521",
"0.58188057",
"0.5803276",
"0.5739192",
"0.57293564"
] | 0.769945 | 0 |
Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). | def cumprod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Dataset:
return self.reduce(
duck_array_ops.cumprod,
dim=dim,
skipna=skipna,
numeric_only=True,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(self):\n return self._lift(lambda c: c.cumprod)",
"def cumprod(x, axis=None):\r\n return CumprodOp(axis=axis)(x)",
"def cumprod(x, dim=-1, exclusive=False):\n if exclusive:\n length = x.size(dim)\n x = torch.narrow(F.pad(x, pad=(1, 0, 0, 0), value=1.0), dim, 0, length)\n return torch.cumprod(x, dim=dim)",
"def cumprod(a, axis=None, dtype=None, out=None):\n a = astensor(a)\n if dtype is None:\n dtype = np.empty((1,), dtype=a.dtype).cumprod().dtype\n op = TensorCumprod(axis=axis, dtype=dtype)\n return op(a, out=out)",
"def exclusive_cumprod(x):\n return torch.cumprod(torch.cat([x.new_ones(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def cumprod_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = cumprod_1d_nb(a[:, col])\n return out",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def exclusive_cumprod(x):\n batch_size, sequence_length = x.size()\n if torch.cuda.is_available():\n one_x = torch.cat([torch.ones(batch_size, 1).cuda(), x], dim=1)[:, :-1]\n else:\n one_x = torch.cat([torch.ones(batch_size, 1), x], dim=1)[:, :-1]\n return torch.cumprod(one_x, dim=1)",
"def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:\n return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n cumprod = 1\n for i in range(a.shape[0]):\n if ~np.isnan(a[i]):\n cumprod *= a[i]\n out[i] = cumprod\n else:\n out[i] = np.nan\n return out",
"def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def product(x: pd.Series, d: int or float) -> pd.Series:\n if isinstance(d, float):\n d = math.floor(d)\n\n def func(x):\n return np.nancumprod(x)[-1]\n\n if isinstance(x.index, pd.MultiIndex):\n return x.groupby(level=1).rolling(d).apply(func)\n else:\n return x.rolling(d).apply(func)",
"def row_reduce(self):\n res = self.row_echelon()\n for i in range(1, res.m):\n for j in range(res.n):\n if res[i, j] == 1:\n for k in range(i):\n constant = res[k, j]\n res.data[k] = [elem_k - elem_i * constant\n for elem_i, elem_k in\n zip(res.data[i], res.data[k])]\n break\n return res",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)"
] | [
"0.7896212",
"0.7896212",
"0.7896212",
"0.70786357",
"0.7026533",
"0.6977891",
"0.6960451",
"0.6529018",
"0.62155104",
"0.620573",
"0.6204624",
"0.61433333",
"0.61382365",
"0.6083183",
"0.6030813",
"0.59799564",
"0.59799564",
"0.5859764",
"0.5859764",
"0.5859764",
"0.5842625",
"0.5842625",
"0.5839375",
"0.57987845",
"0.55779845",
"0.55708915",
"0.555723",
"0.555723",
"0.555723",
"0.5544854"
] | 0.8052243 | 0 |
Reduce this DataArray's data by applying ``all`` along some dimension(s). | def all(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="all",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_all,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.all(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.all(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.all(part.view(ndarray), *args, **kwargs))",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def all(tensor):\n raise NotImplementedError",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def _reduce(self, name, skipna=True, **kwargs):\n if name == \"any\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return any_op(self.data, skipna=skipna)\n elif name == \"all\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return all_op(self.data, skipna=skipna)\n\n raise TypeError(\n \"cannot perform {name} with type {dtype}\".format(\n name=name, dtype=self.dtype\n )\n )",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def all(self, func=bool):\n return all(map(func, self._))",
"def perform_reduce(data, idx_list):\n bool_mask = np.zeros(len(data), dtype=np.bool)\n bool_mask[idx_list] = True\n data = data[bool_mask]\n return(data)",
"def all(self, key: Callable[[T], bool]=None) -> bool:\n if key is None:\n return all(self.array)\n return all(key(x) for x in self.array)",
"def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)",
"def allreduce( # pylint:disable=invalid-name\n data: np.ndarray, op: Op, prepare_fun: Optional[Callable[[np.ndarray], None]] = None\n) -> np.ndarray:\n if prepare_fun is None:\n return collective.allreduce(data, collective.Op(op))\n raise Exception(\"preprocessing function is no longer supported\")",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims",
"def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)",
"def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))",
"def is_full(self) -> bool:\n return self._array[0].all()"
] | [
"0.6964989",
"0.6916764",
"0.68831027",
"0.68831027",
"0.6737816",
"0.6579109",
"0.6579109",
"0.6542948",
"0.63997453",
"0.63997453",
"0.6337151",
"0.63178223",
"0.6278503",
"0.5919452",
"0.5876098",
"0.5819424",
"0.57391477",
"0.5717155",
"0.56248033",
"0.55787617",
"0.5542761",
"0.55341935",
"0.5483817",
"0.5483001",
"0.54796726",
"0.54514015",
"0.5425047",
"0.54002756",
"0.53731555",
"0.536152"
] | 0.7083386 | 1 |
Reduce this DataArray's data by applying ``any`` along some dimension(s). | def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="any",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_any,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(tensor, axis=None, keepdims=False, **kwargs):\n return tensor.any(axis=axis, keepdims=keepdims, **kwargs)",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))",
"def matrix_any(condition):\n return np.sum(np.sum(condition)) > 0",
"def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False",
"def any(self):\n return self._summarize(lambda c: c.any)",
"def any(self, name, codes):\n if self.is_array(name):\n logics = []\n for s in self.sources(name):\n logics.append({s: has_any(codes)})\n slicer = self.take(union(logics))\n else:\n slicer = self.take({name: has_any(codes)})\n return slicer",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def any(self, values):\n return self.aggregate(values, \"any\")",
"def any(self, key: Callable[[T], bool]=None) -> bool:\n if key is None:\n return any(self.array)\n return any(key(x) for x in self.array)",
"def eo_filter(source):\n nodata_bools = source.apply(lambda array: array == array.nodata).to_array(dim='band')\n\n nothingness = nodata_bools.all(dim='band')\n noncontiguous = nodata_bools.any(dim='band')\n\n return np.uint8(NO_DATA) * nothingness | np.uint8(MASKED_NO_CONTIGUITY) * noncontiguous",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def is1d(a):\n return np.sum(asarray(asarray(a).shape) > 1) <= 1",
"def any(self):\n for v in self.sects.values():\n if np.any(v):\n return True\n if self.is_full():\n return False\n else:\n return np.any(self.defval)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def reduce_any(\n state: State,\n action: Action,\n next_state: State,\n *,\n terminating_functions: Sequence[TerminatingFunction],\n) -> bool:\n return reduce(\n state,\n action,\n next_state,\n terminating_functions=terminating_functions,\n reduction=any,\n )",
"def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data"
] | [
"0.71060604",
"0.7097509",
"0.69430715",
"0.69430715",
"0.686053",
"0.6258159",
"0.6215559",
"0.61174256",
"0.6110123",
"0.6066965",
"0.60409343",
"0.5994262",
"0.5851336",
"0.56650263",
"0.5628333",
"0.5623232",
"0.5615794",
"0.55963945",
"0.55963945",
"0.5479969",
"0.5478053",
"0.5473746",
"0.5456341",
"0.54368055",
"0.5430094",
"0.54249406",
"0.5421376",
"0.5421376",
"0.5412339",
"0.54108405"
] | 0.7147029 | 1 |
Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). | def cumsum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.cumsum,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def multidim_cumsum(a):\n out = a.cumsum(-1)\n for i in range(2, a.ndim+1):\n np.cumsum(out, axis=-i, out=out)\n return out",
"def _cumsum(self) -> np.ndarray:\n\n if not hasattr(self, \"__cumsum\"):\n self.__cumsum = np.insert(np.cumsum(self.sizes), 0, 0)\n return self.__cumsum",
"def cumsum(x, axis=None):\r\n return CumsumOp(axis=axis)(x)",
"def cumsum(tensor, axis=None):\n raise NotImplementedError",
"def _cumsum_einsum(x, precision=jax.lax.Precision.DEFAULT):\n mask = jnp.triu(jnp.ones(x.shape, dtype=jnp.bool_))\n return jnp.einsum(\"ij,jk\", x, mask, precision=precision)",
"def cumsum(self):\n return self._lift(lambda c: c.cumsum)",
"def exclusive_cumsum(x):\n return torch.cumsum(torch.cat([x.new_zeros(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def cumsumr(x, axis=0):\n cums = x.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums*(x == 0), axis=axis)",
"def cumsum(module, y, dimension):\n _import_modules()\n if module in [np, ma, torch, jnp]:\n return module.cumsum(y, axis=dimension)\n elif module == tf:\n return tf.math.cumsum(y, dimension)",
"def _strict_1d_cumsum(tensor, len_tensor):\n # Assumes tensor shape is fully defined.\n with ops.name_scope('strict_1d_cumsum', values=[tensor]):\n if len_tensor == 0:\n return constant_op.constant([])\n len_pad = len_tensor - 1\n x = array_ops.pad(tensor, [[len_pad, 0]])\n h = array_ops.ones_like(x)\n return _strict_conv1d(x, h)[:len_tensor]",
"def local_sum(a,tshape, padval):\n\n # zero-padding\n a = ndpad(a,tshape, padval)\n\n # difference between shifted copies of an array along a given dimension\n def shiftdiff(a,tshape,shiftdim):\n ind1 = [slice(None,None),]*a.ndim\n ind2 = [slice(None,None),]*a.ndim\n ind1[shiftdim] = slice(tshape[shiftdim],a.shape[shiftdim]-1)\n ind2[shiftdim] = slice(0,a.shape[shiftdim]-tshape[shiftdim]-1)\n return a[ind1] - a[ind2]\n\n # take the cumsum along each dimension and subtracting a shifted version\n # from itself. this reduces the number of computations to 2*N additions\n # and 2*N subtractions for an N-dimensional array, independent of its\n # size.\n #\n # See:\n # <http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>\n for dd in xrange(a.ndim):\n a = np.cumsum(a,dd)\n a = shiftdiff(a,tshape,dd)\n return a",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def acumsum (a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if type(dimension) in [ListType, TupleType, N.ndarray]:\r\n dimension = list(dimension)\r\n dimension.sort()\r\n dimension.reverse()\r\n for d in dimension:\r\n a = N.add.accumulate(a,d)\r\n return a\r\n else:\r\n return N.add.accumulate(a,dimension)",
"def cumulative_sum(array):\n res = []\n val = 0\n for elem in array:\n val += elem\n res.append(val)\n return res",
"def cumsum_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = cumsum_1d_nb(a[:, col])\n return out",
"def get_ragged_sum(\n arr,\n lens,\n axis = -1,\n device=torch.device('cpu'),\n):\n # add zero as first dimension so that when there are zero non-nan values,\n # it selects zero as the value\n zeros_shape = list(arr.shape)\n zeros_shape[axis] = 1\n zero = torch.zeros(zeros_shape).to(device)\n arr = torch.cat([zero, arr], dim=axis)\n arr = torch.cumsum(arr, axis)\n\n sums = torch.gather(arr, axis, lens)\n\n mask = (lens > 0).float()\n sums = sums * mask\n arr = sums.squeeze(axis)\n return arr",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def normalize_cumsum(self, x):\n\n x = self.normalize_global(x)\n\n if self.right_context is None and self.left_context is None:\n return x\n\n if self.left_context is None:\n left_context = x.shape[0]\n else:\n left_context = self.left_context\n\n if self.right_context is None:\n right_context = x.shape[0]\n else:\n right_context = self.right_context\n\n total_context = left_context + right_context + 1\n\n if x.shape[0] <= min(right_context, left_context)+1:\n # if context is larger than the signal we still return global normalization\n return x\n\n c_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n counts = np.zeros((x.shape[0]+total_context, 1,), dtype=float_cpu())\n \n c_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x, axis=0)\n c_x[left_context+x.shape[0]+1:] = c_x[left_context+x.shape[0]]\n counts[left_context+1:left_context+x.shape[0]+1] = np.arange(1, x.shape[0]+1, dtype=float_cpu())[:,None]\n counts[left_context+x.shape[0]+1:] = x.shape[0]\n\n if self.norm_var:\n c2_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n c2_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x*x, axis=0)\n c2_x[left_context+x.shape[0]+1:] = c2_x[left_context+x.shape[0]]\n\n counts = counts[total_context:] - counts[:-total_context]\n m_x = (c_x[total_context:] - c_x[:-total_context])/counts\n\n if self.norm_mean:\n x -= m_x\n\n if self.norm_var:\n m2_x = (c2_x[total_context:] - c2_x[:-total_context])/counts\n s2_x=m2_x - m_x**2\n s2_x[s2_x<1e-5]=1e-5\n s_x = np.sqrt(s2_x)\n x /= s_x\n\n return x",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n cumsum = 0\n for i in range(a.shape[0]):\n if ~np.isnan(a[i]):\n cumsum += a[i]\n out[i] = cumsum\n else:\n out[i] = np.nan\n return out",
"def _gu_sum(a, **kwds):\n return np.sum(np.ascontiguousarray(a), axis=-1, **kwds)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.7601719",
"0.7601719",
"0.7601719",
"0.71929914",
"0.70374393",
"0.6974349",
"0.6739214",
"0.6690532",
"0.6560123",
"0.64532024",
"0.6444809",
"0.6434722",
"0.6355989",
"0.6352609",
"0.6270605",
"0.6270605",
"0.6270605",
"0.62120354",
"0.6194247",
"0.6184793",
"0.6179344",
"0.6163564",
"0.6073962",
"0.60379475",
"0.6033755",
"0.6033755",
"0.6033755",
"0.6013043",
"0.6004323",
"0.59458274"
] | 0.7733504 | 1 |
Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). | def cumprod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.cumprod,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(a, axis=None, dtype=None, out=None):\n a = astensor(a)\n if dtype is None:\n dtype = np.empty((1,), dtype=a.dtype).cumprod().dtype\n op = TensorCumprod(axis=axis, dtype=dtype)\n return op(a, out=out)",
"def cumprod(x, axis=None):\r\n return CumprodOp(axis=axis)(x)",
"def cumprod(self):\n return self._lift(lambda c: c.cumprod)",
"def cumprod(x, dim=-1, exclusive=False):\n if exclusive:\n length = x.size(dim)\n x = torch.narrow(F.pad(x, pad=(1, 0, 0, 0), value=1.0), dim, 0, length)\n return torch.cumprod(x, dim=dim)",
"def cumprod_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = cumprod_1d_nb(a[:, col])\n return out",
"def exclusive_cumprod(x):\n return torch.cumprod(torch.cat([x.new_ones(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def cumprod_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n cumprod = 1\n for i in range(a.shape[0]):\n if ~np.isnan(a[i]):\n cumprod *= a[i]\n out[i] = cumprod\n else:\n out[i] = np.nan\n return out",
"def exclusive_cumprod(x):\n batch_size, sequence_length = x.size()\n if torch.cuda.is_available():\n one_x = torch.cat([torch.ones(batch_size, 1).cuda(), x], dim=1)[:, :-1]\n else:\n one_x = torch.cat([torch.ones(batch_size, 1), x], dim=1)[:, :-1]\n return torch.cumprod(one_x, dim=1)",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:\n return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)",
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def multidim_cumsum(a):\n out = a.cumsum(-1)\n for i in range(2, a.ndim+1):\n np.cumsum(out, axis=-i, out=out)\n return out",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def reduce_by_multiplication(data):\n total = 1\n for num in data:\n total *= num\n return total",
"def relay_array_reduce(c, fn, array, shape):\n assert fn.is_constant(Primitive)\n assert shape.is_constant(tuple)\n fn = fn.value\n tshp = shape.value\n ary = c.ref(array)\n if fn == P.scalar_add:\n ashp = ashape(array)\n if len(tshp) < len(ashp):\n ts = (1,) * (len(ashp) - len(tshp)) + tshp\n else:\n ts = tshp\n axis = tuple(i for i, t in enumerate(ts) if t == 1)\n res = relay.op.sum(ary, axis=axis, keepdims=True)\n if len(tshp) < len(ashp):\n rtshp = tshp\n if tshp == ():\n tshp = (1,)\n res = relay.op.reshape(res, newshape=tshp)\n if rtshp == ():\n res = relay.op.take(res, relay.const(0))\n return res\n elif fn == P.scalar_mul:\n ashp = ashape(array)\n if len(tshp) in (0, len(ashp)):\n res = relay.op.prod(ary)\n else:\n raise NotImplementedError(\n 'We currently support only full product on an array.')\n return res\n else:\n raise NotImplementedError(f\"reduce with {fn}\")",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.78693175",
"0.78693175",
"0.78693175",
"0.74438745",
"0.73570323",
"0.72512066",
"0.71616554",
"0.6794507",
"0.66913575",
"0.6365985",
"0.6155289",
"0.60739994",
"0.6059755",
"0.60538614",
"0.602666",
"0.59202105",
"0.57904106",
"0.57707596",
"0.57707596",
"0.57707596",
"0.5762588",
"0.5762588",
"0.5749816",
"0.5749816",
"0.5749816",
"0.5738581",
"0.5718635",
"0.5644082",
"0.55872554",
"0.55872554"
] | 0.8048191 | 1 |
Reduce this DataArray's data by applying ``count`` along some dimension(s). | def count(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="count",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.count,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def __len__(self):\n return self.flatten_dim(self.shape[0])",
"def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)",
"def count(self):\r\n return self.data_array.size",
"def size(self, index):\n return self.d1.size(index)\n # FILTER BASED ON D1",
"def count_dims(da):\n return len(da.dims)",
"def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed",
"def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def my_sum(a, axis, count):\n if a.shape[axis] == count:\n return a.sum(axis)\n elif a.shape[axis] == 1:\n return count * a.sum(axis)\n else:\n raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d' % (a.shape, axis, count))",
"def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def dtype_element_count( dtype, name = None ):\n def count( descr ):\n if len( descr ) > 2:\n shape = descr[ 2 ]\n # multiply the shape\n return reduce( lambda x, y: x * y, shape )\n else:\n return 1\n\n if name:\n shape = dtype[ name ].shape\n else:\n shape = dtype.shape\n\n if len(shape) > 0:\n return reduce( lambda x, y: x * y, shape )\n else:\n descr = dtype.descr\n size = 0\n for type in descr:\n size += count( type )\n return size",
"def size(self):\n return reduce(mul, self.shape, 1)",
"def dimension_count(self):\n return self._dimensionCount",
"def counts_to_density(\n x: Union[_cpp.DataArray, _cpp.Dataset], dim: str\n) -> Union[_cpp.DataArray, _cpp.Dataset]:\n return _call_cpp_func(_cpp.counts_to_density, x, dim)",
"def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)",
"def dim_reduction( M ):\n tot_count_per_type = M.sum(axis = 1)\n tot_count = float(tot_count_per_type.sum())\n sorted_index = np.argsort(tot_count_per_type)\n threshold = 0.01\n accu = 0\n for i in range(len(sorted_index)):\n perc = float(tot_count_per_type[sorted_index[i]])/tot_count\n accu = accu + perc\n if accu > threshold:\n break;\n \n return sorted_index[0:i]",
"def ndarray_size(self) -> int:\n pass",
"def density_to_counts(\n x: Union[_cpp.DataArray, _cpp.Dataset], dim: str\n) -> Union[_cpp.DataArray, _cpp.Dataset]:\n return _call_cpp_func(_cpp.density_to_counts, x, dim)",
"def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])",
"def count(self, axis=None):\n return self.data.count(axis=axis)",
"def reduce_dimension(self, n_components=2):\n\n reducer = PCA(n_components=n_components)\n\n X = self.data.values.astype(np.float32)\n\n norm = Normalizer()\n Xnorm = norm.fit_transform(X)\n\n return reducer.fit_transform(Xnorm)",
"def __len__(self):\n return sum(f.count for f in self.filters)",
"def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return self.nbytes\n else:\n return self.nbytes",
"def __len__(self):\n ret = self.data.shape[0]\n return ret",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.6898398",
"0.65379834",
"0.6224404",
"0.6224404",
"0.59004956",
"0.5841828",
"0.5838448",
"0.57762665",
"0.573857",
"0.56812054",
"0.5657385",
"0.55505395",
"0.55487853",
"0.55471367",
"0.55208504",
"0.5506666",
"0.54985136",
"0.5492544",
"0.54757905",
"0.54540455",
"0.5439526",
"0.5413956",
"0.54107267",
"0.53990823",
"0.5394898",
"0.5386082",
"0.5362852",
"0.535802",
"0.5347619",
"0.53463036"
] | 0.6582146 | 1 |
Reduce this DataArray's data by applying ``all`` along some dimension(s). | def all(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="all",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_all,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def all(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.all(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.all(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.all(part.view(ndarray), *args, **kwargs))",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def all(tensor):\n raise NotImplementedError",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def _reduce(self, name, skipna=True, **kwargs):\n if name == \"any\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return any_op(self.data, skipna=skipna)\n elif name == \"all\" and pa.types.is_boolean(self.dtype.arrow_dtype):\n return all_op(self.data, skipna=skipna)\n\n raise TypeError(\n \"cannot perform {name} with type {dtype}\".format(\n name=name, dtype=self.dtype\n )\n )",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def all(self, func=bool):\n return all(map(func, self._))",
"def perform_reduce(data, idx_list):\n bool_mask = np.zeros(len(data), dtype=np.bool)\n bool_mask[idx_list] = True\n data = data[bool_mask]\n return(data)",
"def all(self, key: Callable[[T], bool]=None) -> bool:\n if key is None:\n return all(self.array)\n return all(key(x) for x in self.array)",
"def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)",
"def allreduce( # pylint:disable=invalid-name\n data: np.ndarray, op: Op, prepare_fun: Optional[Callable[[np.ndarray], None]] = None\n) -> np.ndarray:\n if prepare_fun is None:\n return collective.allreduce(data, collective.Op(op))\n raise Exception(\"preprocessing function is no longer supported\")",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims",
"def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)",
"def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))",
"def is_full(self) -> bool:\n return self._array[0].all()"
] | [
"0.6964989",
"0.6916764",
"0.68831027",
"0.68831027",
"0.6737816",
"0.6579109",
"0.6579109",
"0.6542948",
"0.63997453",
"0.63997453",
"0.6337151",
"0.63178223",
"0.6278503",
"0.5919452",
"0.5876098",
"0.5819424",
"0.57391477",
"0.5717155",
"0.56248033",
"0.55787617",
"0.5542761",
"0.55341935",
"0.5483817",
"0.5483001",
"0.54796726",
"0.54514015",
"0.5425047",
"0.54002756",
"0.53731555",
"0.536152"
] | 0.7083386 | 0 |
Reduce this DataArray's data by applying ``any`` along some dimension(s). | def any(
self,
dim: Dims = None,
*,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
if (
flox_available
and OPTIONS["use_flox"]
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="any",
dim=dim,
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.array_any,
dim=dim,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def any(self, axis=None, keepdims=False, out=None):\n return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def all(self, axis=None, keepdims=False, out=None):\n return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)",
"def any(tensor, axis=None, keepdims=False, **kwargs):\n return tensor.any(axis=axis, keepdims=keepdims, **kwargs)",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))",
"def matrix_any(condition):\n return np.sum(np.sum(condition)) > 0",
"def isAny(self,test):\n for x in np.nditer(self.t, op_flags=['readonly']):\n if op(x):\n return True\n return False",
"def any(self):\n return self._summarize(lambda c: c.any)",
"def any(self, name, codes):\n if self.is_array(name):\n logics = []\n for s in self.sources(name):\n logics.append({s: has_any(codes)})\n slicer = self.take(union(logics))\n else:\n slicer = self.take({name: has_any(codes)})\n return slicer",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False",
"def any(self, values):\n return self.aggregate(values, \"any\")",
"def any(self, key: Callable[[T], bool]=None) -> bool:\n if key is None:\n return any(self.array)\n return any(key(x) for x in self.array)",
"def eo_filter(source):\n nodata_bools = source.apply(lambda array: array == array.nodata).to_array(dim='band')\n\n nothingness = nodata_bools.all(dim='band')\n noncontiguous = nodata_bools.any(dim='band')\n\n return np.uint8(NO_DATA) * nothingness | np.uint8(MASKED_NO_CONTIGUITY) * noncontiguous",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def is1d(a):\n return np.sum(asarray(asarray(a).shape) > 1) <= 1",
"def any(self):\n for v in self.sects.values():\n if np.any(v):\n return True\n if self.is_full():\n return False\n else:\n return np.any(self.defval)",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"all\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_all,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def reduce_any(\n state: State,\n action: Action,\n next_state: State,\n *,\n terminating_functions: Sequence[TerminatingFunction],\n) -> bool:\n return reduce(\n state,\n action,\n next_state,\n terminating_functions=terminating_functions,\n reduction=any,\n )",
"def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data"
] | [
"0.71060604",
"0.7097509",
"0.69430715",
"0.69430715",
"0.686053",
"0.6258159",
"0.6215559",
"0.61174256",
"0.6110123",
"0.6066965",
"0.60409343",
"0.5994262",
"0.5851336",
"0.56650263",
"0.5628333",
"0.5623232",
"0.5615794",
"0.55963945",
"0.55963945",
"0.5479969",
"0.5478053",
"0.5473746",
"0.5456341",
"0.54368055",
"0.5430094",
"0.54249406",
"0.5421376",
"0.5421376",
"0.5412339",
"0.54108405"
] | 0.7147029 | 0 |
Reduce this DataArray's data by applying ``median`` along some dimension(s). | def median(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.median,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def median_filter(self):\n print \"Median-Filtering...\"\n D = self.D\n x = np.median(np.median(D,axis=1),axis=1)\n for i in xrange(len(x)):\n D[i,:,:] -= x[i]\n self.D = D\n print \"done.\"",
"def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def median(X,**kwargs):\n \n m = np.median(X,axis=0)\n m = np.array(m).reshape(-1)\n return m",
"def getMedian(self, windowSize=0):\r\n try:\r\n if self._data.size == 0:\r\n raise RuntimeError(\"Filter1D data is empty. Call Filter1D.addDataPoint() to add data prior calling Filter1D.getMedian().\")\r\n if windowSize <= 0:\r\n windowSize = self._maxSize\r\n if windowSize % 2 == 1 and windowSize <= self._maxSize:\r\n return np.median(self._data[-windowSize:])\r\n else:\r\n raise ValueError(\"windowSize must be an odd integer <= maxSize\")\r\n except ValueError:\r\n raise",
"def _median(self, in_arr, takeEvenMean):\n if takeEvenMean:\n return numpy.median(in_arr)\n else:\n return numpy.sort(in_arr, axis=None)[(in_arr.size-1)/2]",
"def Median(data):\n return data.median()",
"def median1D(self):\n # TO DO\n pass",
"def median(self):\n return self._summarize(lambda c: c.median)",
"def median(self):\n # TO DO\n pass",
"def untruncatedMedian(self, x):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)",
"def mad(array, axis=None, keepdims=False):\n ad = np.abs(array - np.median(array, axis, keepdims=True))\n mad = np.median(ad, axis, keepdims=keepdims)\n return mad",
"def data_filter(input_array, step):\n mod = input_array.shape[0] % step\n rows = input_array.shape[0] // step\n factor = np.arange(rows)\n if mod:\n in_mat = np.reshape(input_array[:-mod], (rows, -1))\n min_array = np.r_[in_mat.min(axis=1), min(input_array[-mod:])]\n max_array = np.r_[in_mat.max(axis=1), max(input_array[-mod:])]\n median = np.median(in_mat, axis=1)\n median_rest = np.median(input_array[-mod:])\n median_array = np.r_[median, median_rest]\n\n # get min, max and average value indices\n min_ind = in_mat.argmin(axis=1)\n min_ind += factor * step\n min_ind = np.append(min_ind, input_array[-mod:].argmin() + rows * step)\n\n max_ind = in_mat.argmax(axis=1)\n max_ind += factor * step\n max_ind = np.append(max_ind, input_array[-mod:].argmax() + rows * step)\n\n median_trans = np.reshape(median, (rows, -1))\n median_ind = abs(in_mat - median_trans).argmin(axis=1)\n median_ind += factor * step\n median_ind = np.append(median_ind, abs(\n input_array[-mod:] - median_rest).argmin() + rows * step)\n\n else:\n in_mat = np.reshape(input_array, (input_array.shape[0] // step, -1))\n min_array = in_mat.min(axis=1)\n max_array = in_mat.max(axis=1)\n median_array = np.median(in_mat, axis=1)\n\n # get min, max and average value indices\n min_ind = in_mat.argmin(axis=1)\n min_ind += factor * step\n\n max_ind = in_mat.argmax(axis=1)\n max_ind += factor * step\n\n median_trans = np.reshape(median_array, (rows, -1))\n median_ind = abs(in_mat - median_trans).argmin(axis=1)\n median_ind += factor * step\n\n return min_array, median_array, max_array, min_ind, median_ind, max_ind",
"def median(self):\n return self._lift(\"median\")",
"def median(a, axis=0, out=None, overwrite_input=False):\n if overwrite_input:\n if axis is None:\n sorted = a.ravel()\n sorted.sort()\n else:\n a.sort(axis=axis)\n sorted = a\n else:\n sorted = sort(a, axis=axis)\n if axis is None:\n axis = 0\n indexer = [slice(None)] * sorted.ndim\n index = int(sorted.shape[axis]/2)\n if sorted.shape[axis] % 2 == 1:\n # index with slice to allow mean (below) to work\n indexer[axis] = slice(index, index+1)\n else:\n indexer[axis] = slice(index-1, index+1)\n # Use mean in odd and even case to coerce data type\n # and check, use out array.\n return mean(sorted[indexer], axis=axis, out=out)",
"def nanmedian(arr, **kwargs):\r\n return ma.median( ma.masked_where(arr!=arr, arr), **kwargs )",
"def scipy_nanmedian(x, axis=0):\n x, axis = _chk_asarray(x, axis)\n if x.ndim == 0:\n return float(x.item())\n shape = list(x.shape)\n shape.pop(axis)\n if 0 in shape:\n x = np.empty(shape)\n else:\n x = x.copy()\n x = np.apply_along_axis(_nanmedian, axis, x)\n if x.ndim == 0:\n x = float(x.item())\n return x",
"def apply_1d_median_filter(n, timage):\n image_shape = timage.shape\n ovrlay = int(n / 2)\n res_matrix = np.copy(timage)\n for i in np.arange(image_shape[0])[1:-1]:\n local_matrix = timage[i - ovrlay:i + ovrlay + 1] \n median = np.median(local_matrix)\n res_matrix[i] = median\n return res_matrix",
"def median(self) -> \"Stream[float]\":\n return self.agg(np.median).astype(\"float\")",
"def samples_median(samples):\n return [np.median(s) for s in samples.T]",
"def samples_median(samples):\n return [np.median(s) for s in samples.T]",
"def median(self):\n self.data.sort()\n\n if len(self.data) % 2 == 1:\n median = self.data[int(self.size/2)]\n else:\n median = (self.data[int(self.size/2 - 1)] + \n self.data[int(self.size/2)]) / 2\n return median",
"def median_f(self, x):\n # TODO: the axis used in nanmean is different for U and Uf\n # calcs - change Uf dims to make consistent?\n return stats.nanmedian(x, axis=1)",
"def untruncatedMedian(self):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)",
"def untruncatedMedian(self):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)",
"def median(self, name, **kwargs):\n data = self.get(name,**kwargs)\n return np.percentile(data,[50])",
"def test_median(self):\r\n m = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\r\n expected = 6.5\r\n observed = median(m, axis=None)\r\n self.assertEqual(observed, expected)\r\n\r\n expected = array([5.5, 6.5, 7.5])\r\n observed = median(m, axis=0)\r\n self.assertEqual(observed, expected)\r\n\r\n expected = array([2.0, 5.0, 8.0, 11.0])\r\n observed = median(m, axis=1)\r\n self.assertEqual(observed, expected)\r\n\r\n self.assertRaises(ValueError, median, m, 10)",
"def _nanmedian(arr1d): # This only works on 1d arrays\n cond = 1-np.isnan(arr1d)\n x = np.sort(np.compress(cond,arr1d,axis=-1))\n if x.size == 0:\n return np.nan\n return np.median(x)",
"def nanmedian(x, axis=0, preop=None):\n x = np.asarray(x)\n if axis is None:\n x = x.ravel()\n axis = 0\n if x.ndim == 0:\n return float(x.item())\n if preop is None and hasattr(np, 'nanmedian'):\n return np.nanmedian(x, axis)\n x = np.apply_along_axis(_nanmedian, axis, x, preop)\n if x.ndim == 0:\n x = float(x.item())\n return x"
] | [
"0.7840716",
"0.71675795",
"0.71675795",
"0.71675795",
"0.69720316",
"0.6909001",
"0.6809316",
"0.67637587",
"0.6649234",
"0.65772283",
"0.649336",
"0.6491507",
"0.64689547",
"0.64432216",
"0.6414413",
"0.6403336",
"0.6348854",
"0.6339596",
"0.6326826",
"0.6324761",
"0.6321141",
"0.6321141",
"0.6319095",
"0.6315058",
"0.63099444",
"0.63099444",
"0.6305441",
"0.62466276",
"0.62172055",
"0.6200517"
] | 0.7371451 | 1 |
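For context, the `median` reducer shown in the row above follows xarray's standard reduce signature (`dim`, `skipna`, `keep_attrs`). A minimal usage sketch, assuming a recent xarray and NumPy; the array values and dimension names are invented for illustration:

```python
import numpy as np
import xarray as xr

# 2 x 3 array with one missing value along the "y" dimension
arr = xr.DataArray(
    [[1.0, 2.0, np.nan], [4.0, 5.0, 6.0]],
    dims=("x", "y"),
)

# Collapse the "y" dimension; NaNs are skipped by default for float data
print(arr.median(dim="y").values)                # [1.5, 5.0]

# Keep NaNs in the computation instead
print(arr.median(dim="y", skipna=False).values)  # [nan, 5.0]
```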
Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). | def cumsum(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.cumsum,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def multidim_cumsum(a):\n out = a.cumsum(-1)\n for i in range(2, a.ndim+1):\n np.cumsum(out, axis=-i, out=out)\n return out",
"def _cumsum(self) -> np.ndarray:\n\n if not hasattr(self, \"__cumsum\"):\n self.__cumsum = np.insert(np.cumsum(self.sizes), 0, 0)\n return self.__cumsum",
"def cumsum(x, axis=None):\r\n return CumsumOp(axis=axis)(x)",
"def cumsum(tensor, axis=None):\n raise NotImplementedError",
"def _cumsum_einsum(x, precision=jax.lax.Precision.DEFAULT):\n mask = jnp.triu(jnp.ones(x.shape, dtype=jnp.bool_))\n return jnp.einsum(\"ij,jk\", x, mask, precision=precision)",
"def cumsum(self):\n return self._lift(lambda c: c.cumsum)",
"def exclusive_cumsum(x):\n return torch.cumsum(torch.cat([x.new_zeros(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def cumsumr(x, axis=0):\n cums = x.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums*(x == 0), axis=axis)",
"def cumsum(module, y, dimension):\n _import_modules()\n if module in [np, ma, torch, jnp]:\n return module.cumsum(y, axis=dimension)\n elif module == tf:\n return tf.math.cumsum(y, dimension)",
"def _strict_1d_cumsum(tensor, len_tensor):\n # Assumes tensor shape is fully defined.\n with ops.name_scope('strict_1d_cumsum', values=[tensor]):\n if len_tensor == 0:\n return constant_op.constant([])\n len_pad = len_tensor - 1\n x = array_ops.pad(tensor, [[len_pad, 0]])\n h = array_ops.ones_like(x)\n return _strict_conv1d(x, h)[:len_tensor]",
"def local_sum(a,tshape, padval):\n\n # zero-padding\n a = ndpad(a,tshape, padval)\n\n # difference between shifted copies of an array along a given dimension\n def shiftdiff(a,tshape,shiftdim):\n ind1 = [slice(None,None),]*a.ndim\n ind2 = [slice(None,None),]*a.ndim\n ind1[shiftdim] = slice(tshape[shiftdim],a.shape[shiftdim]-1)\n ind2[shiftdim] = slice(0,a.shape[shiftdim]-tshape[shiftdim]-1)\n return a[ind1] - a[ind2]\n\n # take the cumsum along each dimension and subtracting a shifted version\n # from itself. this reduces the number of computations to 2*N additions\n # and 2*N subtractions for an N-dimensional array, independent of its\n # size.\n #\n # See:\n # <http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>\n for dd in xrange(a.ndim):\n a = np.cumsum(a,dd)\n a = shiftdiff(a,tshape,dd)\n return a",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def acumsum (a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if type(dimension) in [ListType, TupleType, N.ndarray]:\r\n dimension = list(dimension)\r\n dimension.sort()\r\n dimension.reverse()\r\n for d in dimension:\r\n a = N.add.accumulate(a,d)\r\n return a\r\n else:\r\n return N.add.accumulate(a,dimension)",
"def cumulative_sum(array):\n res = []\n val = 0\n for elem in array:\n val += elem\n res.append(val)\n return res",
"def cumsum_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = cumsum_1d_nb(a[:, col])\n return out",
"def get_ragged_sum(\n arr,\n lens,\n axis = -1,\n device=torch.device('cpu'),\n):\n # add zero as first dimension so that when there are zero non-nan values,\n # it selects zero as the value\n zeros_shape = list(arr.shape)\n zeros_shape[axis] = 1\n zero = torch.zeros(zeros_shape).to(device)\n arr = torch.cat([zero, arr], dim=axis)\n arr = torch.cumsum(arr, axis)\n\n sums = torch.gather(arr, axis, lens)\n\n mask = (lens > 0).float()\n sums = sums * mask\n arr = sums.squeeze(axis)\n return arr",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def normalize_cumsum(self, x):\n\n x = self.normalize_global(x)\n\n if self.right_context is None and self.left_context is None:\n return x\n\n if self.left_context is None:\n left_context = x.shape[0]\n else:\n left_context = self.left_context\n\n if self.right_context is None:\n right_context = x.shape[0]\n else:\n right_context = self.right_context\n\n total_context = left_context + right_context + 1\n\n if x.shape[0] <= min(right_context, left_context)+1:\n # if context is larger than the signal we still return global normalization\n return x\n\n c_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n counts = np.zeros((x.shape[0]+total_context, 1,), dtype=float_cpu())\n \n c_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x, axis=0)\n c_x[left_context+x.shape[0]+1:] = c_x[left_context+x.shape[0]]\n counts[left_context+1:left_context+x.shape[0]+1] = np.arange(1, x.shape[0]+1, dtype=float_cpu())[:,None]\n counts[left_context+x.shape[0]+1:] = x.shape[0]\n\n if self.norm_var:\n c2_x = np.zeros((x.shape[0]+total_context, x.shape[1],), dtype=float_cpu())\n c2_x[left_context+1:left_context+x.shape[0]+1] = np.cumsum(x*x, axis=0)\n c2_x[left_context+x.shape[0]+1:] = c2_x[left_context+x.shape[0]]\n\n counts = counts[total_context:] - counts[:-total_context]\n m_x = (c_x[total_context:] - c_x[:-total_context])/counts\n\n if self.norm_mean:\n x -= m_x\n\n if self.norm_var:\n m2_x = (c2_x[total_context:] - c2_x[:-total_context])/counts\n s2_x=m2_x - m_x**2\n s2_x[s2_x<1e-5]=1e-5\n s_x = np.sqrt(s2_x)\n x /= s_x\n\n return x",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n cumsum = 0\n for i in range(a.shape[0]):\n if ~np.isnan(a[i]):\n cumsum += a[i]\n out[i] = cumsum\n else:\n out[i] = np.nan\n return out",
"def _gu_sum(a, **kwds):\n return np.sum(np.ascontiguousarray(a), axis=-1, **kwds)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.7601719",
"0.7601719",
"0.7601719",
"0.71929914",
"0.70374393",
"0.6974349",
"0.6739214",
"0.6690532",
"0.6560123",
"0.64532024",
"0.6444809",
"0.6434722",
"0.6355989",
"0.6352609",
"0.6270605",
"0.6270605",
"0.6270605",
"0.62120354",
"0.6194247",
"0.6184793",
"0.6179344",
"0.6163564",
"0.6073962",
"0.60379475",
"0.6033755",
"0.6033755",
"0.6033755",
"0.6013043",
"0.6004323",
"0.59458274"
] | 0.7733504 | 0 |
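Unlike `median`, the `cumsum` reducer above keeps the reduced dimension and returns a running total along it. A short sketch under the same assumptions (recent xarray, illustrative data):

```python
import xarray as xr

arr = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=("x", "y"))

# Running sum along "y": shape is unchanged, values accumulate left to right
print(arr.cumsum(dim="y").values)   # [[1, 3, 6], [4, 9, 15]]

# Accumulating along "x" instead
print(arr.cumsum(dim="x").values)   # [[1, 2, 3], [5, 7, 9]]
```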
Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). | def cumprod(
self,
dim: Dims = None,
*,
skipna: bool | None = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> DataArray:
return self.reduce(
duck_array_ops.cumprod,
dim=dim,
skipna=skipna,
keep_attrs=keep_attrs,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumprod(a, axis=None, dtype=None, out=None):\n a = astensor(a)\n if dtype is None:\n dtype = np.empty((1,), dtype=a.dtype).cumprod().dtype\n op = TensorCumprod(axis=axis, dtype=dtype)\n return op(a, out=out)",
"def cumprod(x, axis=None):\r\n return CumprodOp(axis=axis)(x)",
"def cumprod(self):\n return self._lift(lambda c: c.cumprod)",
"def cumprod(x, dim=-1, exclusive=False):\n if exclusive:\n length = x.size(dim)\n x = torch.narrow(F.pad(x, pad=(1, 0, 0, 0), value=1.0), dim, 0, length)\n return torch.cumprod(x, dim=dim)",
"def cumprod_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = cumprod_1d_nb(a[:, col])\n return out",
"def exclusive_cumprod(x):\n return torch.cumprod(torch.cat([x.new_ones(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)",
"def cumprod_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n cumprod = 1\n for i in range(a.shape[0]):\n if ~np.isnan(a[i]):\n cumprod *= a[i]\n out[i] = cumprod\n else:\n out[i] = np.nan\n return out",
"def exclusive_cumprod(x):\n batch_size, sequence_length = x.size()\n if torch.cuda.is_available():\n one_x = torch.cat([torch.ones(batch_size, 1).cuda(), x], dim=1)[:, :-1]\n else:\n one_x = torch.cat([torch.ones(batch_size, 1), x], dim=1)[:, :-1]\n return torch.cumprod(one_x, dim=1)",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:\n return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)",
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def multidim_cumsum(a):\n out = a.cumsum(-1)\n for i in range(2, a.ndim+1):\n np.cumsum(out, axis=-i, out=out)\n return out",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def reduce_by_multiplication(data):\n total = 1\n for num in data:\n total *= num\n return total",
"def relay_array_reduce(c, fn, array, shape):\n assert fn.is_constant(Primitive)\n assert shape.is_constant(tuple)\n fn = fn.value\n tshp = shape.value\n ary = c.ref(array)\n if fn == P.scalar_add:\n ashp = ashape(array)\n if len(tshp) < len(ashp):\n ts = (1,) * (len(ashp) - len(tshp)) + tshp\n else:\n ts = tshp\n axis = tuple(i for i, t in enumerate(ts) if t == 1)\n res = relay.op.sum(ary, axis=axis, keepdims=True)\n if len(tshp) < len(ashp):\n rtshp = tshp\n if tshp == ():\n tshp = (1,)\n res = relay.op.reshape(res, newshape=tshp)\n if rtshp == ():\n res = relay.op.take(res, relay.const(0))\n return res\n elif fn == P.scalar_mul:\n ashp = ashape(array)\n if len(tshp) in (0, len(ashp)):\n res = relay.op.prod(ary)\n else:\n raise NotImplementedError(\n 'We currently support only full product on an array.')\n return res\n else:\n raise NotImplementedError(f\"reduce with {fn}\")",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.78693175",
"0.78693175",
"0.78693175",
"0.74438745",
"0.73570323",
"0.72512066",
"0.71616554",
"0.6794507",
"0.66913575",
"0.6365985",
"0.6155289",
"0.60739994",
"0.6059755",
"0.60538614",
"0.602666",
"0.59202105",
"0.57904106",
"0.57707596",
"0.57707596",
"0.57707596",
"0.5762588",
"0.5762588",
"0.5749816",
"0.5749816",
"0.5749816",
"0.5738581",
"0.5718635",
"0.5644082",
"0.55872554",
"0.55872554"
] | 0.8048191 | 0 |
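`cumprod` behaves the same way but multiplies instead of adding, and `skipna` controls whether a missing value breaks the running product. A minimal sketch with made-up data:

```python
import numpy as np
import xarray as xr

arr = xr.DataArray([2.0, 3.0, np.nan, 4.0], dims="t")

print(arr.cumprod(dim="t").values)                # [ 2.  6.  6. 24.]
print(arr.cumprod(dim="t", skipna=False).values)  # [ 2.  6. nan nan]
```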
Promote a member's xp role to the next xp role (admin only). | async def promote(self, ctx, *, member = None):
# Only allow admins to change server stats
if not await self._can_run(ctx): return
em = discord.Embed(color = 0XFF8C00, description = "Menaikan jabatan role xp member ke role xp selanjutnya\n\n"
"**Panduan**\n"
"*`{}promote [member]`*"
.format(ctx.prefix))
em.set_footer(text = "Saat mengetik command, tanda [] tidak usah digunakan.\n{}".format(ctx.author),
icon_url = f"{ctx.author.avatar_url}")
if member == None:
return await ctx.send(embed=em)
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = Utils.suppressed(ctx, '┐( ̄ヘ ̄;)┌\nAku tidak dapat menemukan *{}*...'.format(memberName))
em = discord.Embed(color = 0XFF8C00, description = msg)
em.set_footer(text = "{}".format(ctx.author), icon_url = "{}".format(ctx.author.avatar_url))
return await ctx.send(embed = em)
# Get user's xp
xp = int(self.settings.getUserStat(member, ctx.guild, "XP"))
# Get the role list
promoArray = self.getSortedRoles(ctx.guild)
currentRole = self.getCurrentRoleIndex(member, ctx.guild)
nextRole = currentRole + 1
neededXp = 0
if nextRole >= len(promoArray):
msg = '┐( ̄ヘ ̄;)┌\nTidak ada role yang lebih tinggi untuk promote *{}*.'.format(DisplayName.name(member))
else:
newRole = DisplayName.roleForID(promoArray[nextRole]['ID'], ctx.guild)
neededXp = int(promoArray[nextRole]['XP'])-xp
self.settings.incrementStat(member, ctx.guild, "XP", neededXp)
# Start at the bottom role and add all roles up to newRole
addRoles = []
for i in range(0, nextRole+1):
addRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)
if addRole:
if not addRole in member.roles:
addRoles.append(addRole)
# await member.add_roles(*addRoles)
# Use role manager instead
self.settings.role.add_roles(member, addRoles)
if not newRole:
# Promotion role doesn't exist
msg = '┐( ̄ヘ ̄;)┌\nSepertinya role **{}** tidak ada dalam server.\n*{}* tetap diberikan sejumlah *{:,} xp*, tapi aku tidak dapat promote ke role yang tidak tercantum dalam list.\nPertimbangkan lagi untuk merevisi role xp.'.format(promoArray[nextRole]['Name'], DisplayName.name(member), neededXp)
else:
msg = '*{}* telah memberikan sejumlah *{:,} xp* dan menaikan ke role **{}**!'.format(DisplayName.name(member), neededXp, newRole.name)
self.bot.dispatch("xp", member, ctx.author, neededXp)
msgDone = Utils.suppressed(ctx,msg)
em = discord.Embed(color = 0XFF8C00, description = msgDone)
em.set_footer(text = "{}".format(ctx.author), icon_url = "{}".format(ctx.author.avatar_url))
await ctx.send(embed = em) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def promoteto(self, ctx, *, member = None, role = None):\r\n if not await self._can_run(ctx): return\r\n em = discord.Embed(color = 0XFF8C00, description = \"Menaikan role xp member ke role yang ditentukan\\n\"\r\n \"Pastikan role xp sudah terdaftar dalam list\\n\\n\"\r\n \"**Panduan**\\n\"\r\n \"*`{}promoteto [member] [role]`*\"\r\n .format(ctx.prefix))\r\n em.set_footer(text = \"Saat mengetik command, tanda [] tidak usah digunakan.\\n{}\".format(ctx.author),\r\n icon_url = \"{}\".format(ctx.author.avatar_url))\r\n\r\n if member == None:\r\n return await ctx.send(embed=em)\r\n\r\n if role == None:\r\n # Either a role wasn't set - or it's the last section\r\n if type(member) is str:\r\n # It' a string - the hope continues\r\n # Let's search for a name at the beginning - and a role at the end\r\n parts = member.split()\r\n memFromName = None\r\n for j in range(len(parts)):\r\n # Reverse search direction\r\n i = len(parts)-1-j\r\n # Name = 0 up to i joined by space\r\n nameStr = ' '.join(parts[0:i+1])\r\n # Role = end of name -> end of parts joined by space\r\n roleStr = ' '.join(parts[i+1:])\r\n memFromName = DisplayName.memberForName(nameStr, ctx.guild)\r\n if memFromName:\r\n # We got a member - let's check for a role\r\n roleFromName = DisplayName.roleForName(roleStr, ctx.guild)\r\n \r\n if not roleFromName == None:\r\n # We got a member and a role - break\r\n role = roleFromName\r\n break\r\n if memFromName == None:\r\n # Never found a member at all\r\n msg = '┐( ̄ヘ ̄;)┌\\nAku tidak dapat menemukan *{}* dalam server.'.format(member)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n if roleFromName == None:\r\n # We couldn't find one or the other\r\n return await ctx.send(embed = em)\r\n\r\n member = memFromName\r\n\r\n # Get user's xp\r\n xp = int(self.settings.getUserStat(member, ctx.guild, \"XP\"))\r\n\r\n # Get the role list\r\n promoArray = self.getSortedRoles(ctx.guild)\r\n nextRole = self.getIndexForRole(role, ctx.guild)\r\n currentRole = self.getCurrentRoleIndex(member, ctx.guild)\r\n vowels = 'aeiou'\r\n\r\n if nextRole == None:\r\n em = discord.Embed(color = 0XFF8C00, description = \"> ┐( ̄ヘ ̄;)┌\\n\"\r\n \"> Role **{}** tidak terdaftar dalam list role xp.\\n> \\n\"\r\n \"> Kamu dapat menambahkan role xp dengan cara:\\n\"\r\n \"> `{}addxprole [role] [jumlah xp]`\"\r\n .format(role.name,\r\n ctx.prefix))\r\n em.set_author(name = \"Role xp tidak terdaftar\", icon_url = \"https://cdn.discordapp.com/attachments/518118753226063887/725569194304733435/photo.jpg\")\r\n em.set_footer(name = \"Saat mengetik command, tanda [] tidak usah digunakan.\\nHelp command color\", text = f\"Request By : {ctx.author.name}\", icon_url = f\"{ctx.author.avatar_url}\")\r\n return await ctx.send(embed=em)\r\n \r\n if currentRole == nextRole:\r\n # We are already the target role\r\n if role.name[:1].lower() in vowels:\r\n msg = '*{}* sudah memiliki role **{}**.'.format(DisplayName.name(member), role.name)\r\n else:\r\n msg = '*{}* sudah memiliki role **{}**.'.format(DisplayName.name(member), role.name)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n elif currentRole > nextRole:\r\n # We are a higher role than 
the target\r\n msg = '*{}* sudah memiliki role **{}** dalam koleksi role mereka.'.format(DisplayName.name(member), role.name)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n\r\n if nextRole >= len(promoArray):\r\n msg = '┐( ̄ヘ ̄;)┌\\nTidak ada role yang lebih tinggi untuk mempromosikan kenaikan role xp *{}*.'.format(DisplayName.name(member))\r\n else:\r\n newRole = DisplayName.roleForID(promoArray[nextRole]['ID'], ctx.guild)\r\n neededXp = int(promoArray[nextRole]['XP'])-xp\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n # Start at the bottom role and add all roles up to newRole\r\n addRoles = []\r\n for i in range(0, nextRole+1):\r\n addRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if addRole:\r\n if not addRole in member.roles:\r\n addRoles.append(addRole)\r\n # await member.add_roles(*addRoles)\r\n # Use role manager instead\r\n self.settings.role.add_roles(member, addRoles)\r\n if not newRole:\r\n # Promotion role doesn't exist\r\n msg = '┐( ̄ヘ ̄;)┌\\nSepertinya **{}** tidak ada dalam server.\\n*{}* akan tetap diberikan sejumlah *{:,} xp*, tapi aku tidak bisa menambahkan role yang tidak ada dalam list. Pertimbangkan lagi untuk merevisi role xp dalam server mu.'.format(promoArray[nextRole]['Name'], DisplayName.name(member), neededXp)\r\n else:\r\n msg = '*{}* telah di berikan sejumlah *{:,} xp* dan dinaikan ke role **{}**!'.format(DisplayName.name(member), neededXp, newRole.name)\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)",
"async def demote(self, ctx, *, member = None):\r\n if not await self._can_run(ctx): return \r\n em = discord.Embed(color = 0XFF8C00, description = \"> Menurunkan jabatan role xp kepada member ke role xp dibawahnya\\n> \\n\"\r\n \"> **Panduan**\\n\"\r\n \"> `{}demote [member]`\"\r\n .format(ctx.prefix))\r\n em.set_footer(text = \"Saat mengetik command, tanda [] tidak usah digunakan.\\n{}\".format(ctx.author), icon_url = f\"{ctx.author.avatar_url}\")\r\n\r\n if member == None:\r\n return await ctx.send(embed=em)\r\n\r\n if type(member) is str:\r\n memberName = member\r\n member = DisplayName.memberForName(memberName, ctx.message.guild)\r\n if not member:\r\n msg = '┐( ̄ヘ ̄;)┌\\nAku tidak dapat menemukan *{}* dalam server...'.format(memberName)\r\n # Check for suppress\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n\r\n # Get user's xp\r\n xp = int(self.settings.getUserStat(member, ctx.guild, \"XP\"))\r\n\r\n # Get the role list\r\n promoArray = self.getSortedRoles(ctx.guild)\r\n currentRole = self.getCurrentRoleIndex(member, ctx.guild)\r\n nextRole = currentRole - 1\r\n if nextRole == -1:\r\n # We're removing the user from all roles\r\n neededXp = int(promoArray[0]['XP'])-xp-1\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n remRoles = []\r\n for i in range(0, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n msg = 'sejumlah *{} xp* telah dikurangi dari *{}* dan role dia telah diturunkan dari system xp!'.format(neededXp*-1, DisplayName.name(member))\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n elif nextRole < -1:\r\n msg = '┐( ̄ヘ ̄;)┌\\nTidak ada role xp yang lebih rendah untuk menurunkan role milik *{}*.'.format(DisplayName.name(member))\r\n else:\r\n newRole = DisplayName.roleForID(promoArray[nextRole]['ID'], ctx.guild)\r\n neededXp = int(promoArray[nextRole]['XP'])-xp\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n # Start at the currentRole and remove that and all roles above\r\n remRoles = []\r\n for i in range(currentRole, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n if not newRole:\r\n # Promotion role doesn't exist\r\n msg = '┐( ̄ヘ ̄;)┌\\nSepertinya **{}** sudah tidak ada dalam server. 
namun sejumlah *{:,} xp* milik *{}* akan tetap dikurangi\\n tapi aku tidak dapat menurunkan jabatan role xp, pertimbangkan lagi untuk merevisi role xp dalam server mu.'.format(promoArray[nextRole]['Name'], neededXp*-1, DisplayName.name(member))\r\n else:\r\n msg = 'sejumlah *{:,} xp* milik *{}* telah dikurangi dan jabatan role xp telah diturunkan ke **{}**!'.format(neededXp*-1, DisplayName.name(member), newRole.name)\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)",
"def addExperience(self, xp):\n self.xp += xp\n if self.xp >= self.xpNeeded:\n self.LevelUpPlayer()",
"async def xp_handler(self, user: discord.User):\n\n xp = await self.get_player_stat(user, 'xp')\n lvl = await self.get_player_stat(user, 'lvl')\n\n next_xp = self.XP_LEVELS[str(lvl)][\"Progress\"]\n\n if xp >= next_xp:\n carry = xp - next_xp\n else:\n return False\n\n await self.update_player_stat(user, 'xp', carry)\n await self.update_player_stat(user, 'lvl', lvl + 1)\n\n level_up_msg = f\"Level up! You have reached level {lvl+1}.\"\n\n reward_tokens = self.XP_LEVELS[str(lvl)][\"TokensRewardCount\"]\n\n tokens = await self.get_player_stat(user, 'tokens')\n\n token_doubler = await self.get_player_stat(user, 'token_doubler')\n\n upd_td = token_doubler - reward_tokens\n if upd_td < 0:\n upd_td = 0\n\n if token_doubler > reward_tokens:\n reward_tokens *= 2\n else:\n reward_tokens += token_doubler\n\n reward_msg = f\"Rewards: {reward_tokens} {emojis['token']}\"\n\n tokens += reward_tokens\n await self.update_player_stat(user, 'tokens', tokens)\n await self.update_player_stat(user, 'token_doubler', upd_td)\n\n return (level_up_msg, reward_msg)",
"async def admin(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **admin**\\n\\n **eboard admin auto** - updates the '\n 'new seats given current election data.\\n\\n**eboard admin set <position> <User#0000>** - assigns a '\n 'position to target user.\\n\\n**eboard admin remove <position> <User#0000>** - remove a target user '\n 'from their position.\\n\\n**eboard admin list** - lists the positions in the SQLite table.')",
"async def add_experience(message, user, exp):\n server = db[str(user.guild.id)]\n stats = list(server.find({'id': user.id}))\n exp = stats[-1]['experience'] + exp\n new_stats = {\"$set\": {'experience': exp}}\n server.update_one(stats[-1], new_stats)\n print(f'Added xp {exp} to {user.id} in {user.guild.name}')\n await level_up(message.author, message.channel)",
"async def listxpblock(self, ctx):\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\r\n\t\tpromoArray = self.settings.getServerStat(ctx.message.guild, \"XpBlockArray\")\r\n\t\t\r\n\t\t# rows_by_lfname = sorted(rows, key=itemgetter('lname','fname'))\r\n\t\t\r\n\t\t#promoSorted = sorted(promoArray, key=itemgetter('Name'))\r\n\r\n\t\tif not len(promoArray):\r\n\t\t\troleText = \"There are no users or roles xp blocked yet. Use `{}xpblock [user_or_role]` to add some.\".format(ctx.prefix)\r\n\t\t\tawait ctx.channel.send(roleText)\r\n\t\t\treturn\r\n\t\t\r\n\t\troleText = \"__**Current Xp Blocked Users and Roles:**__\\n\\n\"\r\n\t\t\r\n\t\tfor arole in promoArray:\r\n\t\t\ttest = DisplayName.memberForID(arole, ctx.guild)\r\n\t\t\tif test:\r\n\t\t\t\t# It's a user\r\n\t\t\t\troleText = roleText + \"**{}**, \".format(DisplayName.name(test))\r\n\t\t\t\tcontinue\r\n\t\t\ttest = DisplayName.roleForID(arole, ctx.guild)\r\n\t\t\tif test:\r\n\t\t\t\t# It's a role\r\n\t\t\t\troleText = roleText + \"**{}** (Role), \".format(Nullify.escape_all(test.name))\r\n\t\t\t\tcontinue\r\n\t\t\t# Didn't find a role or person\r\n\t\t\troleText = roleText + \"**{}** (removed from server), \".format(arole)\r\n\r\n\t\troleText = roleText[:-2]\r\n\r\n\t\tawait ctx.channel.send(roleText)",
"def role_command():",
"async def xp(ctx, user: discord.Member = None):\n user = user or ctx.message.author\n with open('users.json') as f:\n data = json.load(f)\n\n if data.get(user.id) is not None:\n await bot.say(f'`XP count is at {data[user.id][\"experience\"]}.`')\n else:\n await bot.say(f'`I cannot see {user.mention} in my list of users.`')",
"async def perm_check(ctx,roles_list: List[int]):\n for n,role in enumerate(ctx.author.roles):\n # If authorized\n if role.id in roles_list:\n return \"pass\"\n # Not authorized\n if n == len(ctx.author.roles) - 1:\n return await ctx.send(embed=Embed(title=\"> **⚠ Attention !**\",description=\"Vous n'avez pas la permission d'éxecutez cette commande !\",color=Colour.from_rgb(255,255,0)).set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url))",
"def hasPerm(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"perm_name\",\"admin_username\")\n if request.auth_name!=request[\"admin_username\"]: \n request.getAuthNameObj().canDo(\"SEE ADMIN PERMISSIONS\")\n return admin_main.getLoader().getAdminByName(request[\"admin_username\"]).hasPerm(request[\"perm_name\"])",
"async def demoteto(self, ctx, *, member = None, role = None):\r\n if not await self._can_run(ctx): return\r\n em = discord.Embed(color = 0XFF8C00, description = \"> Menurunkan jabatan role xp ke role xp tertentu kepada member\\n\"\r\n \"> Pastikan role xp sudah terdaftar dalam list\\n> \\n\"\r\n \"> **Panduan**\\n\"\r\n \"> `{}demote [member]`\"\r\n .format(ctx.prefix))\r\n em.set_footer(text = \"Saat mengetik command, tanda [] tidak usah digunakan.\\n{}\".format(ctx.author), icon_url = f\"{ctx.author.avatar_url}\")\r\n\r\n if member == None:\r\n return await ctx.send(embed=em)\r\n\r\n if role == None:\r\n # Either a role wasn't set - or it's the last section\r\n if type(member) is str:\r\n # It' a string - the hope continues\r\n # Let's search for a name at the beginning - and a role at the end\r\n parts = member.split()\r\n memFromName = None\r\n for j in range(len(parts)):\r\n # Reverse search direction\r\n i = len(parts)-1-j\r\n # Name = 0 up to i joined by space\r\n nameStr = ' '.join(parts[0:i+1])\r\n # Role = end of name -> end of parts joined by space\r\n roleStr = ' '.join(parts[i+1:])\r\n memFromName = DisplayName.memberForName(nameStr, ctx.message.guild)\r\n if memFromName:\r\n # We got a member - let's check for a role\r\n roleFromName = DisplayName.roleForName(roleStr, ctx.message.guild)\r\n \r\n if not roleFromName == None:\r\n # We got a member and a role - break\r\n role = roleFromName\r\n break\r\n if memFromName == None:\r\n # Never found a member at all\r\n msg = 'I couldn\\'t find *{}* on the server.'.format(member)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n if roleFromName == None:\r\n # We couldn't find one or the other\r\n return await ctx.send(embed = em)\r\n\r\n member = memFromName\r\n\r\n # Get user's xp\r\n xp = int(self.settings.getUserStat(member, ctx.guild, \"XP\"))\r\n\r\n # Get the role list\r\n promoArray = self.getSortedRoles(ctx.guild)\r\n nextRole = self.getIndexForRole(role, ctx.guild)\r\n currentRole = self.getCurrentRoleIndex(member, ctx.guild)\r\n vowels = 'aeiou'\r\n \r\n if nextRole == None:\r\n msg = '┐( ̄ヘ ̄;)┌\\nRole **{}** tidak terdaftar dalam list xp role\\nKamu dapat menambahkannya dengan command `{}addxprole [role] [jumlah xp]`.'.format(role.name, ctx.prefix)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n\r\n if currentRole == nextRole:\r\n # We are already the target role\r\n if role.name[:1].lower() in vowels:\r\n msg = '*{}* sudah mendapatkan role **{}**.'.format(DisplayName.name(member), role.name)\r\n else:\r\n msg = '*{}* sudah mendapatkan role **{}**.'.format(DisplayName.name(member), role.name)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n elif currentRole < nextRole:\r\n # We are a higher role than the target\r\n msg = '┐( ̄ヘ ̄;)┌\\nAku tidak dapat menurunkan xp role *{}* ke xp role yang lebih tinggi.'.format(DisplayName.name(member))\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = 
msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(Utils.suppressed(ctx,msg))\r\n\r\n newRole = DisplayName.roleForID(promoArray[nextRole]['ID'], ctx.guild)\r\n neededXp = int(promoArray[nextRole]['XP'])-xp\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n # Start at the currentRole and remove that and all roles above\r\n remRoles = []\r\n for i in range(currentRole, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n # Only add the ones we have\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n if not newRole:\r\n # Promotion role doesn't exist\r\n msg = '┐( ̄ヘ ̄;)┌\\nSepertinya **{}** sudah tidak ada dalam server. namun sejumlah *{:,} xp* milik *{}* akan tetap dikurangi\\n tapi aku tidak dapat menurunkan jabatan role xp, pertimbangkan lagi untuk merevisi role xp dalam server mu.'.format(promoArray[nextRole]['Name'], neededXp*-1, DisplayName.name(member))\r\n else:\r\n msg = 'sejumlah *{:,} xp* milik *{}* telah dikurangi dan jabatan role xp telah diturunkan ke **{}**!'.format(neededXp*-1, DisplayName.name(member), newRole.name)\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)",
"async def xpblock(self, ctx, *, user_or_role : str = None):\r\n\r\n\t\tusage = 'Usage: `{}xpblock [user_or_role]`'.format(ctx.prefix)\r\n\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\r\n\t\tif user_or_role == None:\r\n\t\t\tawait ctx.message.channel.send(usage)\r\n\t\t\treturn\r\n\r\n\t\troleName = user_or_role\r\n\t\tis_user = True\r\n\t\tif type(user_or_role) is str:\r\n\t\t\t# Check user first\r\n\t\t\tuser_or_role = DisplayName.memberForName(roleName, ctx.guild)\r\n\t\t\tif not user_or_role:\r\n\t\t\t\tis_user = False\r\n\t\t\t\t# Check role\r\n\t\t\t\tif roleName.lower() == \"everyone\" or roleName.lower() == \"@everyone\":\r\n\t\t\t\t\tuser_or_role = ctx.guild.default_role\r\n\t\t\t\telse:\r\n\t\t\t\t\tuser_or_role = DisplayName.roleForName(roleName, ctx.guild)\r\n\t\t\t\t\t\r\n\t\t\tif not user_or_role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(Nullify.escape_all(roleName))\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\t\t\r\n\t\t# Check if they're admin or bot admin\r\n\t\tif Utils.is_bot_admin(user_or_role):\r\n\t\t\treturn await ctx.send(\"You can't block other admins with this command.\")\r\n\t\tif is_user:\r\n\t\t\tur_name = DisplayName.name(user_or_role)\r\n\t\telse:\r\n\t\t\tur_name = Nullify.escape_all(user_or_role.name)\r\n\r\n\t\t# Now we see if we already have that role in our list\r\n\t\tpromoArray = self.settings.getServerStat(ctx.message.guild, \"XpBlockArray\")\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Get the role that corresponds to the id\r\n\t\t\tif str(aRole) == str(user_or_role.id):\r\n\t\t\t\t# We found it - throw an error message and return\r\n\t\t\t\tmsg = '**{}** is already in the list.'.format(ur_name)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tpromoArray.append(user_or_role.id)\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"XpBlockArray\", promoArray)\r\n\r\n\t\tmsg = '**{}** added to list.'.format(ur_name)\r\n\t\tawait ctx.message.channel.send(msg)\r\n\t\treturn",
"def add_xp(self, xp_lookup, xp):\n self.current_xp += xp\n old_level = self.level\n self.level = self.species.level(xp_lookup, self.current_xp)\n\n messages = [u\"{0} gains {1} experience\".format(self.in_battle_name(), xp)]\n\n if old_level != self.level:\n messages.append(u\"{0} is now level {1}!\".format(self.in_battle_name(), self.level))\n\n return messages",
"def add_xp(self):\n pass",
"async def vouch(ctx, *, member_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n server = ctx.message.server\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n member = discord.utils.find(lambda c: c.name.lower() == member_name.lower(), server.members)\n roles = member.roles\n new_role = discord.utils.find(lambda r: r.name.lower() == required_role, server.roles)\n roles.append(new_role)\n await amor_manager.replace_roles(member, *roles)\n await amor_manager.say('{0} granted citizenship'.format(member.name))",
"def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator",
"def administrarpermiso():\n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can():\n isAdmin = request.args.get('value')\n rol = request.args.get('idrol')\n permisos = db_session.query(Permiso).order_by(Permiso.id)\n lista = []\n if not isAdmin :\n #=======================================================================\n # en esta lista se inserta los permisos que estan asignados a un rol\n #=======================================================================\n yourPermiso=getPermisosByRol(rol)\n for p in yourPermiso:\n lista.append(p)\n return render_template('permiso/administrarpermiso.html', permisos = permisos, isAdministrar = isAdmin, idrol = rol, asignados = lista)\n else:\n flash('Sin permisos para administrar proyectos', 'permiso')\n return render_template('index.html')",
"def manage_roles(_) -> int:\n return 1 << 28",
"def manage_roles(_) -> int:\n return 1 << 28",
"def admin_roles(request):\n user = User.objects.get(username=request.user.username)\n permisos = get_permisos_sistema(user)\n return render_to_response('admin/roles/roles.html',{'user':user,\n 'ver_roles':'Ver roles' in permisos,\n 'crear_rol': 'Crear rol' in permisos,\n 'mod_rol': 'Modificar rol' in permisos,\n 'eliminar_rol': 'Eliminar rol' in permisos},context_instance=RequestContext(request))",
"async def level(self, ctx, user: discord.User = None):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n if user is None:\n id = ctx.author.id\n name = ctx.author.display_name\n else:\n id = user.id\n name = user.display_name\n xp = config.load_xp()\n exp = 0\n level = 0\n if str(guild) in xp['guilds']:\n if str(id) in xp['guilds'][str(guild)]:\n exp = xp['guilds'][str(guild)][str(id)]['xp']\n level = xp['guilds'][str(guild)][str(id)]['level']\n await ctx.send(name + \" is currently level: \" + str(level) + \" with \" + str(exp) + \" experience!\")\n else:\n await ctx.send(\"leveling is currently disabled on this server!\")",
"def promote_user(username):\n user = User.get_user_by_username(username)\n user.is_admin = True\n user.save()",
"def check_if_admin(connection,username):\r\n with connection:\r\n c = connection.execute(SELECT_USER_BY_ADMIN_PREVILAGES,(username,))\r\n return c.fetchone()",
"def someturbotadmin():\n cond = lambda member: member != ADMIN and ADMIN_ROLE in member.roles\n return random.choice(list(filter(cond, CHANNEL_MEMBERS)))",
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def show_privileges(self):\n print(\"The set of privileges for this administrator are as follows: \")\n if self.privileges:\n \"\"\"I forgot if and else statment\"\"\"\n for privilege in self.privileges:\n print(\"-\" + str(privilege.title()))\n else:\n print(\"This user has no privileges.\")",
"def __promote_admin_pressed(self):\n\n id = self.__admin_controls_entry.get()\n if not id.isdigit():\n print(\"failed to promote the user, the given id isn't a number\")\n return\n self.__admin_controls_entry.delete(0, 'end')\n self.__data_base.promote_admin(id=id)",
"def show_privileges(self):\n print(\"This admin user has the following privileges:\")\n for item in self.privileges:\n print(f\"- {item}\")",
"def check_if_admin(bot, update, *args, **kwargs):\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user:\n self.request_access(bot, user_id)\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n elif user[\"loot_admin\"] or user[\"admin\"]:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n else:\n update.message.reply_text(\"Non sei abilitato ad usare questo comando\")\n return"
] | [
"0.64883465",
"0.6299787",
"0.61075544",
"0.5897025",
"0.5765006",
"0.57568175",
"0.56386036",
"0.56318307",
"0.56316966",
"0.56002736",
"0.55545866",
"0.55380803",
"0.55184126",
"0.54676664",
"0.54562855",
"0.5447556",
"0.5395318",
"0.5377828",
"0.53774434",
"0.53774434",
"0.53772205",
"0.5364498",
"0.5350239",
"0.5332197",
"0.53196955",
"0.53057736",
"0.5304118",
"0.5301244",
"0.5279157",
"0.52705467"
] | 0.7284638 | 0 |
Authenticates a user based on the OIDC code flow. | def authenticate(self, request, **kwargs):
self.request = request
if not self.request:
return None
state = self.request.GET.get('state')
code = self.request.GET.get('code')
nonce = kwargs.pop('nonce', None)
if not code or not state:
return None
reverse_url = import_from_settings('OIDC_AUTHENTICATION_CALLBACK_URL',
'oidc_authentication_callback')
token_payload = {
'client_id': self.OIDC_RP_CLIENT_ID,
'client_secret': self.OIDC_RP_CLIENT_SECRET,
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': absolutify(
self.request,
reverse(reverse_url)
),
}
# Get the token
token_info = self.get_token(token_payload)
id_token = token_info.get('id_token')
access_token = token_info.get('access_token')
refresh_token = token_info.get('refresh_token')
# Validate the token
payload = self.verify_token(id_token, nonce=nonce)
# Store users tokens
usertokens, created = UserTokens.objects.update_or_create(
user=payload['sub'],
defaults={'access_token': access_token,
'refresh_token': refresh_token}
)
if payload:
self.store_tokens(access_token, id_token)
try:
return self.get_or_create_user(access_token, id_token, payload)
except SuspiciousOperation as exc:
LOGGER.warning('failed to get or create user: %s', exc)
return None
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticate_user(authentication_code):\n\n for suffix in ('', '=', '=='):\n attempt = authentication_code + suffix\n decoded = base64.decodestring(attempt)\n fields = decoded.split('_')\n\n email, user_id, time_stamp, str_hex = fields\n\n if time_stamp < time.time():\n # Authentication Code Expired\n raise seerpod_exceptions.AuthenticationCodeExpired('Authentication code expired',\n response_data=authentication_code)\n user = None #business_contact_api.BusinessContacts().get_user_detail_from_email(email)\n\n if not user:\n continue\n\n if attempt == generate_authentication_code(\n user.id, time_stamp, user.owner_email_id, user.password):\n return user\n\n # Invalid authentication code\n raise seerpod_exceptions.InvalidAuthenticationCode('Invalid Authentication code',\n response_data=authentication_code)",
"async def async_step_auth(self, user_input=None):\n if user_input.get(const.CODE):\n self.data = user_input\n return self.async_external_step_done(next_step_id=\"finish\")\n\n profile = user_input.get(const.PROFILE)\n\n auth_client = self.get_auth_client(profile)\n\n url = auth_client.get_authorize_url()\n\n return self.async_external_step(step_id=\"auth\", url=url)",
"def authenticate_user():\n\n error = request.args.get(\"error\")\n if error:\n logger.warning(\"Google sent us an error via OAuth2: %s\", error)\n\n return redirect(url_for(\"login\"))\n\n # Get OAuth2 authentication code\n code = request.args.get(\"code\")\n\n # Exchange code for fresh credentials\n credentials = flow.step2_exchange(code)\n\n # Extract email and email verification\n id_token = credentials.id_token\n email = id_token[\"email\"]\n verified_email = id_token[\"email_verified\"]\n\n if verified_email is True:\n # Find the user with the given email\n try:\n user = FlaskUser(User.objects.get(email = email))\n except User.DoesNotExist:\n user = None\n\n if not user:\n flash(\"A Galah account does not exist for this email.\", \"error\")\n\n logger.info(\n \"User %s has attempted to log in via OAuth2 but an account \"\n \"does not exist for them.\", email\n )\n else:\n login_user(user)\n\n logger.info(\n \"User %s has succesfully logged in via OAuth2.\", email\n )\n\n return redirect(url_for(\"home\"))\n\n else:\n flash(\"Sorry, we couldn't verify your email\", \"error\")\n\n logger.info(\"User %s failed to authenticate with OAuth2 because \"\n \"their email has not been verified with google.\", email)\n\n return redirect(url_for(\"login\"))",
"def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)",
"def authenticate_user(self, *, provider: str, user_name: str, password: str) -> UserId:",
"def _oauth_callback(self):\n tokens = self.oauth.get_raw_access_token(data={\n 'code': flask.request.args.get('code', ''),\n 'redirect_uri': self.oauth_redirect_uri,\n 'grant_type': 'authorization_code'\n }).json()\n user = User(tokens=tokens, app=self)\n\n # Add subscriptions\n self.subscriptions.init_user(user)\n\n # Call endpoint for user login\n return self.subscriptions.call_endpoint(\"login\", user) or \"\"",
"def get_authenticated_user(self, redirect_uri, client_id, client_secret, code, callback):\n http = self.get_auth_http_client()\n body = urllib_parse.urlencode({\n \"redirect_uri\": redirect_uri,\n \"code\": code,\n \"client_id\": client_id, #self.settings[self._OAUTH_SETTINGS_KEY]['key'],\n \"client_secret\": client_secret,#self.settings[self._OAUTH_SETTINGS_KEY]['secret'],\n \"grant_type\": \"authorization_code\",\n })\n\n http.fetch(self._OAUTH_ACCESS_TOKEN_URL,\n functools.partial(self._on_access_token, callback),\n method=\"POST\", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)",
"def authenticate(user, request):",
"def get_authenticated_user(self, redirect_uri, client_id, client_secret,\n code, callback, extra_fields=None):\n logging.debug('gau ' + redirect_uri)\n http = tornado.httpclient.AsyncHTTPClient()\n args = {\n \"redirect_uri\": redirect_uri,\n \"code\": code,\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n }\n \n fields = set(['id', 'name', 'first_name', 'last_name',\n 'locale', 'picture', 'link'])\n if extra_fields:\n fields.update(extra_fields)\n\n \n http.fetch(self._oauth_request_token_url(**args),\n self.async_callback(self._on_access_token, redirect_uri, client_id,\n client_secret, callback, fields))",
"def auth_orcid(request):\n\n client_id = settings.ORCID_CLIENT_ID\n client_secret = settings.ORCID_CLIENT_SECRET\n redirect_uri = settings.ORCID_REDIRECT_URI\n scope = list(settings.ORCID_SCOPE.split(\",\"))\n oauth = OAuth2Session(client_id, redirect_uri=redirect_uri,\n scope=scope)\n params = request.GET.copy()\n code = params['code']\n\n try:\n token = oauth.fetch_token(settings.ORCID_TOKEN_URL, code=code,\n include_client_id=True, client_secret=client_secret)\n try:\n validators.validate_orcid_token(token['access_token'])\n token_valid = True\n except ValidationError:\n messages.error(request, 'Validation Error: ORCID token validation failed.')\n token_valid = False\n except InvalidGrantError:\n messages.error(request, 'Invalid Grant Error: authorization code may be expired or invalid.')\n token_valid = False\n\n if token_valid:\n orcid_profile, _ = Orcid.objects.get_or_create(user=request.user)\n orcid_profile.orcid_id = token.get('orcid')\n orcid_profile.name = token.get('name')\n orcid_profile.access_token = token.get('access_token')\n orcid_profile.refresh_token = token.get('refresh_token')\n orcid_profile.token_type = token.get('token_type')\n orcid_profile.token_scope = token.get('scope')\n orcid_profile.token_expiration = token.get('expires_at')\n orcid_profile.full_clean()\n orcid_profile.save()\n\n return redirect('edit_orcid')",
"def _auth_oauth_signin(self, provider, validation, params):\n\t\toauth_uid = validation['user_id']\n\t\ttry:\n\t\t\toauth_user = self.search([(\"oauth_uid\", \"=\", oauth_uid), ('oauth_provider_id', '=', provider)])\n\t\t\tif not oauth_user:\n\t\t\t\traise AccessDenied()\n\t\t\tassert len(oauth_user) == 1\n\t\t\toauth_user.write({'oauth_access_token': params['access_token']})\n\t\t\treturn oauth_user.login\n\t\texcept AccessDenied as access_denied_exception:\n\t\t\tif self.env.context.get('no_user_creation'):\n\t\t\t\treturn None\n\t\t\tstate = json.loads(params['state'])\n\t\t\tif 't' in state:\n\t\t\t\ttoken = state.get('t')\n\t\t\telse:\n\t\t\t\ttoken = None\n\t\t\tvalues = self._generate_signup_values(provider, validation, params)\n\t\t\ttry:\n\t\t\t\t_, login, _ = self.signup(values, token)\n\t\t\t\treturn login\n\t\t\texcept (SignupError, UserError):\n\t\t\t\traise access_denied_exception",
"def auth(self, user):",
"def authorize():\n resp = git_auth.authorized_response()\n user_info = git_auth.get('user', token=(resp[\"access_token\"],)).data\n u = db_session.query(User).filter(User.email == user_info['email']).first()\n if not u:\n u = User(user_info['login'], user_info['email'])\n db_session.add(u)\n db_session.commit()\n login_user(u, remember=True)\n return redirect(url_for('index'))",
"def login():\n if app.testing:\n callback_url = url_for('user.authorize', _external=True)\n else:\n callback_url = 'https://codegolf.uqcs.org.au/user/authorize'\n return git_auth.authorize(callback=callback_url)",
"def initiateAuthentication(identity_url, return_to=None):",
"def login():\n auth_state = str(uuid.uuid4())\n SESSION.auth_state = auth_state\n\n # For this sample, the user selects an account to authenticate. Change\n # this value to 'none' for \"silent SSO\" behavior, and if the user is\n # already authenticated they won't need to re-authenticate.\n prompt_behavior = 'select_account'\n\n params = urllib.parse.urlencode({'response_type': 'code',\n 'client_id': config.CLIENT_ID,\n 'redirect_uri': config.REDIRECT_URI,\n 'state': auth_state,\n 'resource': config.RESOURCE,\n 'prompt': prompt_behavior})\n\n return bottle.redirect(config.AUTHORITY_URL + '/oauth2/authorize?' + params)",
"def authenticate(self, *args, **kwargs):\n # Validate backend and arguments. Require that the Social Auth\n # response be passed in as a keyword argument, to make sure we\n # don't match the username/password calling conventions of\n # authenticate.\n if not (self.name and kwargs.get(self.name) and 'response' in kwargs):\n return None\n\n response = kwargs.get('response')\n pipeline = PIPELINE\n kwargs = kwargs.copy()\n kwargs['backend'] = self\n\n if 'pipeline_index' in kwargs:\n pipeline = pipeline[kwargs['pipeline_index']:]\n else:\n kwargs['details'] = self.get_user_details(response)\n kwargs['uid'] = self.get_user_id(kwargs['request'])\n kwargs['is_new'] = False\n \n out = self.pipeline(pipeline, *args, **kwargs)\n if not isinstance(out, dict):\n return out\n\n social_user = out.get('social_user')\n if social_user:\n # define user.social_user attribute to track current social\n # account\n user = social_user.user\n user.social_user = social_user\n user.is_new = out.get('is_new')\n return user",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)",
"async def oidc_start(request: aiohttp.web.Request) -> aiohttp.web.Response:\n try:\n oidc = request.app[\"oidc_client\"].begin(\"oidc\")\n except Exception as e:\n # This can be caused if config is improperly configured, and\n # oidcrp is unable to fetch oidc configuration from the given URL\n request.app[\"Log\"].error(f\"OIDC authorization request failed: {e}\")\n raise aiohttp.web.HTTPInternalServerError(\n reason=\"OIDC authorization request failed.\"\n )\n\n response = aiohttp.web.Response(status=302, reason=\"Redirection to login\")\n response.headers[\"Location\"] = oidc[\"url\"]\n return response",
"def auth_user(self, username, email, first_name = '', last_name = '', is_staff = False, is_superuser = False):\n if not self.conf.ICEBERG_API_PRIVATE_KEY:\n raise IcebergMissingApplicationSettingsError()\n\n timestamp = int(time.time())\n secret_key = self.conf.ICEBERG_API_PRIVATE_KEY\n\n to_compose = [username, email, first_name, last_name, is_staff, is_superuser, timestamp]\n hash_obj = hmac.new(b\"%s\" % secret_key, b\";\".join(str(x) for x in to_compose), digestmod = hashlib.sha1)\n message_auth = hash_obj.hexdigest()\n\n data = {\n 'username': username,\n 'email': email,\n 'first_name': first_name,\n 'last_name': last_name,\n 'is_staff': is_staff,\n 'is_superuser': is_superuser,\n 'timestamp': timestamp,\n 'message_auth': message_auth\n }\n\n response = self.request('user/auth/', args = data)\n\n self.username = username\n self.access_token = response['access_token']\n\n self._auth_response = response\n\n return self",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)",
"def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)",
"def auth_code_handler(self, request, pk=None):\n try:\n # Get xero auth access information form xero connection\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n\n\n if len(stored_values) == 0:\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n secret_keys = Utils.get_access_keys(pk)\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n exists = AccountingOauth2.objects.filter(company=pk).first()\n if not exists:\n auth = AccountingOauth2(accessToken=stored_values['consumer_key'],\n accessSecretKey=stored_values['rsa_key'],\n company_id=pk)\n auth.save()\n else:\n exists.accessToken = stored_values['consumer_key']\n exists.accessSecretKey = stored_values['rsa_key']\n exists.save()\n else:\n auth_verifier_uri = settings.XERO_AUTH_VERIFIER_URI\n oauth_verifier = request.GET.get('oauth_verifier')\n credentials = Utils.get_xero_public_credentials(stored_values)\n\n if credentials.expired():\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n # Verify the auth verifier for establish the connection\n\n credentials.verify(oauth_verifier)\n # Resave our verified credentials\n for key, value in credentials.state.items():\n OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})\n\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n exists = AccountingOauth2.objects.filter(company=pk).first()\n\n if exists:\n exists.accessToken = stored_values['oauth_token']\n exists.realmId = oauth_verifier\n exists.accessSecretKey = stored_values['oauth_token_secret']\n exists.tokenAcitvatedOn = stored_values['oauth_expires_at']\n exists.tokenExpiryON = stored_values['oauth_authorization_expires_at']\n exists.save()\n else:\n auth = AccountingOauth2(accessToken=stored_values['oauth_token'],\n refreshToken='',\n realmId=oauth_verifier,\n accessSecretKey=stored_values['oauth_token_secret'],\n tokenAcitvatedOn=stored_values['oauth_expires_at'],\n tokenExpiryON=stored_values['oauth_authorization_expires_at'],\n company_id=pk)\n auth.save()\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL',\n # 'http://localhost:4200/coa-match/quickbooks')\n\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL','http://ec2-52-207-28-114.compute-1.amazonaws.com/ix/coa-match/quickbooks')\n\n # return redirect(auth_redirect_url)\n\n except Exception as e:\n auth_cancel_url = settings.QBO_AUTH_CANCEL_URL\n Utils.send_company_misconfig(pk, e)\n return redirect(auth_cancel_url + '/error')\n #return Utils.dispatch_success(request, 'TOKEN_ALREADY_VALIDATED')\n\n auth_redirect_url = settings.XERO_AUTH_REDIRECT_URL\n return redirect(auth_redirect_url)\n # return Utils.dispatch_success(request, stored_values)",
"def signin_user(request):\r\n if (settings.FEATURES['AUTH_USE_CERTIFICATES'] and\r\n external_auth.views.ssl_get_cert_from_request(request)):\r\n # SSL login doesn't require a view, so redirect\r\n # branding and allow that to process the login if it\r\n # is enabled and the header is in the request.\r\n return external_auth.views.redirect_with_get('root', request.GET)\r\n if settings.FEATURES.get('AUTH_USE_CAS'):\r\n # If CAS is enabled, redirect auth handling to there\r\n return redirect(reverse('cas-login'))\r\n if request.user.is_authenticated():\r\n return redirect(reverse('dashboard'))\r\n\r\n context = {\r\n 'course_id': request.GET.get('course_id'),\r\n 'enrollment_action': request.GET.get('enrollment_action'),\r\n # Bool injected into JS to submit form if we're inside a running third-\r\n # party auth pipeline; distinct from the actual instance of the running\r\n # pipeline, if any.\r\n 'pipeline_running': 'true' if pipeline.running(request) else 'false',\r\n 'platform_name': microsite.get_value(\r\n 'platform_name',\r\n settings.PLATFORM_NAME\r\n ),\r\n }\r\n\r\n return render_to_response('login.html', context)",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)",
"def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result",
"def api_auth():\n form = request.get_json(force=True)\n userdata = None\n if form['register']:\n userdata = userProvider.register_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n else:\n userdata = userProvider.load_authenticated_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n if userdata:\n user = userProvider.userdata_to_user(userdata)\n flask_login.login_user(user)\n return \"true\"\n raise Exception(\"No user loaded\")",
"def do_login(self, backend, user):",
"def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token)\n data['access_token'] = access_token\n kwargs.update(data)\n kwargs.update({'response': data, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)"
] | [
"0.6552406",
"0.65238935",
"0.6430285",
"0.6422166",
"0.6393544",
"0.6226487",
"0.62242806",
"0.6216219",
"0.6209499",
"0.6170765",
"0.61462003",
"0.6108177",
"0.6060539",
"0.6059608",
"0.6046146",
"0.6044743",
"0.5983562",
"0.5963158",
"0.5959967",
"0.5950732",
"0.59470534",
"0.59441185",
"0.5937959",
"0.5936205",
"0.59340805",
"0.59136397",
"0.58715975",
"0.58611894",
"0.5859699",
"0.58578277"
] | 0.72427773 | 0 |
Size the core of the LVL Shifter given K_ratio, the ratio of the NMOS to PMOS segments. | def _design_lvl_shift_core_size(cload: float, k_ratio: float, inv_input_cap: float,
fanout: float, is_ctrl: bool) -> Tuple[int, int, int]:
out_inv_input_cap = cload / fanout
print(f'cload = {cload}')
inv_m = int(round(out_inv_input_cap / inv_input_cap))
inv_m = max(1, inv_m)
pseg = int(round(2 * inv_m / fanout))
pseg = max(1, pseg)
if pseg == 1 and not is_ctrl:
print("=" * 80)
print(
"WARNING: LvShift Designer: pseg has been set to 1; might want to remove output inverter.")
print("=" * 80)
'''
# TODO: Find k_ratio based on functionality automatically rather than have it come from input params.
all_corners = get_tech_global_info('bag3_digital')['signoff_envs']['all_corners']
iterator = FloatBinaryIterator(low=1.0, high=10.0, tol=0.1)
while iterator.has_next():
k_cur = iterator.get_next()
nseg = int(np.round(pseg*k_cur))
dut_params = self._get_lvl_shift_core_params_dict(pinfo, pseg, nseg, has_rst, is_ctrl)
dut = await self.async_new_dut('lvshift_core', STDCellWrapper, dut_params)
functional = False
for
'''
nseg = int(np.round(pseg * k_ratio))
return inv_m, pseg, nseg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _SizeCalculator(partition_size):\n # Max image size grows less than partition size, which means\n # footer size grows faster than partition size.\n return int(math.pow(partition_size, 0.95))",
"def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])",
"def _SizeCalculator(partition_size):\n # Minus footer size to return max image size.\n return partition_size - int(math.pow(partition_size, 0.95))",
"def get_block_size(n, k):\r\n l, r = get_position(k)\r\n c, p, q = get_level_profile(n, l)\r\n return c + 1 if r < q else c",
"def get_nlr_size():\n\treturn 4.1 * u.kpc",
"def _square_size(module_num_nodes):\n return int(math.ceil(math.sqrt(module_num_nodes)))",
"def estimate_size(shape):\n total_bytes = reduce(np.multiply, shape) * 8\n return total_bytes / 1E6",
"def diffusion_width(conversion_depth): #Return value in PIXELS!!!\n return sqrt((drift_time(maximum(conversion_depth, undepleted_thickness)) *\n 2 * k * temp * low_field_mobility / e) + #depleted\n where(conversion_depth < undepleted_thickness,\n square(undepleted_thickness), 0)) / pixel_width #undepleted",
"def get_kernel_size(factor):\r\n return 2 * factor - factor % 2",
"def _compute_sizes(self, k, expand=False, factor=False,\n simplify=False):\n if not self._has(\"omega\"):\n self.cosineSequences(expand=expand, factor=factor,\n simplify=simplify)\n if not self._has(\"theta\"):\n self.eigenvalues(expand=expand, factor=factor, simplify=simplify)\n if self.is_cyclic():\n m = tuple(Integer(1 if th in [2, -2] else 2)\n for th in self._.theta)\n else:\n try:\n m = tuple(integralize(_simplify(_factor(\n self._.n / sum(s * om**2\n for s, om in zip(k, omg)))))\n for omg in self._.omega)\n except TypeError:\n raise InfeasibleError(\"%s not integral\" % self.DUAL_SIZES)\n return m",
"def CalculateDynamicPartitionSize(self, image_size):\n raise NotImplementedError",
"def calc_size(self):\r\n pass",
"def calculate_rf_size(rf_size, downsample):\n h = 61 # 24\" monitor\n d = 10 # 10cm from the right eye\n r = 1080 / downsample # Vertical resolution\n d_px = np.degrees(math.atan2(h / 2, d)) / (r / 2)\n return rf_size * d_px",
"def partition_ratio(self):\n if self._partition_ratio is None:\n partition_lineal = 0\n zones = self.idfobjects[\"ZONE\"]\n zone: EpBunch\n for zone in zones:\n for surface in [\n surf\n for surf in zone.zonesurfaces\n if surf.key.upper() not in [\"INTERNALMASS\", \"WINDOWSHADINGCONTROL\"]\n ]:\n if hasattr(surface, \"tilt\"):\n if (\n surface.tilt == 90.0\n and surface.Outside_Boundary_Condition != \"Outdoors\"\n ):\n multiplier = float(\n zone.Multiplier if zone.Multiplier != \"\" else 1\n )\n partition_lineal += surface.width * multiplier\n self._partition_ratio = (\n partition_lineal / self.net_conditioned_building_area\n )\n return self._partition_ratio",
"def disp_size(self) -> int:\n if self.disp_power == 0:\n return 0\n return 2 ** self.disp_power + 1",
"def _get_synthesis_size(self, lvl):\n lvl_img = self.target_pyramid[lvl]\n h, w = lvl_img.shape[-2:]\n h, w = int(h * self.scale_factor[0]), int(w * self.scale_factor[1])\n return h, w",
"def len_ratio(self, m, q):\n return len(m) / len(q)",
"def recommended_size(img_shape):\r\n new_width = 512\r\n new_height = img_shape[0] / img_shape[1] * 512\r\n new_height = round(new_height / 32) * 32\r\n return new_width, new_height",
"def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)",
"def sub_block_size(self):\n if not self.sub_block_count or not self.parent_block_size:\n return None\n return self.parent_block_size / np.array(self.sub_block_count)",
"def compute_outub_size(height, width, dtype, core_nums):\n ubuf_size = 100 * 1024 # ub whole size 100 * 1024 byte\n out_ele_perblock = compute_perblock_nums(dtype)\n out_blocks = math.ceil(height * width / out_ele_perblock)\n block_per_core = math.ceil(out_blocks / core_nums)\n use_cores = math.ceil(out_blocks / block_per_core)\n out_ele_size = cce.cce_intrin.get_bit_len(dtype) // BITS_NUMS\n out_f16_size = cce.cce_intrin.get_bit_len(\"float16\") // BITS_NUMS\n out_int8_size = cce.cce_intrin.get_bit_len(\"int8\") // BITS_NUMS\n if dtype in [\"int8\", \"uint8\"]:\n need_size = block_per_core * out_ele_perblock * (out_f16_size + out_int8_size)\n if need_size > ubuf_size:\n block_num = ubuf_size // (out_ele_perblock * (out_f16_size + out_int8_size))\n out_factor = math.ceil(block_per_core / block_num)\n last_remian = block_per_core % block_num\n else:\n block_num = block_per_core\n out_factor = 1\n last_remian = 0\n total_len = block_num * out_ele_perblock\n else:\n need_size = block_per_core * out_ele_size * out_ele_perblock\n if need_size > ubuf_size:\n block_num = ubuf_size // BYTES_PER_BLOCK\n out_factor = math.ceil(block_per_core / block_num)\n last_remian = block_per_core % block_num\n else:\n block_num = block_per_core\n out_factor = 1\n last_remian = 0\n total_len = block_num * out_ele_perblock\n\n return block_num, block_per_core, out_factor, last_remian, total_len, use_cores",
"def transit_width(r, k, b, P=1):\n\n\treturn P*math.asin(r*math.sqrt( ((1+k)**2-b**2) / (1-b**2*r**2) ))/math.pi",
"def kernel_size(self, frequency, taper=4.):\n return 2. * taper * self._sigma(frequency)",
"def inner_size_from_label_size(self, label_size: int) -> int:\n return 4 + math.ceil((label_size - 2) / (2 ** self.n_folds))",
"def calc_spot_size(self, distance):\n if distance < 1.2:\n return self.spot_width_close\n else:\n return distance * self.spot_width_scalar",
"def diffuse_ratio(DIFF_data,ghi_data): \n K = DIFF_data/ghi_data\n \n return K",
"def __get_size_multiplier(self, multiplier):\n if multiplier is None:\n result = 1\n elif multiplier in ['k', 'K']:\n result = self.__k_multiplier\n elif multiplier in ['m', 'M']:\n result = self.__m_multiplier\n elif multiplier in ['g', 'G']:\n result = self.__g_multiplier\n else:\n result = 0\n return result",
"def _total_chunk_size_left(self):\n if self.streaming_type == 'reshape':\n return self.N_l // self.conv_factor\n elif self.streaming_type == 'mask':\n return self.N_l // self.conv_factor * self.n_layers\n elif self.unidir:\n return 10000 // self.conv_factor\n else:\n return 10000 // self.conv_factor",
"def width_l_k(model: SingleRhNeutrinoModel):\n mh = parameters.charged_kaon_mass\n fh = parameters.fk\n ckm = parameters.Vus\n ml = _lepton_masses[model.gen]\n return _width_l_hp(model, ml, mh, fh, ckm)",
"def _osLen(self):\n return int(np.ceil(self.minOverscan * self.sampleRate / self.downsample) * self.downsample)\n\n #osv = self.osVector\n #return np.ceil(np.linalg.norm(osv) / self.pixelWidth)"
] | [
"0.6188077",
"0.59274673",
"0.58529997",
"0.5746031",
"0.573419",
"0.5615157",
"0.5585702",
"0.5555616",
"0.5553092",
"0.55231327",
"0.5479882",
"0.5438771",
"0.5424427",
"0.542206",
"0.54073846",
"0.5404979",
"0.53181946",
"0.53123444",
"0.53076833",
"0.52982414",
"0.52909434",
"0.52772576",
"0.5222126",
"0.5218357",
"0.5214911",
"0.5214482",
"0.52129114",
"0.5174636",
"0.51729345",
"0.5170993"
] | 0.6555078 | 0 |
Given the NMOS segments and the PMOS segments ratio for the core, this function designs the internal inverter. For a control level shifter, we don't care about matching rise / fall delay, so we just size for fanout. | async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,
fanout: float,
pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,
has_rst: bool, dual_output: bool,
vin: str, vout: str) -> Tuple[int, int]:
if is_ctrl: # size with fanout
inv_nseg = int(np.round(nseg / fanout))
inv_nseg = 1 if inv_nseg == 0 else inv_nseg
inv_pseg = int(np.round(pseg / fanout))
inv_pseg = 1 if inv_pseg == 0 else inv_pseg
self.log(f"Calculated inv to need nseg : {inv_nseg}")
self.log(f"Calculated inv to need pseg : {inv_pseg}")
return inv_pseg, inv_nseg
# First size the NMOS in the inverter assuming a reasonably sized PMOS
inv_nseg = await self._design_lvl_shift_inv_pdn(pseg, nseg, out_inv_m, fanout, pinfo,
tbm_specs, has_rst, dual_output, vin, vout)
self.log(f"Calculated inv to need at least nseg: {inv_nseg}")
# Now using the inverter pull down size, we size the inverter pull up PMOS
inv_pseg, inv_nseg = await self._design_lvl_shift_inv_pun(pseg, nseg, inv_nseg, out_inv_m,
fanout, pinfo,
tbm_specs, has_rst, dual_output,
vin, vout)
self.log(f"Calculated inv to need pseg: {inv_pseg} and nseg: {inv_nseg}")
return inv_pseg, inv_nseg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _design_lvl_shift_core_size(cload: float, k_ratio: float, inv_input_cap: float,\n fanout: float, is_ctrl: bool) -> Tuple[int, int, int]:\n out_inv_input_cap = cload / fanout\n print(f'cload = {cload}')\n inv_m = int(round(out_inv_input_cap / inv_input_cap))\n inv_m = max(1, inv_m)\n pseg = int(round(2 * inv_m / fanout))\n pseg = max(1, pseg)\n if pseg == 1 and not is_ctrl:\n print(\"=\" * 80)\n print(\n \"WARNING: LvShift Designer: pseg has been set to 1; might want to remove output inverter.\")\n print(\"=\" * 80)\n\n '''\n # TODO: Find k_ratio based on functionality automatically rather than have it come from input params.\n all_corners = get_tech_global_info('bag3_digital')['signoff_envs']['all_corners']\n iterator = FloatBinaryIterator(low=1.0, high=10.0, tol=0.1)\n\n while iterator.has_next():\n k_cur = iterator.get_next()\n nseg = int(np.round(pseg*k_cur))\n\n dut_params = self._get_lvl_shift_core_params_dict(pinfo, pseg, nseg, has_rst, is_ctrl)\n dut = await self.async_new_dut('lvshift_core', STDCellWrapper, dut_params)\n functional = False\n\n for \n '''\n\n nseg = int(np.round(pseg * k_ratio))\n\n return inv_m, pseg, nseg",
"async def _design_output_inverter(self, inv_in_pseg: int, inv_in_nseg: int, pseg: int,\n nseg: int, inv_nseg: int,\n inv_pseg: int, out_inv_m: int, fanout: float, pinfo: Any,\n tbm_specs: Dict[str, Any], has_rst, vin, vout) -> int:\n tb_params = self._get_full_tb_params()\n # Use a binary iterator to find the PMOS size\n iterator = BinaryIterator(-out_inv_m + 1, out_inv_m - 1)\n err_best = float('inf')\n all_corners = get_tech_global_info('bag3_digital')['signoff_envs']['all_corners']\n\n while iterator.has_next():\n pseg_off = iterator.get_next()\n dut_params = self._get_lvl_shift_params_dict(pinfo, pseg, nseg, inv_pseg, inv_nseg,\n inv_in_nseg, inv_in_pseg, out_inv_m,\n has_rst, dual_output=False, skew_out=True,\n out_pseg_off=pseg_off)\n dut = await self.async_new_dut('lvshift', STDCellWrapper, dut_params)\n\n err_worst = -1 * float('Inf')\n worst_env = ''\n sim_worst = None\n for env in all_corners['envs']:\n tbm_specs['sim_envs'] = [env]\n tbm_specs['sim_params']['vdd_in'] = all_corners[vin][env]\n tbm_specs['sim_params']['vdd'] = all_corners[vout][env]\n tbm = cast(CombLogicTimingTB, self.make_tbm(CombLogicTimingTB, tbm_specs))\n sim_results = await self.async_simulate_tbm_obj(\n f'sim_output_inv_pseg_{pseg_off}_{env}', dut, tbm,\n tb_params)\n tdr_cur, tdf_cur = CombLogicTimingTB.get_output_delay(sim_results.data, tbm.specs,\n 'in',\n 'out', False, in_pwr='vdd_in',\n out_pwr='vdd')\n\n if math.isinf(np.max(tdr_cur)) or math.isinf(np.max(tdf_cur)):\n raise ValueError(\"Got infinite delay!\")\n if tdr_cur[0] < 0 or tdf_cur[0] < 0:\n raise ValueError(\"Got negative delay.\")\n\n err_cur = np.abs(tdr_cur[0] - tdf_cur[0])\n if err_cur > err_worst:\n err_worst = err_cur\n worst_env = env\n tdr = tdr_cur[0]\n tdf = tdf_cur[0]\n sim_worst = sim_results\n\n '''\n print(f'iter: {pseg_off}')\n print(f'env: {worst_env}, tdr: {tdr}, tdf: {tdf}')\n breakpoint()\n '''\n\n if tdr < tdf:\n iterator.down(tdr - tdf)\n else:\n iterator.up(tdr - tdf)\n\n err_abs = np.abs(tdr - tdf)\n if err_abs < err_best:\n err_best = err_abs\n iterator.save_info(pseg_off)\n\n pseg_off = iterator.get_last_save_info()\n if pseg_off is None:\n raise ValueError(\"Could not find PMOS size to match target delay\")\n\n self.log(f'Calculated output inverter to skew PMOS by {pseg_off}.')\n\n return pseg_off",
"def downscale(n, ldd, stream_thres, conv_factor, logger, ch):\n logger.info(str(\"Processing volume_t.\" + \"%03.f\" % (n+1)))\n volMapFile = os.path.join(downscaleLoc,str(\"volume_t.\" + \"%03.f\") % (n+1))\n volume_target = readmap(volMapFile)\n stream = streamorder(ldd) # make a stream order map\n # make a river-map, rivers are streams with strahler order < the largest order - a threshold\n # rivers = ifthenelse(scalar(stream) < mapmaximum(scalar(stream)) - stream_thres,boolean(0), boolean(stream))\n rivers = ifthenelse(scalar(stream) < stream_thres,boolean(0), boolean(stream))\n report(rivers,os.path.join(downscaleLoc,'rivers.map'))\n # initialize loop\n floodedLand = volume_target*0\n count = 0\n floodHeightInactiveCells = volume_target*0\n # now iterate in a loop, 15 meters is assumed to be the largest inundation level possible. Increase by steps of 0.3\n # check volume of cells taken into consideration\n volInRiver = ifthenelse(rivers==1,volume_target,scalar(0))\n volInLargeCells = areamaximum(volInRiver,ordinal(uniqueid_target))\n for level in arange(0.0,30,0.1):\n logger.debug('Processing with inundation depth = ' + str(level))\n\n \"\"\"\n Below, a short explanation of the maps, generated in this PCRaster routine is given. The principle idea is to impose a certain water level on river cells\n an check where the backwater of this imposed height may go to upstream through use of the local drain directions and elevation map\n The routine also checks where the imposed water in each cell comes from (i.e. from which 0.5 degree cell).\n In the end, the total volume of backwater from each 0.5 deg. cell is computed and compared to PCRGLOB volumes.\n If the imposed volume exceeds the PCRGLOB volume, the 0.5 deg. cell is assumed to be 'depleted' and the river cells are excluded from\n the river network in further processing steps. In the next step, a slightly higher level is imposed and the volume check is repeated.\n Hence, more downstream cells may impose backwater on the target cells under consideration in later steps.\n In the end of the routine, all volumes of each pcrglob cell should be accounted for in the downscaled map.\n\n floodInRiver: flood level, with resp. to MSL imposed on the river network map\n floodInRiverUpstream: the flood level of floodInRiver, imposed on the upstream area of each river pixel\n idInRiver: id of te 0.5 deg. cell, imposed on the river network map\n idInRiverUpstream: id imposed on the upstream area of each river cell.\n volInRiver: the volume of flood water in each 0.5 deg. pixel, imposed on the river network\n volInRiverUpstream: flood water volume, imposed on the upstream area of each river pixel\n areaInRiver: cell size, imposed on river network map\n areaInRiverUpstream: total surface area of areas with the same idInRiverUpstream value\n floodedLandTemp: The water level in areas, which would occur if the current 'level' was imposed on the river network\n floodedLandAv: The flooded water level, averaged over the idInRiverUpstream areas\n floodedLandTotal: The total volume of flood water in each contiguous area of idInRiverUpstream\n floodedLand: A volume comparison is made between floodedLandTotal and volInRiverUpstream.\n If floodedLandTotal is smaller, then the amount of imposed water will be smaller then the\n volume, computed by PCRGLOB in the 0.5 degree area. 
The inundation height in the cell will be updated in floodedLand.\n If the volume is exceeded, the cell will not be updated and the river cells in this area will be removed.\n Hence, backwater from more downstream cells can still impact on the cell under consideration.\n\n TO-DO: als een cel inactief wordt, dan gaat een benedenstroomse cel ineens heel veel water dumpen op deze plaatsen met als gevolg, mogelijk ernstige overschrijding van het volume uit die cel.\n \"\"\"\n floodInRiver = ordinal((ifthenelse(rivers==1,scalar(level)+dem,scalar(0)))*100)\n idInRiver = ordinal(ifthenelse(rivers==1,uniqueid_target,scalar(0)))\n volInRiver = ifthenelse(rivers==1,volume_target,scalar(0))\n areaInRiver = ifthenelse(rivers==1,surf,scalar(0))\n floodInRiverUpstream = subcatchment(ldd,floodInRiver)\n idInRiverUpstream = subcatchment(ldd,idInRiver)\n if level > 0:\n changedSourceCells = ifthenelse(idInRiverOld != idInRiverUpstream, boolean(1),boolean(0)) # if a different 0.5 deg. area is the source of floods\n floodHeightInactiveCells = ifthenelse(changedSourceCells,floodedLand,floodHeightInactiveCells)\n volInRiverUpstream = areamaximum(volInRiver,idInRiverUpstream)\n areaInRiverUpstream = areatotal(areamaximum(areaInRiver,idInRiverUpstream),idInRiverUpstream) # compute total catchment area of Id cell\n floodedLandTemp = min(max(scalar(floodInRiverUpstream)/100-dem,0),level)\n floodedLandTempAv = areaaverage(max(floodedLandTemp - floodHeightInactiveCells, 0),idInRiverUpstream)\n floodedLandTotal = floodedLandTempAv*areaInRiverUpstream\n # check which cells have a changed source area of .5 degrees and subtract the volume there\n floodedLand = ifthenelse(floodedLandTotal < volInRiverUpstream, max(scalar(floodedLandTemp),scalar(floodedLand)), scalar(floodedLand))# hieronder uitrekenen of volume al meer is dan eerder of niet.\n # update relevant river streams (exclude the ones that are already saturated)\n rivers = ifthenelse(floodedLandTotal < volume_target, rivers, boolean(0))\n idInRiverOld = idInRiverUpstream\n\n vol_pcrglob = pcr2numpy(volInLargeCells,0)/conv_factor\n vol_pcr = vol_pcrglob.sum()\n volmodelled = pcr2numpy(floodedLand*surf,0)\n vol_mod = volmodelled.sum()\n #\n logger.info(str('volume_t.' + '%03.f' + ': Volume PCRGLOB: ' + '%03.3f' + 'km3, Volume downscaling: ' + '%03.3f' + 'km3' + ', perc. diff: ' + '%2.2f' + '%%') % (n+1,vol_pcr/1e9, vol_mod/1e9, (vol_mod-vol_pcr)/vol_pcr*100))\n return logger, ch, floodedLand\n # end of function part",
"def decimator(clk, reset, dataIn, dataOut, decimationRatio, decimationRatioBase, decimationStyle_ext, dataClk, newValueFlag):\n bufferCounter = Signal(intbv(0, min = 0, max = MAXIMAL_RATIO))\n buff = [Signal(intbv(0)[BIT_WIDTH:]) for i in range(MAXIMAL_RATIO)]\n lfsr = Signal(intbv(0)[LFSR_WIDTH:])\n maxPeriod = Signal(bool(False))\n maxValue = Signal(intbv(0)[BIT_WIDTH:])\n minValue = Signal(intbv(0)[BIT_WIDTH:])\n #valueSum = Signal(intbv(0, min = 0, max = 255*2))\n dataOut_decimated = Signal(intbv(0)[8:])\n decimationStyle = Signal(intbv(0)[2:])\n metaCounter = Signal(intbv(0, min = 0, max = 9))\n flagRegistered = Signal(bool(False))\n decimationSum = Signal(intbv(0, min = 0, max = 256*MAXIMAL_RATIO))\n dataClkEdge = Signal(bool(False))\n \n @always(clk.posedge, reset.posedge)\n def newData():\n \"\"\"This process registers flag indicating new data from\n lower clock domain and then waits 8 clock cycles to prevent\n data coccuprion caused by metastability of lower frequency registers\"\"\"\n if(reset == 1):\n metaCounter.next = 0\n flagRegistered.next = False\n decimationStyle.next = 0\n else:\n if(newValueFlag):\n metaCounter.next = 0\n flagRegistered.next = True\n else:\n if(flagRegistered):\n if(metaCounter == 8):\n decimationStyle.next = decimationStyle_ext\n metaCounter.next = 0\n flagRegistered.next = False\n else:\n metaCounter.next = metaCounter + 1\n else:\n decimationStyle.next = decimationStyle\n \n \n @always(reset.posedge, clk.posedge)\n def lfsr_proc():\n \"\"\"This process makes pseudorandom numbers utilizing LFSR \n (http://en.wikipedia.org/wiki/Linear_feedback_shift_register)\"\"\"\n if(reset == 1):\n lfsr.next = LFSR_SEED\n else:\n if(dataClk):\n lfsr.next = concat(lfsr[LFSR_WIDTH-1:0], lfsr[9] ^ lfsr[6])\n \n @always(clk.posedge, reset.posedge)\n def bufferCnt():\n \"\"\"This process counts up from 0 to decimationRatio creating pointer\n into buffer memory for saving samples in dithering mode of decimator\"\"\"\n if(reset == 1):\n bufferCounter.next = 0\n else:\n if(decimationRatio > 0):\n if(bufferCounter == (decimationRatio-1)):\n bufferCounter.next = 0\n else:\n bufferCounter.next = bufferCounter + 1\n \n @always(clk.posedge, reset.posedge)\n def outputConnect():\n \"\"\"This process connects appropriate output \n according to selected decimation ratio\"\"\"\n if(reset == 1):\n dataOut.next = 0\n else:\n if(decimationRatio == 1):\n dataOut.next = dataIn\n else:\n dataOut.next = dataOut_decimated\n \n @always(clk.posedge, reset.posedge)\n def output():\n \"\"\"This is main process of decimator which on rising edge od data clock\n outputs decimated data according to selected decimation style\n for simple decimation it just pases current input date from adc,\n for dithering it takes random sample from decimated interval,\n for peak detection it takes maximum or minimum sample,\n for smoothing it makes mean out of decimated interval by shifting data right\"\"\"\n if(reset == 1):\n dataOut_decimated.next = 0\n maxPeriod.next = False\n maxValue.next = MIN_VALUE\n minValue.next = MAX_VALUE\n decimationSum.next = 0\n dataClkEdge.next = True\n else:\n if(dataClk == 1 and dataClkEdge == 1):\n dataClkEdge.next = False\n decimationSum[16:8].next = 0\n decimationSum[8:].next = dataIn\n maxValue.next = MIN_VALUE\n minValue.next = MAX_VALUE\n if(decimationRatio > 0):\n if(decimationStyle == 0):\n dataOut_decimated.next = dataIn\n elif(decimationStyle == 1):\n if(decimationRatio == 2):\n dataOut_decimated.next = buff[lfsr[1:]]\n elif(decimationRatio == 4):\n 
dataOut_decimated.next = buff[lfsr[2:]]\n elif(decimationRatio == 8):\n dataOut_decimated.next = buff[lfsr[3:]]\n elif(decimationRatio == 16):\n dataOut_decimated.next = buff[lfsr[4:]]\n elif(decimationRatio == 32):\n dataOut_decimated.next = buff[lfsr[5:]]\n elif(decimationRatio == 64):\n dataOut_decimated.next = buff[lfsr[6:]]\n elif(decimationRatio == 128):\n dataOut_decimated.next = buff[lfsr[7:]]\n elif(decimationRatio == 256):\n dataOut_decimated.next = buff[lfsr[8:]]\n elif(decimationStyle == 2):\n maxPeriod.next = not maxPeriod\n if(maxPeriod):\n dataOut_decimated.next = maxValue\n else:\n dataOut_decimated.next = minValue\n elif(decimationStyle == 3):\n if(decimationRatioBase == 1):\n dataOut_decimated.next = decimationSum[9:1]\n elif(decimationRatioBase == 2):\n dataOut_decimated.next = decimationSum[10:2]\n elif(decimationRatioBase == 3):\n dataOut_decimated.next = decimationSum[11:3]\n elif(decimationRatioBase == 4):\n dataOut_decimated.next = decimationSum[12:4]\n elif(decimationRatioBase == 5):\n dataOut_decimated.next = decimationSum[13:5]\n elif(decimationRatioBase == 6):\n dataOut_decimated.next = decimationSum[14:6]\n elif(decimationRatioBase == 7):\n dataOut_decimated.next = decimationSum[15:7]\n elif(decimationRatioBase == 8):\n dataOut_decimated.next = decimationSum[16:8]\n else:\n if(dataClk == 0):\n dataClkEdge.next = True\n decimationSum.next = decimationSum + concat(\"00000000\", dataIn)\n if(dataIn > maxValue):\n maxValue.next = dataIn\n if(dataIn < minValue):\n minValue.next = dataIn\n \n @always(clk.posedge)\n def fillBuffer():\n \"\"\"This process fills in buffer for dithering mode of decimation\"\"\"\n buff[bufferCounter].next = dataIn\n\n return fillBuffer, lfsr_proc, output, bufferCnt, outputConnect, newData",
"def prescaler(self) -> int:",
"def electrons_normalize(superdark):\n\n logging.info('\\tConverting {} to electrons.'.format(superdark))\n\n # Open the image and get the data\n hdulist = fits.open(superdark, 'update')\n sci1 = hdulist[1].data\n err2 = hdulist[2].data\n sci4 = hdulist[4].data\n err5 = hdulist[5].data\n\n # Find gains and exposure time\n gain = {}\n gain['A'] = hdulist[0].header['ATODGNA']\n gain['B'] = hdulist[0].header['ATODGNB']\n gain['C'] = hdulist[0].header['ATODGNC']\n gain['D'] = hdulist[0].header['ATODGND']\n exptime = hdulist[0].header['EXPTIME']\n\n # Multiply each \"half\" of the extensions by the appropriate gain.\n logging.info('\\tMultiplying each quadrant by its gain.')\n apply_norm(sci1, '*', gain['C'], 'regionAorC')\n apply_norm(err2, '*', gain['C'], 'regionAorC')\n apply_norm(sci1, '*', gain['D'], 'regionBorD')\n apply_norm(err2, '*', gain['D'], 'regionBorD')\n apply_norm(sci4, '*', gain['A'], 'regionAorC')\n apply_norm(err5, '*', gain['A'], 'regionAorC')\n apply_norm(sci4, '*', gain['B'], 'regionBorD')\n apply_norm(err5, '*', gain['B'], 'regionBorD')\n\n # Normalizing the gain to 1 is not necessary since calwf3\n # doesn't look at this keyword. It already assumes the units\n # of the darks are e-/sec and will use the gains in CCDTAB to\n # reconvert the darks to DNs. But we do it for consistency.\n logging.info('\\tNormalizing the SCI and ERR extensions (1, 2, 4, 5) ' + \\\n 'by the integration time.')\n apply_norm(sci1, '/', exptime, 'None')\n apply_norm(err2, '/', exptime, 'None')\n apply_norm(sci4, '/', exptime, 'None')\n apply_norm(err5, '/', exptime, 'None')\n\n # Update necessary keywords\n for ext in range(7):\n hdulist[ext].header['CCDGAIN'] = 1.0\n hdulist[0].header['EXPTIME'] = 1.0\n hdulist[0].header['TEXPTIME'] = 1.0\n hdulist.close()",
"def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1",
"def selection_correction_method1_v2(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break",
"def sos_correction(self, ratio):\n\n # Correct velocities\n self.u_mps = self.u_mps * ratio\n self.v_mps = self.v_mps * ratio",
"def update(self, sim, dt):\n #growth kinetics\n self.division_timer += dt\n #you can grow unless you are in the A state meaning apoptosis\n if(self.division_timer >= self.division_time and self._division):\n #now you can divide\n if(self.state == \"T1\"):\n #change the current sytate to D\n self.state = \"NSC\"\n self._division = False\n self.division_time = 36\n #progenitor time is faster with concentration factor\n\n #add the concentration\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n self.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## norm_mn = float(mn_count) / float(tot)\n## if(norm_mn < self._p2):\n## self.division_time = 36*(norm_mn) # in hours\n## self.division_time = max(self.division_time, 1) \n## else:\n## \n## print(norm_mn, self.division_time)\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n if(self.state == \"T2\"):\n #change the current sytate to D\n self.state = \"MN\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n self.set_gradient_source_sink_coeff(\"EGF\", 50.0*source, 1.0*consump_rate)\n if(self.state == \"T3\"):\n #change the current sytate to D\n self.state = \"G\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n #get the location\n #pick a random point on a sphere\n location = RandomPointOnSphere()*self.radius/2.0 + self.location\n #get the radius\n radius = self.radius\n #get the ID\n ID = sim.get_ID()\n #make the object\n sc = NueronalStemCell(location, radius, ID, self.state,\n division_time = self.division_time,\n params = [self._p1, self._p2,\n self._p3, self._p4, self._p5,\n self._p6, self.p7])\n #copy secretion to NSC progeny\n if(self.state == \"NSC\"):\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n sc.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n sc._division = False\n #set its soluble count\n## sc.sol_count = self.sol_count / 2.\n## self.sol_count = self.sol_count / 2.\n #copy over all of the coefficients to the new cells\n## prod_cons = self.get_gradient_source_sink_coeff(\"O2\")\n## sc.set_gradient_source_sink_coeff(\"O2\", prod_cons[0], prod_cons[1])\n prod_cons = self.get_gradient_source_sink_coeff(\"EGF\")\n sc.set_gradient_source_sink_coeff(\"EGF\", prod_cons[0], prod_cons[1]) \n #add it to the imsulation\n sim.add_object_to_addition_queue(sc)\n #reset the division time\n self.division_timer = 0\n \n if(self.state == \"U\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x = rand.random()\n prob = self._p1 #probability of turning into a NSC\n #longer before the differentiation starts\n if(x < prob):\n #differentiation occurs\n self.state = \"T1\"\n #also add a proabability to differentiate directly to a mn\n n1 = self._p4\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## 
if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n #get the value fo the gradient and make differntiation inversly\n #inversly correlated with the proportion present\n norm_mn = self.get_gradient_value(\"EGF\")\n #probability of turning into a motor nueron\n n1 = self._p4\n## #normalize the result\n## if(tot != 0):\n## norm_mn = float(mn_count) / float(tot)\n## else:\n## norm_mn = 0\n #calculate the probability\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p2**n1 + norm_mn**n1)\n x1 = rand.random()\n if(x1 <= self._p1*prob_MN):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n \n if(self.state == \"NSC\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x1 = rand.random()\n x2 = rand.random()\n #Find all the motor nuerons\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## #normalize the result\n## norm_mn = float(mn_count) / float(tot)\n #Make differerntiationd ependant on the gradient value\n norm_mn = self.get_gradient_value(\"EGF\")\n #set the paramaters\n n1 = self._p4\n #update the division time\n## self.division_time = norm_mn * 38 #in hours takes care of the feedback\n #depends on other motor nuerons\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p3**n1 + norm_mn**n1) #probability of turning into a motor nueron\n## prob_G = (1.*norm_mn**n2)/(self._p3**n1 + norm_mn**n2) #of turning into a glial cell\n prob_G = self._p5\n #longer before the differentiation starts\n if(x1 <= prob_MN and x2 > prob_G):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n if(x1 > prob_MN and x2 <= prob_G):\n #differentiation occurs towards a glial cell\n self.state = \"T3\"\n #check to see if division enabled\n if(self._division == False):\n #check for mitotic speed up\n a = self._p6\n b = self._p7\n norm_nsc = self.get_gradient_value(\"TNF\")\n prob_divide = (1.*norm_nsc**b)/(a**b + norm_nsc**b)\n r = rand.random()\n if(r <= x):\n self._division = True",
"def selection_correction_method1(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break\n return h_in, h_out",
"def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios",
"def gain_standardization(self):\r\n \"\"\"\r\n load all gain factors from any hm stage (gains are identical for all SHM stages)\r\n \"\"\"\r\n gain_factors = []\r\n for i in range(self.number_of_paths):\r\n value = self.data_of_hm_cycle['coupon']['path_data'][0][0][0][i][4][0][0]\r\n gain_factors.append(value)\r\n gain_factors = np.array(gain_factors)\r\n gains_factor_new_dim = gain_factors[np.newaxis, ...]\r\n matrix_gains_2d = np.repeat(gains_factor_new_dim, self.signal_length, axis=0).T\r\n matrix_of_gains = matrix_gains_2d[:, :, np.newaxis]\r\n\r\n \"\"\"\r\n divide all signals by the gain factors such that all gains are standardized to one\r\n \"\"\"\r\n for i in range(self.num_of_hm_stages):\r\n entries = i*self.number_of_paths\r\n hm_cycle_set = self.sensor_data_flattened_[entries : entries + self.number_of_paths]\r\n divided_data = np.divide(hm_cycle_set, matrix_of_gains)\r\n self.sensor_data_flattened_[entries : entries + self.number_of_paths] = divided_data\r\n self.sensor_data_original_shape_[i, :, :, :] = divided_data\r\n\r\n return",
"def _tosuperclass(self): \n self.ne_in = self.rsig['ne']['signal']\n self.ne = self.ne_in\n self.te_in = self.rsig['te']['signal']\n self.ti_in = self.rsig['ti']['signal']\n self.ni_in = np.zeros((self.nion, len(self.ne_in)),dtype=float)\n self.zeff_in = np.full(self.nrho, self.zeff)\n self.vt_in = np.zeros(len(self.ne_in),dtype=float)\n self.vt = np.zeros(len(self.ne_in),dtype=float)\n self._ion_densities()\n self.ni = self.ni_in\n self.te = self.te_in\n self.ti = self.ti_in\n \n # no need to smooth since they are already smoothed\n self._extrapolate()",
"def selection_correction_method2(tree, scale, h_in, h_out):\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n RT = event.DD_Rise[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n energy_S15 = event.DD_AmplADU[S15_ch]\n if cut[0]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n energy = energy_S15*scale\n h_in.Fill(energy)\n cut[0]=1\n if cut[1]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and ((onset>=15 and onset<=36) or (onset>=50 and onset<=110)):\n energy = energy_S15*scale\n h_out.Fill(energy)\n cut[1]=1",
"def prescaler(self, value: int, /) -> None:",
"def dispatch_max_sc(pv, demand, inv_size,param, return_series=False):\n bat_size_e_adj = param['BatteryCapacity']\n bat_size_p_adj = param['MaxPower']\n n_bat = param['BatteryEfficiency']\n n_inv = param['InverterEfficiency']\n timestep = param['timestep']\n # We work with np.ndarrays as they are much faster than pd.Series\n Nsteps = len(pv)\n LevelOfCharge = np.zeros(Nsteps)\n #inv2grid = np.zeros(Nsteps)\n inv_array=np.tile(inv_size/n_inv,len(pv))\n pv2store = np.zeros(Nsteps)\n store2inv = np.zeros(Nsteps)\n inv2curt = np.zeros(Nsteps)\n flagsell = np.zeros(Nsteps)\n flag_12h = np.zeros(Nsteps)\n store2grid = np.zeros(Nsteps)\n store2load = np.zeros(Nsteps)\n #grid2store = np.zeros(Nsteps) # TODO Always zero for now.\n\n #Load served by PV\n pv2load_dc = np.array([pv, demand / n_inv,inv_array]).min(axis=0) # DC direct self-consumption, with inverter limitation\n\n #Residual load\n res_load = demand - (pv2load_dc * n_inv) # AC\n inv2load = pv2load_dc * n_inv # AC\n\n #Excess PV\n res_pv = pv-pv2load_dc # DC\n\n #PV to storage after eff losses\n pv2inv = pv2load_dc*n_inv # AC\n\n #first timestep = 0\n LevelOfCharge[0] = 0 # bat_size_e_adj / 2 # DC\n\n\n for i in range(1,Nsteps):\n #PV to storage\n if LevelOfCharge[i-1] >= bat_size_e_adj: # if battery is full\n pv2store[i] = 0\n else: #if battery is not full\n\n if LevelOfCharge[i-1] + res_pv[i] * n_bat * timestep > bat_size_e_adj: # if battery will be full after putting excess\n pv2store[i] = min((bat_size_e_adj - LevelOfCharge[i-1]) / timestep, bat_size_p_adj)\n else:\n # pv2store[i] = min(res_pv[i], bat_size_p_adj)\n pv2store[i] = min(res_pv[i] * n_bat, bat_size_p_adj)\n #Storage to load\n if pv2store[i]==0:# modification to original algorithm (The battery cannot charge and discharge at the same time)\n store2inv[i] = min(bat_size_p_adj,(inv_size/n_inv-pv2load_dc[i]), # DC\n res_load[i] / n_inv,\n LevelOfCharge[i-1] / timestep) #modif to original, store2inv=pv2store*n_bat\n\n #SOC\n LevelOfCharge[i] = min(LevelOfCharge[i-1] - (store2inv[i] - pv2store[i]*n_bat ) * timestep, # DC\n bat_size_e_adj)#modif to original, store2inv=pv2store*n_bat\n\n pv2grid_dc=np.array([pv-pv2store,inv_array]).min(axis=0)-pv2load_dc # DC\n pv2inv= (pv2grid_dc+pv2load_dc)*n_inv # AC\n inv2curt=pv-pv2grid_dc-pv2load_dc-pv2store # DC\n\n inv2load = (pv2load_dc + store2inv) * n_inv # AC\n inv2grid = pv2grid_dc * n_inv # AC\n grid2load = demand - inv2load # AC\n #MaxDischarge = np.minimum(LevelOfCharge[i-1]*BatteryEfficiency/timestep,MaxPower)\n batt_losses=pv2store*(1-n_bat)\n inv_losses=(pv2grid_dc+pv2load_dc+store2inv)*(1-n_inv)\n #Potential Grid to storage # TODO: not an option for now in this strategy\n # GridPurchase = False\n \n out = { 'pv2inv': pv2inv, # AC\n 'res_pv':res_pv, # DC\n 'pv2store': pv2store, # DC\n 'inv2load': inv2load, # AC\n 'grid2load': grid2load, # AC\n 'store2inv': store2inv, # DC\n 'inv2curt':inv2curt, # DC\n 'LevelOfCharge': LevelOfCharge, # kWh\n 'inv2grid': inv2grid, #AC \n 'inv_losses':inv_losses,\n 'batt_losses':batt_losses,\n 'flag_sell':flagsell,\n 'flag_12h':flag_12h, \n 'store2grid':store2grid,\n 'store2load':store2inv*n_inv\n }\n if not return_series:\n out_pd = {}\n for k, v in out.items(): # Create dictionary of pandas series with same index as the input pv\n out_pd[k] = pd.Series(v, index=pv.index)\n out = out_pd\n return out",
"def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += self.Rwiper",
"def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return",
"def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width",
"def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values",
"def piston_control(self):\n # At the beginning it is necessary to set some variables\n t_last = 0 # time of the last cycle\n inhale_end = time.time() - 1 # End of the last inhale\n self.cd[\"exhale_duration\"] = 1\n self.cd[\"inhale_duration\"] = 1\n #now = time.time()\n VCV_stage = 0\n PCV_stage = 0\n PSV_stage = 0\n emergency_contained = False\n\n # Gets the current volume and pressure before starting the cycles. If this doesn't work and \n # takes too long, there is probably some problem with the sensors\n t_P, P = (None, None)\n t_V, V = (None, None)\n P_V_t_limit = 5\n first_P_V = time.time()\n while P == None and V == None:\n if not self.prs.empty():\n t_P, P = self.prs.get()\n if not self.vol.empty():\n t_V, V = self.vol.get()\n if time.time() - first_P_V > P_V_t_limit:\n print(\"Took too long to receive new values of P or V from the queues\")\n # TODO Raise exception, error or return in this condition\n\n while True:\n # Gets the newest data and empties que queues. If there was no data, uses the values of \n # pressure or volume that it already has\n if not self.prs.empty():\n t_P, P = self.prs.get()\n while not self.prs.empty(): # Emptying the queue, only the most recent info is used\n dump = self.prs.get()\n\n if not self.vol.empty():\n t_V, V = self.vol.get()\n while not self.vol.empty(): # Emptying the queue, only the most recent info is used\n dump = self.vol.get()\n\n # TODO Needs to be obtained from the interface or defined in a configuration by the user\n T_inh_max = 60. / self.gui[\"VCV_frequency_spb\"].value() / 2\n\n if self.mode == 1: # 'VCV'\n \"\"\"\n This mode has 3 stages:\n 0 - Wait\n 1 - Inhale\n 2 - Exhale\n \"\"\"\n period = 60. / self.gui[\"VCV_frequency_spb\"].value()\n T_inh_max = period / 2\n if VCV_stage == 0: \n self.piston.stop()\n # If it's time for a new cycle, volume and pressure are within limits\n if (time.time() - t_last > period\n and V < self.gui[\"VCV_volume_spb\"].value()\n and P < self.gui[\"VCV_pressure_max_spb\"].value()):\n VCV_stage = 1\n inhale_start = time.time()\n # It is possible to calculate how long the last exhale took\n self.cd[\"exhale_duration\"] = inhale_start - inhale_end\n\n if VCV_stage == 1:\n # Checks if the current pressure is above P_max\n if P >= self.gui[\"VCV_pressure_max_spb\"].value():\n print(\"Pressure is too high during VCV cycle!\")\n self.piston.stop()\n # Checks if it reached the maximum inhale t\n elif time.time() - inhale_start >= T_inh_max:\n print(f\"VCV cycle is too long: {time.time() - inhale_start:.2f} s\")\n self.piston.stop()\n VCV_stage = 2\n inhale_end = time.time()\n # Checks whether the piston reached the bottom\n # TODO Define what happens in this case\n elif self.piston.piston_at_bottom:\n print(\"Reached max piston travel\")\n self.piston.stop()\n VCV_stage = 2\n inhale_end = time.time()\n # Checks if the current volume is above target\n # TODO Implement margin in options\n elif V >= self.gui[\"VCV_volume_spb\"].value() * 0.9:\n print(\"Reached target volume\")\n self.piston.stop()\n VCV_stage = 2 \n inhale_end = time.time()\n # if none of the previous limitations occured, may move the piston\n else:\n self.piston.pst_down()\n\n if VCV_stage == 2:\n # While the piston still hasn't reached the top\n # TODO Put timeout in piston raise time\n if not self.piston.piston_at_top and time.time() - t_last > period:\n self.piston.pst_up()\n else:\n self.piston.stop()\n VCV_stage = 0\n # Saves the last inhale start time to calculate when a new one should start\n t_last = inhale_start\n # It is possible to 
calculate how long the last inhale took\n self.cd[\"inhale_duration\"] = inhale_end - inhale_start\n\n elif self.mode == 2: # 'PCV'\n \"\"\"\n This mode has 3 stages:\n 0 - Wait\n 1 - Inhale\n 2 - Exhale\n \"\"\" \n period = 60. / self.gui[\"PCV_frequency_spb\"].value()\n T_inh_max = period / 2\n if PCV_stage == 0: \n self.piston.stop()\n # If it's time for a new cycle, volume and pressure are within limits\n if (time.time() - t_last > period\n and V < self.gui[\"PCV_volume_max_spb\"].value()\n and P < self.gui[\"PCV_pressure_spb\"].value()):\n PCV_stage = 1\n inhale_start = time.time()\n # It is possible to calculate how long the last exhale took\n self.cd[\"exhale_duration\"] = inhale_start - inhale_end\n\n if PCV_stage == 1:\n # Checks if the current volume is above max\n if V >= self.gui[\"PCV_volume_max_spb\"].value():\n print(\"Volume is too high during PCV cycle!\")\n self.piston.stop()\n # Checks if it reached the maximum inhale t\n elif time.time() - inhale_start >= T_inh_max:\n print(f\"PCV cycle is too long: {time.time() - inhale_start:.2f} s\")\n self.piston.stop()\n PCV_stage = 2\n inhale_end = time.time()\n # Checks whether the piston reached the bottom\n elif self.piston.piston_at_bottom:\n print(\"Reached max piston travel\")\n self.piston.stop()\n PCV_stage = 2\n inhale_end = time.time()\n # Checks if the current pressure is above target\n elif P >= self.gui[\"PCV_pressure_spb\"].value():\n print(\"Reached target pressure\")\n self.piston.stop()\n PCV_stage = 2 \n inhale_end = time.time()\n # if none of the previous limitations occured, may move the piston\n else:\n self.piston.pst_down()\n\n if PCV_stage == 2:\n # While the piston still hasn't reached the top\n if not self.piston.piston_at_top and time.time() - t_last > period:\n self.piston.pst_up()\n else:\n self.piston.stop()\n PCV_stage = 0\n # Saves the last inhale start time to calculate when a new one should start\n t_last = inhale_start\n # It is possible to calculate how long the last inhale took\n self.cd[\"inhale_duration\"] = inhale_end - inhale_start\n\n elif self.mode == 3: # 'PSV'\n \"\"\"\n This mode has 3 stages:\n 0 - Wait for inhale, P < threshold\n 1 - Inhale\n 2 - Exhale\n \"\"\"\n if PSV_stage == 0:\n self.piston.stop()\n # If the pressure is below the threshold, time to inhale\n if P < self.gui[\"PSV_sensitivity_spb\"].value():\n PSV_stage = 1\n inhale_start = time.time()\n # It is possible to calculate how long the last exhale took\n self.cd[\"exhale_duration\"] = inhale_start - inhale_end\n\n if PSV_stage == 1:\n # Checks if the current pressure is close to P_target\n if P >= self.gui[\"PSV_pressure_spb\"].value():\n print(\"Pressure reached target.\")\n self.piston.stop()\n PSV_stage = 2\n inhale_end = time.time()\n elif self.piston.piston_at_bottom:\n print(\"Reached max piston travel.\")\n self.piston.stop()\n PSV_stage = 2\n inhale_end = time.time()\n # if none of the previous limitations occured, may move the piston\n else:\n self.piston.pst_down()\n \n if PSV_stage == 2:\n # While the piston still hasn't reached the top\n if not self.piston.piston_at_top:\n self.piston.pst_up()\n else:\n self.piston.stop()\n PSV_stage = 0\n # Saves the last inhale start time to calculate when a new one should start\n t_last = inhale_start\n # It is possible to calculate how long the last inhale took\n self.cd[\"inhale_duration\"] = inhale_end - inhale_start\n\n # Emergency mode\n elif self.mode == 4: # 'Emergency'\n if not emergency_contained:\n self.piston.emergency()\n emergency_contained = True\n else:\n 
self.piston.stop()\n\n\n else: # Stop\n self.piston.stop()\n\n # Finds the indexes of data from the last cycle for flow and pressure\n # i_flw = np.where(time.time() - self.flw_data[0, :] < last_cycle_dur)[0]\n # i_prs = np.where(time.time() - self.prs_data[0, :] < last_cycle_dur)[0]\n \n # Sends the maximum pressure and volume in the last cycle to the interface\n self.cd[\"IE_ratio\"] = self.cd[\"exhale_duration\"] / self.cd[\"inhale_duration\"]\n # Saving the data for the GUI update\n # self.cd[\"peak_pressure\"] = peak_prs\n # self.cd[\"tidal_volume\"] = peak_vol\n self.signal_cycle_data.emit(self.cd)\n\n time.sleep(0.05)",
"def fun_w_out_small(self, core_index, h_per_core, h_in_index,\n l1_xpos, l1_xscale, one_value_buf):\n reg_index_y = self.tik_instance.Scalar(dtype=\"int32\")\n reg_cur_index = self.tik_instance.Scalar(dtype=\"int32\")\n reg_cur_index.set_as(core_index*h_per_core + h_in_index)\n list_w_num = []\n loop_index = 0\n while loop_index < self.w_in_loop:\n if loop_index != self.w_in_loop - 1:\n order = 256*loop_index\n list_w_fp = []\n list_w_int = []\n if self.half_pixel_centers:\n while order < 256*(loop_index+1):\n list_w_fp.append(float(order))\n list_w_int.append(int(max((float(order) + 0.5)*self.scale_w - 0.5, 0)))\n order += 1\n else:\n while order < 256*(loop_index+1):\n list_w_fp.append(float(order))\n list_w_int.append(int(float(order)*self.scale_w))\n order += 1\n list_w_int_new = list(set(list_w_int))\n list_w_int_new.sort()\n for i in list_w_int_new:\n list_w_num.append(list_w_int.count(i))\n else:\n order = 256*loop_index\n list_w_fp = []\n list_w_int = []\n if self.half_pixel_centers:\n while order < self.in_size_w:\n list_w_fp.append(float(order))\n list_w_int.append(int(max((float(order) + 0.5)*self.scale_w - 0.5, 0)))\n order += 1\n else:\n while order < self.in_size_w:\n list_w_fp.append(float(order))\n list_w_int.append(int(float(order)*self.scale_w))\n order += 1\n list_w_int_new = list(set(list_w_int))\n list_w_int_new.sort()\n for i in list_w_int_new:\n list_w_num.append(list_w_int.count(i))\n loop_index += 1\n\n w_offline_num = self.tik_instance.Tensor(\n \"int32\", (len(list_w_num),),\n name=\"w_offline_num\", scope=tik.scope_ubuf)\n reg_w_tmp = self.tik_instance.Scalar(dtype=\"int32\")\n number = 0\n for j in list_w_num:\n reg_w_tmp.set_as(j)\n w_offline_num[number].set_as(reg_w_tmp)\n number = number + 1\n\n out_size_w_num = _ceil_div(self.out_size_w, 4)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (out_size_w_num*4, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n ub_output_2 = self.tik_instance.Tensor(\n \"float32\", (out_size_w_num*4, self.c_block_size),\n name=\"ub_output_2\", scope=tik.scope_ubuf)\n with self.tik_instance.for_range(0, self.nc1) as nc1_index:\n self.tik_instance.vector_dup(\n MASK, ub_output, 0.0, out_size_w_num, 1, 8)\n self.tik_instance.vector_dup(\n MASK, ub_output_2, 0.0, out_size_w_num, 1, 8)\n h_floor_buf = self.tik_instance.Tensor(\"int32\", (8,),\n name=\"h_floor_buf\",\n scope=tik.scope_ubuf)\n h_floor_buf_fp = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"h_floor_buf_fp\",\n scope=tik.scope_ubuf)\n h_scale_buf = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n h_block_buf = self.tik_instance.Tensor(\"int32\", (8,),\n name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n one_u_u_buf = self.tik_instance.Tensor(\"float32\", (2, 8),\n name=\"one_u_u_buf\",\n scope=tik.scope_ubuf)\n const_0 = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"const_0\",\n scope=tik.scope_ubuf)\n self.tik_instance.vector_dup(\n 8, h_block_buf, reg_cur_index, 1, 1, 8)\n self.tik_instance.vconv(8, \"\", h_scale_buf[0],\n h_block_buf[0], 1, 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmuls(8, h_scale_buf, h_scale_buf,\n self.scale_h, 1, 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vector_dup(8, const_0, 0, 1, 1, 8)\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(-0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmax(8, h_scale_buf[0], h_scale_buf[0], const_0[0],\n 1, 
1, 1, 1, 8, 8, 0)\n self.tik_instance.vconv(8, \"floor\", h_floor_buf[0],\n h_scale_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vconv(8, \"\", h_floor_buf_fp[0],\n h_floor_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[8],\n h_scale_buf[0], h_floor_buf_fp[0],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[0],\n one_value_buf[0], one_u_u_buf[8],\n 1, 1, 1, 1, 8, 8, 8)\n reg_index_y.set_as(h_floor_buf[0])\n\n one_out = self.tik_instance.Tensor(\n \"float32\", (4*256, self.c_block_size),\n name=\"one_out\", scope=tik.scope_ubuf)\n scale_512_ub_x = self.tik_instance.Tensor(\n \"float32\", (512, 8), name=\"scale_512_ub_x\",\n scope=tik.scope_ubuf)\n int32_256_ub_x = self.tik_instance.Tensor(\n \"int32\", (256, 8), name=\"int32_256_ub_x\",\n scope=tik.scope_ubuf)\n uv_ub = self.tik_instance.Tensor(\n \"float32\", (4*256, 8), name=\"uv_ub\", scope=tik.scope_ubuf)\n\n reg_w_out_begin = self.tik_instance.Scalar(dtype=\"int32\")\n reg_w_out_end = self.tik_instance.Scalar(dtype=\"int32\")\n reg_w_num = self.tik_instance.Scalar(dtype=\"int32\")\n reg_w_value = self.tik_instance.Scalar(dtype=\"int32\")\n reg_repeat = self.tik_instance.Scalar(dtype=\"int32\")\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n reg_w_num.set_as(0)\n with self.tik_instance.for_range(0, self.w_in_loop) \\\n as loop_index:\n reg_w_value.set_as(0)\n self.tik_instance.data_move(\n int32_256_ub_x, l1_xpos[loop_index*256*8], 0, 1,\n 256, 0, 0)\n self.tik_instance.data_move(\n scale_512_ub_x, l1_xscale[loop_index*512*8], 0, 1,\n 512, 0, 0)\n with self.tik_instance.if_scope(\n loop_index != self.w_in_loop - 1):\n ub_input = self.tik_instance.Tensor(\n \"float32\", (256, self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index *\n self.in_size_h + reg_cur_index) *\n self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, 512, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0], scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n reg_w_out_begin.set_as(int32_256_ub_x[0])\n reg_w_out_end.set_as(int32_256_ub_x[2047])\n with self.tik_instance.for_range(\n reg_w_out_begin, reg_w_out_end + 1) as w_out_index:\n reg_repeat.set_as(w_offline_num[reg_w_num])\n with self.tik_instance.if_scope(\n w_out_index != (self.out_size_w - 1)):\n self.tik_instance.vadd(\n 32, ub_output[w_out_index*16],\n one_out[reg_w_value*32],\n ub_output[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 32, ub_output_2[w_out_index*16],\n one_out[256*32+reg_w_value*32],\n ub_output_2[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, 
ub_output[w_out_index*16],\n one_out[reg_w_value*32],\n ub_output[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 16, ub_output[w_out_index*16],\n one_out[reg_w_value*32+16],\n ub_output[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 16, ub_output_2[w_out_index*16],\n one_out[256*32+reg_w_value*32],\n ub_output_2[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 16, ub_output_2[w_out_index*16],\n one_out[256*32+reg_w_value*32+16],\n ub_output_2[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n reg_w_value.set_as(reg_w_value + reg_repeat)\n reg_w_num.set_as(reg_w_num + 1)\n\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.else_scope():\n ub_input = self.tik_instance.Tensor(\n \"float32\", (256, self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index*self.in_size_h +\n reg_cur_index)*self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, self.w_in_tail*2, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0], scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n\n reg_w_out_begin.set_as(int32_256_ub_x[0])\n with self.tik_instance.for_range(\n reg_w_out_begin, self.out_size_w) as w_out_index:\n reg_repeat.set_as(w_offline_num[reg_w_num])\n with self.tik_instance.if_scope(\n w_out_index != (self.out_size_w - 1)):\n self.tik_instance.vadd(\n 32, ub_output[w_out_index*16],\n one_out[reg_w_value*32],\n ub_output[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 32, ub_output_2[w_out_index*16],\n one_out[256*32+reg_w_value*32],\n ub_output_2[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[w_out_index*16],\n one_out[reg_w_value*32],\n ub_output[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 16, ub_output[w_out_index*16],\n one_out[reg_w_value*32+16],\n ub_output[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 16, ub_output_2[w_out_index*16],\n one_out[256*32+reg_w_value*32],\n ub_output_2[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n self.tik_instance.vadd(\n 16, ub_output_2[w_out_index*16],\n one_out[256*32+reg_w_value*32+16],\n ub_output_2[w_out_index*16],\n reg_repeat, 1, 1, 1, 0, 4, 0)\n reg_w_value.set_as(reg_w_value + reg_repeat)\n reg_w_num.set_as(reg_w_num + 1)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n #move data output\n self.tik_instance.set_atomic_add(1)\n self.tik_instance.data_move(\n 
self.output_gm[(nc1_index*self.out_size_h + reg_index_y) *\n self.out_size_w*self.c_block_size],\n ub_output[0], 0, 1, self.out_size_w*2, 0, 0)\n with self.tik_instance.if_scope(\n reg_index_y != self.out_size_h - 1):\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h +\n reg_index_y + 1) * self.out_size_w *\n self.c_block_size], ub_output_2[0],\n 0, 1, self.out_size_w*2, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h +\n reg_index_y) * self.out_size_w *\n self.c_block_size], ub_output_2[0],\n 0, 1, self.out_size_w*2, 0, 0)\n self.tik_instance.set_atomic_add(0)",
"def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. 
See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x",
"def rectifiercap(Iload, fswitch, dVout):\n C = Iload / (fswitch * dVout)\n return C",
"def enable_cl2_61p44(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x00) ## Disable aux output on clock 1\n self.write_versa5(0x31,0x81) ## Use divider for clock2\n ## VCO multiplier is shared for all outputs, set to 68 by firmware\n ## VCO = 38.4*68 = 2611.2 MHz\n ## There is a hardwired divide by 2 in the Versa 5 at the VCO output\n ## VCO to Dividers = 2611.2 MHZ/2 = 1305.6\n ## Target frequency of 61.44 requires dividers of 1305.6/61.44 = 21.25\n ## Frational dividers are supported\n ## Set integer portion of divider 21 = 0x15, 12 bits split across 2 registers\n self.write_versa5(0x3d,0x01)\n self.write_versa5(0x3e,0x50)\n ## Set fractional portion, 30 bits, 2**24 * .25 = 0x400000\n self.write_versa5(0x32,0x01) ## [29:22]\n self.write_versa5(0x33,0x00) ## [21:14]\n self.write_versa5(0x34,0x00) ## [13:6]\n self.write_versa5(0x35,0x00) ## [5:0] and disable ss\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def getAbsNormalizationFactor(deltaE_wkspace,min,max):\n global reducer\n van_mass=reducer.get_default_parameter('vanadium-mass') \n \n Integration(InputWorkspace=deltaE_wkspace,OutputWorkspace='van_int',RangeLower=min,RangeUpper=max,IncludePartialBins='1')\n input_ws = mtd[deltaE_wkspace]\n ei_monovan = input_ws.getRun().getLogData(\"Ei\").value\n data_ws=mtd['van_int']\n nhist = data_ws.getNumberHistograms()\n #print nhist\n\n signal1_sum = 0.0\n weight1_sum = 0.0 \n signal2_sum = 0.0\n weight2_sum = 0.0 \n signal3_sum = 0.0\n weight3_sum = 0.0 \n signal4_sum = 0.0\n weight4_sum = 0.0 \n\n \n ic=0;\n izerc=0;\n for i in range(nhist):\n try:\n det = data_ws.getDetector(i)\n except Exception:\n continue\n if det.isMasked():\n continue\n\n signal = data_ws.readY(i)[0]\n error = data_ws.readE(i)[0]\n \n if signal != signal: #ignore NaN\n continue\n if ((error<=0) or (signal<=0)): # ignore Inf (0 in error are probably 0 in sign\n izerc+=1\n continue\n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i -- this what Libisis had\n weight = 1.0/error\n signal1_sum += signal * weight\n weight1_sum += weight \n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i^2\n weight2 = 1.0/(error*error)\n signal2_sum += signal * weight2\n weight2_sum += weight2 \n # Guess which assumes puassonian distribution with Err=Sqrt(signal) and calculates \n # the function: N_avrg = 1/(DetEfficiency_avrg^-1)*sum(n_i*DetEfficiency_i^-1)\n # where the DetEfficiency = WB_signal_i/WB_average WB_signal_i is the White Beam Vanadium \n # signal on i-th detector and the WB_average -- average WB vanadium signal. \n # n_i is the modified signal \n err_sq = error*error\n weight = err_sq/signal\n signal3_sum += err_sq\n weight3_sum += weight\n # Guess which estimatnes value sum(n_i^2/Sigma_i^2)/sum(n_i/Sigma_i^2) TGP suggestion from 12-2012\n signal4_sum += signal*signal/err_sq\n weight4_sum += signal/err_sq\n \n ic += 1 \n #print 'signal value =' ,signal\n #print 'error value =' ,error \n #print 'average ',signal_sum \n #---------------- Loop finished\n \n if( weight1_sum==0.0 or weight2_sum == 0.0 or weight3_sum == 0.0 or weight4_sum == 0.0) :\n print \"WB integral has been calculated incorrectrly, look at van_int workspace and input workspace: \",deltaE_wkspace\n raise IOError(\" divided by 0 weight\")\n \n integral_monovanLibISIS=signal1_sum / weight1_sum\n integral_monovanSigSq =signal2_sum / weight2_sum \n integral_monovanPuason =signal3_sum / weight3_sum \n integral_monovanTGP =signal4_sum / weight4_sum\n #integral_monovan=signal_sum /(wbVan_sum)\n van_multiplier = (float(reducer.van_rmm)/float(van_mass))\n absnorm_factorLibISIS = integral_monovanLibISIS * van_multiplier\n absnorm_factorSigSq = integral_monovanSigSq * van_multiplier \n absnorm_factorPuason = integral_monovanPuason * van_multiplier \n absnorm_factorTGP = integral_monovanTGP * van_multiplier \n #print 'Monovan integral :' ,integral_monovan \n \n if ei_monovan >= 210.0: \n xsection = 421 # vanadium cross-section in mBarn/sR (402 mBarn/Sr) (!!!modified to fit high energy limit?!!!)\n else: # old textbook cross-section for vanadium for ei=20mEv\n xsection = 400 + (ei_monovan/10) \n\n absnorm_factorLibISIS /= xsection\n absnorm_factorSigSq /= xsection \n absnorm_factorPuason /= xsection \n absnorm_factorTGP /= xsection \n \n sample_multiplier = (float(reducer.sample_mass)/float(reducer.sample_rmm))\n absnorm_factorLibISIS= absnorm_factorLibISIS *sample_multiplier\n absnorm_factorSigSq = absnorm_factorSigSq *sample_multiplier\n absnorm_factorPuason = 
absnorm_factorPuason *sample_multiplier\n absnorm_factorTGP = absnorm_factorTGP *sample_multiplier\n \n if (absnorm_factorLibISIS !=absnorm_factorLibISIS)|(izerc!=0): # It is an error, print diagnostics:\n if (absnorm_factorLibISIS !=absnorm_factorLibISIS):\n print '--------> Absolute normalization factor is NaN <----------------------------------------------'\n else:\n print '--------> Warning, Monovanadium has zero spectra <--------------------------------------------' \n print '--------> Processing workspace: ',deltaE_wkspace\n print '--------> Monovan Integration range : min=',min,' max=',max\n print '--------> Summarized: ',ic,' spectra with total value: ',signal2_sum, 'and total weight: ',weight2_sum\n print '--------> Dropped: ',izerc,' empty spectra'\n print '--------> Van multiplier: ',van_multiplier,' sample multiplier: ',sample_multiplier, 'and xsection: ',xsection \n print '--------> Abs norm factors: LibISIS: ',absnorm_factorLibISIS,' Sigma^2: ',absnorm_factorSigSq\n print '--------> Abs norm factors: Puasonian: ',absnorm_factorPuason, ' TGP: ',absnorm_factorTGP\n print '----------------------------------------------------------------------------------------------' \n else:\n DeleteWorkspace(Workspace=deltaE_wkspace)\n DeleteWorkspace(Workspace=data_ws)\n return (absnorm_factorLibISIS,absnorm_factorSigSq,absnorm_factorPuason,absnorm_factorTGP)",
"def set_calculated_segments(self, total_lights, segments):\n self.set_segments(segments)\n self.set_lights_per_segment(int(total_lights / segments))",
"def AdjustDispMag(self, n_subiter):\n if n_subiter == 1:\n self.disp_mag *= 1.0 / const.OPTSTEPADJUSTOR\n else:\n self.disp_mag *= const.OPTSTEPADJUSTOR",
"def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0"
] | [
"0.573395",
"0.5689612",
"0.5377648",
"0.52175105",
"0.51980525",
"0.51722646",
"0.5154866",
"0.5073706",
"0.50463784",
"0.49642965",
"0.49641448",
"0.49348933",
"0.49050188",
"0.49022022",
"0.4888358",
"0.48803452",
"0.48743895",
"0.48456818",
"0.4839526",
"0.48382315",
"0.48345324",
"0.4820499",
"0.48185378",
"0.48111835",
"0.48080474",
"0.47990385",
"0.478853",
"0.47699437",
"0.47641224",
"0.47600082"
] | 0.5905445 | 0 |
Given all other sizes and total output inverter segments, this function will optimize the output inverter to minimize rise/fall mismatch. | async def _design_output_inverter(self, inv_in_pseg: int, inv_in_nseg: int, pseg: int,
nseg: int, inv_nseg: int,
inv_pseg: int, out_inv_m: int, fanout: float, pinfo: Any,
tbm_specs: Dict[str, Any], has_rst, vin, vout) -> int:
tb_params = self._get_full_tb_params()
# Use a binary iterator to find the PMOS size
iterator = BinaryIterator(-out_inv_m + 1, out_inv_m - 1)
err_best = float('inf')
all_corners = get_tech_global_info('bag3_digital')['signoff_envs']['all_corners']
while iterator.has_next():
pseg_off = iterator.get_next()
dut_params = self._get_lvl_shift_params_dict(pinfo, pseg, nseg, inv_pseg, inv_nseg,
inv_in_nseg, inv_in_pseg, out_inv_m,
has_rst, dual_output=False, skew_out=True,
out_pseg_off=pseg_off)
dut = await self.async_new_dut('lvshift', STDCellWrapper, dut_params)
err_worst = -1 * float('Inf')
worst_env = ''
sim_worst = None
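            # Sweep every signoff corner and keep the one with the largest rise/fall mismatch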
for env in all_corners['envs']:
tbm_specs['sim_envs'] = [env]
tbm_specs['sim_params']['vdd_in'] = all_corners[vin][env]
tbm_specs['sim_params']['vdd'] = all_corners[vout][env]
tbm = cast(CombLogicTimingTB, self.make_tbm(CombLogicTimingTB, tbm_specs))
sim_results = await self.async_simulate_tbm_obj(
f'sim_output_inv_pseg_{pseg_off}_{env}', dut, tbm,
tb_params)
tdr_cur, tdf_cur = CombLogicTimingTB.get_output_delay(sim_results.data, tbm.specs,
'in',
'out', False, in_pwr='vdd_in',
out_pwr='vdd')
if math.isinf(np.max(tdr_cur)) or math.isinf(np.max(tdf_cur)):
raise ValueError("Got infinite delay!")
if tdr_cur[0] < 0 or tdf_cur[0] < 0:
raise ValueError("Got negative delay.")
err_cur = np.abs(tdr_cur[0] - tdf_cur[0])
if err_cur > err_worst:
err_worst = err_cur
worst_env = env
tdr = tdr_cur[0]
tdf = tdf_cur[0]
sim_worst = sim_results
            # print(f'iter: {pseg_off}')
            # print(f'env: {worst_env}, tdr: {tdr}, tdf: {tdf}')
            # breakpoint()
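            # Steer the binary search toward equal rise/fall delays at the worst corner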
if tdr < tdf:
iterator.down(tdr - tdf)
else:
iterator.up(tdr - tdf)
err_abs = np.abs(tdr - tdf)
if err_abs < err_best:
err_best = err_abs
iterator.save_info(pseg_off)
pseg_off = iterator.get_last_save_info()
if pseg_off is None:
raise ValueError("Could not find PMOS size to match target delay")
self.log(f'Calculated output inverter to skew PMOS by {pseg_off}.')
return pseg_off | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,\n has_rst: bool, dual_output: bool,\n vin: str, vout: str) -> Tuple[int, int]:\n if is_ctrl: # size with fanout\n inv_nseg = int(np.round(nseg / fanout))\n inv_nseg = 1 if inv_nseg == 0 else inv_nseg\n inv_pseg = int(np.round(pseg / fanout))\n inv_pseg = 1 if inv_pseg == 0 else inv_pseg\n self.log(f\"Calculated inv to need nseg : {inv_nseg}\")\n self.log(f\"Calculated inv to need pseg : {inv_pseg}\")\n return inv_pseg, inv_nseg\n\n # First size the NMOS in the inverter assuming a reasonably sized PMOS\n inv_nseg = await self._design_lvl_shift_inv_pdn(pseg, nseg, out_inv_m, fanout, pinfo,\n tbm_specs, has_rst, dual_output, vin, vout)\n self.log(f\"Calculated inv to need at least nseg: {inv_nseg}\")\n\n # Now using the inverter pull down size, we size the inverter pull up PMOS\n inv_pseg, inv_nseg = await self._design_lvl_shift_inv_pun(pseg, nseg, inv_nseg, out_inv_m,\n fanout, pinfo,\n tbm_specs, has_rst, dual_output,\n vin, vout)\n self.log(f\"Calculated inv to need pseg: {inv_pseg} and nseg: {inv_nseg}\")\n return inv_pseg, inv_nseg",
"def setup(self):\n igd = self.options['input_grid_data']\n ogd = self.options['output_grid_data']\n output_subset = self.options['output_subset']\n\n if ogd is None:\n ogd = igd\n\n # Build the interpolation matrix which maps from the input grid to the output grid.\n # Rather than a single phase-wide interpolating polynomial, map each segment.\n # To do this, find the nodes in the output grid which fall in each segment of the input\n # grid. Then build a Lagrange interpolating polynomial for that segment\n L_blocks = []\n output_nodes_ptau = list(ogd.node_ptau[ogd.subset_node_indices[output_subset]])\n\n for iseg in range(igd.num_segments):\n i1, i2 = igd.segment_indices[iseg]\n iptau_segi = np.take(igd.node_ptau, (i1, i2-1))\n istau_segi = np.take(igd.node_stau, (i1, i2-1))\n\n # The indices of the output grid that fall within this segment of the input grid\n if ogd is igd:\n optau_segi = iptau_segi\n else:\n ptau_hi = igd.segment_ends[iseg+1]\n if iseg < igd.num_segments - 1:\n idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]\n else:\n idxs_in_iseg = np.arange(len(output_nodes_ptau))\n optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]\n # Remove the captured nodes so we don't accidentally include them again\n output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]\n\n # Now get the output nodes which fall in iseg in iseg's segment tau space.\n ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1\n\n # Create the interpolation matrix and add it to the blocks\n L, _ = lagrange_matrices(istau_segi, ostau_segi)\n L_blocks.append(L)\n\n self.interpolation_matrix = block_diag(*L_blocks)\n r, c = np.nonzero(self.interpolation_matrix)\n\n output_num_nodes, input_num_nodes = self.interpolation_matrix.shape\n\n for (name, kwargs) in self._timeseries_outputs:\n\n input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n input_name = 'input_values:{0}'.format(name)\n self.add_input(input_name,\n shape=(input_num_nodes,) + kwargs['shape'],\n **input_kwargs)\n\n output_name = name\n output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']\n self.add_output(output_name, **output_kwargs)\n\n self._vars.append((input_name, output_name, kwargs['shape']))\n\n size = np.prod(kwargs['shape'])\n val_jac = np.zeros((output_num_nodes, size, input_num_nodes, size))\n\n for i in range(size):\n val_jac[:, i, :, i] = self.interpolation_matrix\n\n val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),\n order='C')\n\n val_jac_rows, val_jac_cols = np.where(val_jac != 0)\n\n rs, cs = val_jac_rows, val_jac_cols\n self.declare_partials(of=output_name,\n wrt=input_name,\n rows=rs, cols=cs, val=val_jac[rs, cs])",
"def Optimize(self):\n self._OpenOutputFiles()\n while self.n_iter < self.n_maxiter and not self.is_converged:\n self.n_iter += 1\n self._ChooseStepDirection(self.opt_type)\n self._LineSearch(-1.0 * self.step_dir)\n self._UpdateEnergy()\n self._UpdateGradient()\n self.traj.AppendStep(self.mol)\n self._UpdateCriteria()\n self._CheckConvergence()\n self._PrintStatus()\n self._CloseOutputFiles()",
"def _optimise(self):\n pass",
"def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()",
"def pre_processing(self):\n while self.number_of_dmax() < 1:\n self.dmax -= 1\n __edges = self.current_edges()\n print('current edges =', __edges, ' expected edges =', self.edges)\n if __edges < self.edges:\n __temp = self.dmax\n __l = self.dmax\n self.dmax *= 2\n __r = self.dmax\n while self.number_of_dmax() >= 1 and __r < self.nodes:\n __l = __r\n self.dmax *= 2\n __r = self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n if self.number_of_dmax() < 1:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n __edges = self.current_edges()\n if __edges > self.edges:\n __l = __temp\n __r = self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n __edges = self.current_edges()\n if __edges > self.edges:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n print('adjust dmax =', self.dmax, ' edges =', int(__edges))\n elif __edges > self.edges:\n __temp1 = [_ ** self.lmd for _ in range(self.dmin, self.dmax + 1)]\n __temp2 = [_ * __ for _, __ in zip(__temp1, list(range(self.dmin, self.dmax+1)))]\n c = self.edges / sum(__temp2)\n n = c * sum(__temp1)\n self.select_p = n / self.nodes\n print('reduce select p =', self.select_p)",
"def optimize5():\n xl = xl_app()\n qt_app = get_qt_app() # pragma noqc\n # Get the initial values of the input cells\n msgBox = OpDialog()\n result = msgBox.exec_()\n if not result: # user cancelled\n return\n\n in_range = get_range(msgBox.in_range.text())\n out_cell = get_range(msgBox.out_cell.text())\n in_values = list(in_range.Value)\n X = np.array([x[0] for x in in_values])\n\n orig_calc_mode = xl.Calculation\n try:\n # switch Excel to manual calculation\n # and disable screen updating\n xl.Calculation = constants.xlManual\n xl.ScreenUpdating = False\n\n # run the minimization routine\n xl_obj_func = partial(obj_func, xl, in_range, out_cell)\n print(f\"X = {X}\")\n result = minimize(xl_obj_func, X, method=\"nelder-mead\")\n in_range.Value = [(float(x),) for x in result.x]\n xl.ScreenUpdating = True\n mbox = QMessageBox()\n mbox.setIcon(QMessageBox.Information)\n mbox.setText(\"Optimization results shown below.\" \"\\nMake changes permanent?\")\n mbox.setWindowTitle(\"Optimization Complete\")\n mbox.setInformativeText(\n \"\\n\".join(\n [\n \"Successful: %s\" % result.success,\n result.message,\n \"After %d iterations\" % result.nit,\n ]\n )\n )\n mbox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n yes_no = mbox.exec_()\n if yes_no != QMessageBox.Ok:\n in_range.Value = in_values\n else:\n in_range.Value = [(float(x),) for x in result.x]\n\n finally:\n # restore the original calculation\n # and screen updating mode\n xl.ScreenUpdating = True\n xl.Calculation = orig_calc_mode",
"def downscale(n, ldd, stream_thres, conv_factor, logger, ch):\n logger.info(str(\"Processing volume_t.\" + \"%03.f\" % (n+1)))\n volMapFile = os.path.join(downscaleLoc,str(\"volume_t.\" + \"%03.f\") % (n+1))\n volume_target = readmap(volMapFile)\n stream = streamorder(ldd) # make a stream order map\n # make a river-map, rivers are streams with strahler order < the largest order - a threshold\n # rivers = ifthenelse(scalar(stream) < mapmaximum(scalar(stream)) - stream_thres,boolean(0), boolean(stream))\n rivers = ifthenelse(scalar(stream) < stream_thres,boolean(0), boolean(stream))\n report(rivers,os.path.join(downscaleLoc,'rivers.map'))\n # initialize loop\n floodedLand = volume_target*0\n count = 0\n floodHeightInactiveCells = volume_target*0\n # now iterate in a loop, 15 meters is assumed to be the largest inundation level possible. Increase by steps of 0.3\n # check volume of cells taken into consideration\n volInRiver = ifthenelse(rivers==1,volume_target,scalar(0))\n volInLargeCells = areamaximum(volInRiver,ordinal(uniqueid_target))\n for level in arange(0.0,30,0.1):\n logger.debug('Processing with inundation depth = ' + str(level))\n\n \"\"\"\n Below, a short explanation of the maps, generated in this PCRaster routine is given. The principle idea is to impose a certain water level on river cells\n an check where the backwater of this imposed height may go to upstream through use of the local drain directions and elevation map\n The routine also checks where the imposed water in each cell comes from (i.e. from which 0.5 degree cell).\n In the end, the total volume of backwater from each 0.5 deg. cell is computed and compared to PCRGLOB volumes.\n If the imposed volume exceeds the PCRGLOB volume, the 0.5 deg. cell is assumed to be 'depleted' and the river cells are excluded from\n the river network in further processing steps. In the next step, a slightly higher level is imposed and the volume check is repeated.\n Hence, more downstream cells may impose backwater on the target cells under consideration in later steps.\n In the end of the routine, all volumes of each pcrglob cell should be accounted for in the downscaled map.\n\n floodInRiver: flood level, with resp. to MSL imposed on the river network map\n floodInRiverUpstream: the flood level of floodInRiver, imposed on the upstream area of each river pixel\n idInRiver: id of te 0.5 deg. cell, imposed on the river network map\n idInRiverUpstream: id imposed on the upstream area of each river cell.\n volInRiver: the volume of flood water in each 0.5 deg. pixel, imposed on the river network\n volInRiverUpstream: flood water volume, imposed on the upstream area of each river pixel\n areaInRiver: cell size, imposed on river network map\n areaInRiverUpstream: total surface area of areas with the same idInRiverUpstream value\n floodedLandTemp: The water level in areas, which would occur if the current 'level' was imposed on the river network\n floodedLandAv: The flooded water level, averaged over the idInRiverUpstream areas\n floodedLandTotal: The total volume of flood water in each contiguous area of idInRiverUpstream\n floodedLand: A volume comparison is made between floodedLandTotal and volInRiverUpstream.\n If floodedLandTotal is smaller, then the amount of imposed water will be smaller then the\n volume, computed by PCRGLOB in the 0.5 degree area. 
The inundation height in the cell will be updated in floodedLand.\n If the volume is exceeded, the cell will not be updated and the river cells in this area will be removed.\n Hence, backwater from more downstream cells can still impact on the cell under consideration.\n\n TO-DO: als een cel inactief wordt, dan gaat een benedenstroomse cel ineens heel veel water dumpen op deze plaatsen met als gevolg, mogelijk ernstige overschrijding van het volume uit die cel.\n \"\"\"\n floodInRiver = ordinal((ifthenelse(rivers==1,scalar(level)+dem,scalar(0)))*100)\n idInRiver = ordinal(ifthenelse(rivers==1,uniqueid_target,scalar(0)))\n volInRiver = ifthenelse(rivers==1,volume_target,scalar(0))\n areaInRiver = ifthenelse(rivers==1,surf,scalar(0))\n floodInRiverUpstream = subcatchment(ldd,floodInRiver)\n idInRiverUpstream = subcatchment(ldd,idInRiver)\n if level > 0:\n changedSourceCells = ifthenelse(idInRiverOld != idInRiverUpstream, boolean(1),boolean(0)) # if a different 0.5 deg. area is the source of floods\n floodHeightInactiveCells = ifthenelse(changedSourceCells,floodedLand,floodHeightInactiveCells)\n volInRiverUpstream = areamaximum(volInRiver,idInRiverUpstream)\n areaInRiverUpstream = areatotal(areamaximum(areaInRiver,idInRiverUpstream),idInRiverUpstream) # compute total catchment area of Id cell\n floodedLandTemp = min(max(scalar(floodInRiverUpstream)/100-dem,0),level)\n floodedLandTempAv = areaaverage(max(floodedLandTemp - floodHeightInactiveCells, 0),idInRiverUpstream)\n floodedLandTotal = floodedLandTempAv*areaInRiverUpstream\n # check which cells have a changed source area of .5 degrees and subtract the volume there\n floodedLand = ifthenelse(floodedLandTotal < volInRiverUpstream, max(scalar(floodedLandTemp),scalar(floodedLand)), scalar(floodedLand))# hieronder uitrekenen of volume al meer is dan eerder of niet.\n # update relevant river streams (exclude the ones that are already saturated)\n rivers = ifthenelse(floodedLandTotal < volume_target, rivers, boolean(0))\n idInRiverOld = idInRiverUpstream\n\n vol_pcrglob = pcr2numpy(volInLargeCells,0)/conv_factor\n vol_pcr = vol_pcrglob.sum()\n volmodelled = pcr2numpy(floodedLand*surf,0)\n vol_mod = volmodelled.sum()\n #\n logger.info(str('volume_t.' + '%03.f' + ': Volume PCRGLOB: ' + '%03.3f' + 'km3, Volume downscaling: ' + '%03.3f' + 'km3' + ', perc. diff: ' + '%2.2f' + '%%') % (n+1,vol_pcr/1e9, vol_mod/1e9, (vol_mod-vol_pcr)/vol_pcr*100))\n return logger, ch, floodedLand\n # end of function part",
"def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n 
self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def simplify_by_refinement(pts, jparams):\n print(\"=== TIN simplification ===\")\n start = time.time()\n print(\"start measuring time of refinement\")\n bbox_size = 1 #variable for bounding box size\n y_max = max(pts[:,1])\n x_max = max(pts[:,0])\n y_min = min(pts[:,1])\n x_min = min(pts[:,0])\n y_delta = y_max-y_min\n x_delta = x_max-x_min\n y_max += y_delta*0.5*(bbox_size-1)\n y_min -= y_delta*0.5*(bbox_size-1)\n x_max += x_delta*0.5*(bbox_size-1)\n x_min -= x_delta*0.5*(bbox_size-1)\n z_avg = sum(pts[:,2])/len(pts[:,2])\n dt_vertices = np.array([[x_min,y_min,z_avg], [x_max, y_min,z_avg], [x_max, y_max,z_avg], [x_min, y_max,z_avg]])\n #print(dt_vertices)\n dt_2d = scipy.spatial.Delaunay([i[0:2] for i in dt_vertices])\n error_track = 0\n highest_diff = np.inf\n while highest_diff>jparams[\"error-threshold\"] and error_track==0:\n diff_list = []\n for pt_index in range(0,len(pts)):\n point = pts[pt_index]\n triangle_idx = dt_2d.find_simplex(point[0:2])\n #print(triangle_idx)\n if triangle_idx == -1:\n print(\"!!! error creating the bounding box !!!\")\n error_track = 1\n break\n else: #calculate the difference between the existing TIN and the actual z value of the point\n interpolation = TIN_interpolator(dt_vertices, dt_2d, triangle_idx, point)\n diff_list.append(abs(point[2]-interpolation))\n #update values and triangulation\n highest_diff = max(diff_list)\n if highest_diff>jparams[\"error-threshold\"]:\n max_idx = diff_list.index(max(diff_list))\n dt_vertices = np.append(dt_vertices,[pts[max_idx]], axis=0)\n dt_2d = scipy.spatial.Delaunay([i[0:2] for i in dt_vertices])\n np.delete(pts,pt_index)\n #print(\"%.32f\" %highest_diff)\n #print(max(diff_list), min(diff_list))\n end = time.time()\n print(\"refinement takes \",end - start)\n if len(dt_vertices)>4:\n #print(\"There are \",len(dt_vertices)-4,\"important points\")\n return dt_vertices[4:len(dt_vertices)] # Remember: the vertices of the initial TIN should not be returned\n else:\n return None",
"def optimize(self):\n self.u = np.random.uniform(-1, 1, (self.batchsize, 288, 1, 1))\n self.l2 = torch.from_numpy(self.u).float()\n self.n = torch.randn(self.batchsize, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n print(self.l1.shape,99999999999999999999999999999999999)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()",
"def _SD_optimal(t):",
"def minimize(self):\n pass",
"def cv_compute_interpolation_weights(out_size, in_size, scale):\n # lower, upper, lerp\n res = [[0, 0, 0] for _ in range(out_size + 1)]\n res[-1] = [0, 0]\n for i in range(out_size - 1, -1, -1):\n val = (i + 0.5) * scale - 0.5\n res[i][0] = max(0, int(val))\n res[i][1] = min(res[i][0] + 1, in_size - 1)\n res[i][2] = max(0, val - int(val))\n return res",
"def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. 
See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x",
"def fun_in_and_out_same(self, nc1_index, core_index, h_per_core,\n h_out_index):\n if self.in_size_w*self.c_block_size*4 < UB_SIZE/2:\n ub_output = self.tik_instance.Tensor(\n \"float32\", (self.in_size_w, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_output[0], self.grads_gm[(nc1_index*self.in_size_h +\n core_index*h_per_core +\n h_out_index)*self.in_size_w*16],\n 0, 1, self.in_size_w*2, 0, 0)\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h + core_index *\n h_per_core + h_out_index)*self.out_size_w*16],\n ub_output[0], 0, 1, self.out_size_w*2, 0, 0)\n else:\n w_size_ub = UB_SIZE // (2*4*self.c_block_size)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (w_size_ub, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n w_num_ub = _ceil_div(self.in_size_w, w_size_ub)\n if w_num_ub > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n with self.tik_instance.for_range(\n 0, w_num_ub, thread_num=thread_num) as w_num_index:\n with self.tik_instance.if_scope(w_num_index != w_num_ub - 1):\n self.tik_instance.data_move(\n ub_output[0], self.grads_gm[\n ((nc1_index*self.in_size_h + core_index*h_per_core +\n h_out_index)*self.in_size_w + w_num_index *\n w_size_ub)*16], 0, 1, w_size_ub*2, 0, 0)\n self.tik_instance.data_move(\n self.output_gm[\n ((nc1_index*self.out_size_h + core_index*h_per_core\n + h_out_index)*self.out_size_w + w_num_index *\n w_size_ub)*16], ub_output[0], 0, 1, w_size_ub*2,\n 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_output[0],\n self.grads_gm[((nc1_index*self.in_size_h +\n core_index*h_per_core + h_out_index) *\n self.in_size_w + w_num_index*w_size_ub) *\n 16],\n 0, 1, (self.in_size_w - w_num_index*w_size_ub)*2, 0, 0)\n\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n core_index*h_per_core + h_out_index) *\n self.out_size_w + w_num_index *\n w_size_ub)*16], ub_output[0], 0, 1,\n (self.in_size_w - w_num_index*w_size_ub)*2, 0, 0)",
"def upscale_main_side(output_img):\n #Fixing pixel which y % 2 != 1\n outputx, outputy = output_img.size\n for oy in range(0, outputy-1, 2):\n for ox in range(1, outputx, 2):\n pixel1 = output_img.getpixel((ox-1, oy))\n p1 = pixel1[0]\n p2 = pixel1[1]\n p3 = pixel1[2]\n if ox == outputx-1 :\n output_img.putpixel((ox, oy), (p1, p2, p3))\n else:\n pixel2 = output_img.getpixel((ox+1, oy))\n P1 = pixel2[0]\n P2 = pixel2[1]\n P3 = pixel2[2]\n output_img.putpixel((ox, oy), (int((p1+P1)/2), int((p2+P2)/2), int((p3+P3)/2)))\n #print(f'pixel:{ox, oy} output:{output_img.getpixel((ox,oy))}')\n #Fixing pixel which y % 2 == 1\n for oy in range(1, outputy-1, 2):\n for ox in range(0, outputx):\n pixel1 = output_img.getpixel((ox, oy-1))\n p1 = pixel1[0]\n p2 = pixel1[1]\n p3 = pixel1[2]\n if oy == outputx:\n output_img.putpixel((ox, oy), (p1, p2, p3))\n break\n else:\n pixel2 = output_img.getpixel((ox, oy+1))\n P1 = pixel2[0]\n P2 = pixel2[1]\n P3 = pixel2[2]\n output_img.putpixel((ox, oy), (int((p1+P1)/2), int((p2+P2)/2), int((p3+P3)/2)))\n #print(f'pixel:{ox, oy} output:{output_img.getpixel((ox,oy))}')\n #Save image \n result_img = output_path+'/output.'+working_img.format.lower()\n output_img.save(result_img)\n print('Upscale finished..!')\n output_img.show()",
"def sem_seg_postprocess(self, result, img_size, output_height,\n output_width):\n result = paddle.unsqueeze(result[:, :img_size[0], :img_size[1]], axis=0)\n result = F.interpolate(\n result,\n size=(output_height, output_width),\n mode=\"bilinear\",\n align_corners=False)[0]\n return result",
"def local_extrema_seuil(sweep, seuil1, seuil2, span) :\n\n #temporary elements\n temp_min = 0\n temp_min_arg = -1\n temp_max = 0\n temp_max_arg = -1\n\n #This holds the result\n up_i = 0\n down_i = 0\n up = array([])\n arg_up = array([])\n down = array([])\n arg_down = array([])\n #init the writing bolean\n min_write = True\n max_write = True\n sweep_size = size(sweep)\n\n for i in range(sweep_size) :\n value = sweep[i]\n #check if we are below the threshold, if yes, next point\n if abs(value) < seuil1 :\n max_write = True\n min_write = True\n if temp_max_arg != -1 :\n #Reshape the array\n s_up = array(shape(up))\n s_up[0] = s_up[0] + 1\n s_up = tuple(s_up)\n up = resize(up,s_up)\n arg_up = resize(arg_up,s_up)\n #Assign values\n up[up_i] = temp_max\n arg_up[up_i] = temp_max_arg\n up_i = up_i + 1\n temp_max = 0\n temp_max_arg = -1\n\n if temp_min_arg != -1 :\n #Reshape the array\n s_down = array(shape(down))\n s_down[0] = s_down[0] + 1\n s_down = tuple(s_down)\n down = resize(down,s_down)\n arg_down = resize(arg_down,s_down)\n #Assign values\n down[down_i] = temp_min\n arg_down[down_i] = temp_min_arg\n down_i = down_i + 1\n temp_min = 0\n temp_min_arg = -1\n\n continue\n\n\n #if we are in beetween the two threshold\n if abs(value) > seuil1 and abs(value) < seuil2 :\n if value < temp_min and min_write :\n temp_min = value\n temp_min_arg = i\n if value > temp_max and max_write:\n temp_max = value\n temp_max_arg = i\n\n #if we are above the threshold\n if abs(value) > seuil2 :\n #Make sure than min and max cannot be accessed before going back below seuil1\n if value < - seuil2 :\n min_write = False\n if(temp_min_arg + span > i) :\n temp_min = 0\n temp_min_arg = -1\n if value > seuil2 :\n max_write = False\n if(temp_max_arg + span > i) :\n temp_max = 0\n temp_max_arg = -1\n\n return [down, arg_down, up, arg_up]",
"def selection_correction_method2(tree, scale, h_in, h_out):\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n RT = event.DD_Rise[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n energy_S15 = event.DD_AmplADU[S15_ch]\n if cut[0]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n energy = energy_S15*scale\n h_in.Fill(energy)\n cut[0]=1\n if cut[1]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and ((onset>=15 and onset<=36) or (onset>=50 and onset<=110)):\n energy = energy_S15*scale\n h_out.Fill(energy)\n cut[1]=1",
"def optimize(self, S):\n\n S = self.filter_number_units(S)\n S = self.optimize_number_units(S)\n S = self.break_tie_priority(S)\n S = self.break_tie_entropy(S)\n return S",
"def compute_interpolation_weights(out_size, in_size, scale):\n # lower, upper, lerp\n res = [[0, 0, 0] for _ in range(out_size + 1)]\n for i in range(out_size - 1, -1, -1):\n val = i * scale\n res[i][0] = int(val)\n res[i][1] = min(res[i][0] + 1, in_size - 1)\n res[i][2] = val - int(val)\n return res",
"def owen(self, x, nsamples, output_indexes, interaction_tolerance, context, batch_size, silent):\n \n f = self._reshaped_model\n r = self.masker\n m00 = np.zeros(self.mask_matrix.shape[1], dtype=np.bool)\n f00 = self.curr_expected_value\n f11 = self._reshaped_model(x.reshape(1,len(x)))[0]\n ind = len(self.values)-1\n \n # make sure output_indexes is a list of indexes\n if output_indexes is not None:\n assert self.multi_output, \"output_indexes is only valid for multi-output models!\"\n \n out_len = output_indexes_len(output_indexes)\n if output_indexes.startswith(\"max(\"):\n output_indexes = np.argsort(-f11)[:out_len]\n elif output_indexes.startswith(\"min(\"):\n output_indexes = np.argsort(f11)[:out_len]\n elif output_indexes.startswith(\"max(abs(\"):\n output_indexes = np.argsort(np.abs(f11))[:out_len]\n \n f00 = f00[output_indexes]\n f11 = f11[output_indexes]\n \n q = queue.PriorityQueue()\n q.put((0, 0, (m00, f00, f11, ind, 1.0)))\n eval_count = 0\n total_evals = min(nsamples, (len(x)-1)*len(x))\n pbar = None\n start_time = time.time()\n #pbar = tqdm(total=total_evals, disable=silent or self.eval_time * total_evals < 5, leave=False)\n while not q.empty():\n\n # if we passed our execution limit then leave everything else on the internal nodes\n if eval_count >= nsamples:\n while not q.empty():\n m00, f00, f11, ind, weight = q.get()[2]\n self.dvalues[ind] += (f11 - f00) * weight\n break\n\n # create a batch of work to do\n batch_args = []\n batch_data = []\n batch_positions = []\n batch_pos = 0\n while not q.empty() and len(batch_data) < batch_size and eval_count < nsamples:\n \n # get our next set of arguments\n m00, f00, f11, ind, weight = q.get()[2]\n\n # get the left are right children of this cluster\n lind = self.merge_clusters[ind, 0]\n rind = self.merge_clusters[ind, 1]\n\n # check if we are a leaf node or terminated our decent early and dumping credit at an internal node\n if lind < 0:\n self.dvalues[ind] += (f11 - f00) * weight\n continue\n\n # build the masks\n m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix\n m10[:] += self.mask_matrix[lind, :]\n m01 = m00.copy()\n m01[:] += self.mask_matrix[rind, :]\n \n batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight))\n \n d = r(x, m10)\n batch_data.append(d)\n batch_positions.append(batch_pos)\n batch_pos += d.shape[0]\n \n d = r(x, m01)\n batch_data.append(d)\n batch_positions.append(batch_pos)\n batch_pos += d.shape[0]\n \n batch_positions.append(batch_pos)\n \n # run the batch\n if len(batch_args) > 0:\n fout = f(np.concatenate(batch_data, axis=0))\n if output_indexes is not None:\n fout = fout[:,output_indexes]\n \n eval_count += fout.shape[0]\n \n if pbar is None and time.time() - start_time > 5:\n pbar = tqdm(total=total_evals, disable=silent, leave=False)\n pbar.update(eval_count)\n if pbar is not None:\n pbar.update(fout.shape[0])\n \n # use the results of the batch to add new nodes\n for i in range(len(batch_args)):\n \n m00, m10, m01, f00, f11, ind, lind, rind, weight = batch_args[i]\n\n # evaluate the model on the two new masked inputs\n f10 = fout[batch_positions[2*i]:batch_positions[2*i+1]].mean(0)\n f01 = fout[batch_positions[2*i+1]:batch_positions[2*i+2]].mean(0)\n\n iratio_left = np.abs(((f10 - f00) - (f11 - f01)) / (np.abs(f10 - f00) + 1e-8))\n iratio_right = np.abs(((f11 - f10) - (f01 - f00)) / (np.abs(f11 - f10) + 1e-8))\n\n iratio = np.max([np.max(iratio_left), np.max(iratio_right)])\n\n new_weight = weight\n if iratio >= interaction_tolerance:\n 
new_weight /= 2\n\n # recurse on the left node with zero context\n args = (m00, f00, f10, lind, new_weight)\n q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))\n\n # recurse on the right node with one context\n args = (m10, f10, f11, rind, new_weight)\n q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))\n\n if iratio >= interaction_tolerance:\n # recurse on the right node with zero context\n args = (m00, f00, f01, rind, new_weight)\n q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))\n\n # recurse on the left node with one context\n args = (m01, f01, f11, lind, new_weight)\n q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))\n if pbar is not None:\n pbar.close()\n \n return output_indexes",
"def _handle_size_changes(self):\n listing = self._get_linear_instruction_listing()\n\n while True:\n found_invalid = False\n i = 0\n\n while i < len(listing) - 1:\n next_offset = listing[i].offset + listing[i].get_size()\n\n if next_offset < listing[i + 1].offset:\n raise Exception(\n f\"Something weird happened with the offsets at offset {listing[i].offset}\"\n )\n\n elif next_offset > listing[i + 1].offset:\n delta = next_offset - listing[i + 1].offset\n self._adjust(listing[i].offset + 0.5, delta)\n found_invalid = True\n\n i += 1\n\n if not found_invalid:\n break",
"def thinning (input_path, output_path):\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n os.makedirs(output_path)\n img_fn_list = get_images(input_path)\n epsilon = 0.0001\n for img_fn in img_fn_list:\n print('===============')\n print(img_fn)\n start = time.time()\n try:\n img_gray = cv2.imread(img_fn,cv2.IMREAD_GRAYSCALE)\n except:\n print(\"Error reading image {}!\".format(img_fn))\n continue\n # swap the color from black white to white black\n img= cv2.subtract(255, img_gray)\n\n img1 = img.copy()\n # Structuring Element\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))\n # Create an empty output image to hold values\n thin = np.zeros(img.shape,dtype='uint8')\n \n # Loop until erosion leads to an empty set\n while (cv2.countNonZero(img1)!=0):\n # Erosion\n erode = cv2.erode(img1,kernel)\n # Opening on eroded image\n opening = cv2.morphologyEx(erode,cv2.MORPH_OPEN,kernel)\n # Subtract these two\n subset = erode - opening\n # Union of all previous sets\n thin = cv2.bitwise_or(subset,thin)\n # Set the eroded image for next iteration\n img1 = erode.copy()\n cv2.imwrite(os.path.join(output_path, os.path.basename(img_fn)), thin)",
"def applySegmentationSteps(input_img, input_mode, output_root, save_intermediate=False, overwrite=False):\n\n np_img = loadImage(input_img)\n\n if save_intermediate == False:\n np_dist = getDistanceRaster(input_img, input_mode=input_mode)\n np_ridge = ridgeDetection(np_dist, 'np', method='meijering', black_ridges=False)\n np_blobs = connectedComponentsImage(np_ridge, 'np', output_path=output_root + '_blobs.tif')\n exportBlobs(np_img, np_blobs, 'np', output_root)\n plt.imsave(output_root + 'blobs_cmap.png', np_blobs, cmap='nipy_spectral')\n else:\n np_dist = getDistanceRaster(input_img, input_mode=input_mode, output_path=output_root + '_distance.tif')\n np_ridge = ridgeDetection(np_dist, 'np', method='meijering', black_ridges=False, output_path=output_root + '_ridge.tif')\n np_blobs = connectedComponentsImage(np_ridge, 'np', output_path=output_root + '_blobs.tif')\n exportBlobs(np_img, np_blobs, 'np', output_root)\n plt.imsave(output_root + 'blobs_cmap.png', np_blobs, cmap='nipy_spectral')\n\n if os.path.exists(output_root + 'blobs_tif'):\n return 0\n else:\n return 1",
"def test_convergence(self, nMax=500000, withPlots=True, overStep=100):\n\n def _mad(vin):\n med = np.median(vin)\n return np.median(np.abs(vin - med))\n\n self.convergenceSearchFlag = True\n self.needReset = False\n self._reset_limits()\n mStart = 10\n mStep = 1\n statStep = 5\n m = 0\n k = mStart\n converged = False\n postConv = 0\n pltout = []\n dIout = []\n Iold = 0\n sE = self.E_max * np.ones(1)\n sTheta_max = self.Theta_max * np.ones(1)\n sPsi_max = self.Psi_max * np.ones(1)\n\n statOut = []\n dIOut = []\n xm = []\n\n outQuad = 0\n outInt = 0\n if withPlots:\n from matplotlib import pyplot as plt\n fig = plt.figure(figsize=(8, 6))\n\n ax0 = fig.add_axes([0.1, 0.65, 0.8, 0.3])\n ax0.xaxis.set_visible(False)\n ax0.set_ylabel('Relative intensity $I$', color='C0')\n ampLine, = ax0.semilogy([], [], 'C0')\n\n ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.55])\n ax1.set_xlabel('Number of nodes')\n ax1.set_ylabel('Median absolute deviation of $I$', color='C1')\n madLine, = ax1.semilogy([], [], 'C1')\n\n ax2 = ax1.twinx()\n ax2.set_ylabel('Median $dI/I$', color='C2')\n relmadLine, = ax2.semilogy([], [], 'C2')\n else:\n fig = None\n\n while True:\n m += 1\n if m % 1000 == 0:\n mStep *= 2\n if True: # raycing._VERBOSITY_ > 10:\n # print(\"INSUFFICIENT CONVERGENCE RANGE:\", k, \"NODES\")\n print(\"INCREASING CONVERGENCE STEP. NEW STEP\", mStep)\n\n k += mStep\n self.quadm = k\n self._build_integration_grid()\n xm.append(k*self.gIntervals)\n Inew = self.build_I_map(sE, sTheta_max, sPsi_max)[0]\n pltout.append(Inew)\n dIout.append(np.abs(Inew-Iold)/Inew)\n if m == 1:\n Iold = Inew\n continue\n Iold = Inew\n\n if withPlots:\n ampLine.set_xdata(xm)\n relInt = np.array(pltout)\n relInt /= relInt.max()\n ampLine.set_ydata(relInt)\n new_y_min = np.floor(np.log10(relInt.min()))\n ax0.set_xlim([0, xm[-1]+5])\n ax0.set_ylim([10**(new_y_min+0.1), 1.1])\n\n if converged:\n postConv += 1\n if m > statStep:\n mad = _mad(np.abs(np.array(pltout))[m-statStep:m])\n dIMAD = np.median(dIout[m-statStep:m])\n\n statOut.append(mad)\n dIOut.append(dIMAD)\n\n if ((dIMAD < self.gp) or (mad < self.gp)) and not converged:\n convPoint = k*self.gIntervals\n outQuad = k\n outInt = self.gIntervals\n if True: # raycing._VERBOSITY_ > 10:\n print(\"CONVERGENCE THRESHOLD REACHED AT \"\n \"{0} NODES, {1} INTERVALS.\".format(\n k, self.gIntervals))\n print(\"INTEGRATION GRID LENGTH IS {} POINTS\".format(\n convPoint))\n converged = True\n if withPlots:\n label = 'True convergence: {0} nodes, {1} interval{2}'\\\n .format(self.quadm, self.gIntervals,\n '' if self.gIntervals == 1 else 's')\n axvlineDict = dict(x=convPoint, color='r', label=label)\n ax0.axvline(**axvlineDict)\n ax1.axvline(**axvlineDict)\n if withPlots:\n new_y_max = np.ceil(np.log10(max(statOut)))\n new_y_min = np.floor(np.log10(min(statOut)))\n ax1.set_xlim([0, xm[-1]+5])\n ax1.set_ylim([10**new_y_min, 10**(new_y_max-0.1)])\n madLine.set_xdata(xm[statStep:])\n madLine.set_ydata(statOut)\n relmadLine.set_xdata(xm[statStep:])\n relmadLine.set_ydata(dIOut)\n new_y_max = np.ceil(np.log10(max(dIOut)))\n new_y_min = np.floor(np.log10(min(dIOut)))\n ax2.set_xlim([0, xm[-1]+5])\n ax2.set_ylim([10**new_y_min, 10**new_y_max])\n fig.canvas.draw()\n plt.pause(0.001)\n\n if xm[-1] > nMax:\n if not converged:\n print(\"PROBLEM WITH CONVERGENCE. 
INCREASE nMax.\")\n break\n\n if overStep is not None:\n if postConv > overStep:\n break\n\n convRes, stats = self._find_convergence_mixed()\n print(\"CONVERGENCE TEST COMPLETED.\")\n self.needReset = True\n if withPlots:\n label = 'Auto-finder: {0} nodes, {1} interval{2}'.format(\n self.quadm, self.gIntervals,\n '' if self.gIntervals == 1 else 's')\n axvlineDict = dict(x=self.quadm*self.gIntervals, color='m',\n linestyle='--', label=label)\n ax0.axvline(**axvlineDict)\n ax1.axvline(**axvlineDict)\n ax1.legend()\n fig.canvas.draw()\n plt.pause(0.1)\n return converged, outQuad, outInt, fig",
"def minimize(self):\n raise NotImplementedError",
"def decimator(clk, reset, dataIn, dataOut, decimationRatio, decimationRatioBase, decimationStyle_ext, dataClk, newValueFlag):\n bufferCounter = Signal(intbv(0, min = 0, max = MAXIMAL_RATIO))\n buff = [Signal(intbv(0)[BIT_WIDTH:]) for i in range(MAXIMAL_RATIO)]\n lfsr = Signal(intbv(0)[LFSR_WIDTH:])\n maxPeriod = Signal(bool(False))\n maxValue = Signal(intbv(0)[BIT_WIDTH:])\n minValue = Signal(intbv(0)[BIT_WIDTH:])\n #valueSum = Signal(intbv(0, min = 0, max = 255*2))\n dataOut_decimated = Signal(intbv(0)[8:])\n decimationStyle = Signal(intbv(0)[2:])\n metaCounter = Signal(intbv(0, min = 0, max = 9))\n flagRegistered = Signal(bool(False))\n decimationSum = Signal(intbv(0, min = 0, max = 256*MAXIMAL_RATIO))\n dataClkEdge = Signal(bool(False))\n \n @always(clk.posedge, reset.posedge)\n def newData():\n \"\"\"This process registers flag indicating new data from\n lower clock domain and then waits 8 clock cycles to prevent\n data coccuprion caused by metastability of lower frequency registers\"\"\"\n if(reset == 1):\n metaCounter.next = 0\n flagRegistered.next = False\n decimationStyle.next = 0\n else:\n if(newValueFlag):\n metaCounter.next = 0\n flagRegistered.next = True\n else:\n if(flagRegistered):\n if(metaCounter == 8):\n decimationStyle.next = decimationStyle_ext\n metaCounter.next = 0\n flagRegistered.next = False\n else:\n metaCounter.next = metaCounter + 1\n else:\n decimationStyle.next = decimationStyle\n \n \n @always(reset.posedge, clk.posedge)\n def lfsr_proc():\n \"\"\"This process makes pseudorandom numbers utilizing LFSR \n (http://en.wikipedia.org/wiki/Linear_feedback_shift_register)\"\"\"\n if(reset == 1):\n lfsr.next = LFSR_SEED\n else:\n if(dataClk):\n lfsr.next = concat(lfsr[LFSR_WIDTH-1:0], lfsr[9] ^ lfsr[6])\n \n @always(clk.posedge, reset.posedge)\n def bufferCnt():\n \"\"\"This process counts up from 0 to decimationRatio creating pointer\n into buffer memory for saving samples in dithering mode of decimator\"\"\"\n if(reset == 1):\n bufferCounter.next = 0\n else:\n if(decimationRatio > 0):\n if(bufferCounter == (decimationRatio-1)):\n bufferCounter.next = 0\n else:\n bufferCounter.next = bufferCounter + 1\n \n @always(clk.posedge, reset.posedge)\n def outputConnect():\n \"\"\"This process connects appropriate output \n according to selected decimation ratio\"\"\"\n if(reset == 1):\n dataOut.next = 0\n else:\n if(decimationRatio == 1):\n dataOut.next = dataIn\n else:\n dataOut.next = dataOut_decimated\n \n @always(clk.posedge, reset.posedge)\n def output():\n \"\"\"This is main process of decimator which on rising edge od data clock\n outputs decimated data according to selected decimation style\n for simple decimation it just pases current input date from adc,\n for dithering it takes random sample from decimated interval,\n for peak detection it takes maximum or minimum sample,\n for smoothing it makes mean out of decimated interval by shifting data right\"\"\"\n if(reset == 1):\n dataOut_decimated.next = 0\n maxPeriod.next = False\n maxValue.next = MIN_VALUE\n minValue.next = MAX_VALUE\n decimationSum.next = 0\n dataClkEdge.next = True\n else:\n if(dataClk == 1 and dataClkEdge == 1):\n dataClkEdge.next = False\n decimationSum[16:8].next = 0\n decimationSum[8:].next = dataIn\n maxValue.next = MIN_VALUE\n minValue.next = MAX_VALUE\n if(decimationRatio > 0):\n if(decimationStyle == 0):\n dataOut_decimated.next = dataIn\n elif(decimationStyle == 1):\n if(decimationRatio == 2):\n dataOut_decimated.next = buff[lfsr[1:]]\n elif(decimationRatio == 4):\n 
dataOut_decimated.next = buff[lfsr[2:]]\n elif(decimationRatio == 8):\n dataOut_decimated.next = buff[lfsr[3:]]\n elif(decimationRatio == 16):\n dataOut_decimated.next = buff[lfsr[4:]]\n elif(decimationRatio == 32):\n dataOut_decimated.next = buff[lfsr[5:]]\n elif(decimationRatio == 64):\n dataOut_decimated.next = buff[lfsr[6:]]\n elif(decimationRatio == 128):\n dataOut_decimated.next = buff[lfsr[7:]]\n elif(decimationRatio == 256):\n dataOut_decimated.next = buff[lfsr[8:]]\n elif(decimationStyle == 2):\n maxPeriod.next = not maxPeriod\n if(maxPeriod):\n dataOut_decimated.next = maxValue\n else:\n dataOut_decimated.next = minValue\n elif(decimationStyle == 3):\n if(decimationRatioBase == 1):\n dataOut_decimated.next = decimationSum[9:1]\n elif(decimationRatioBase == 2):\n dataOut_decimated.next = decimationSum[10:2]\n elif(decimationRatioBase == 3):\n dataOut_decimated.next = decimationSum[11:3]\n elif(decimationRatioBase == 4):\n dataOut_decimated.next = decimationSum[12:4]\n elif(decimationRatioBase == 5):\n dataOut_decimated.next = decimationSum[13:5]\n elif(decimationRatioBase == 6):\n dataOut_decimated.next = decimationSum[14:6]\n elif(decimationRatioBase == 7):\n dataOut_decimated.next = decimationSum[15:7]\n elif(decimationRatioBase == 8):\n dataOut_decimated.next = decimationSum[16:8]\n else:\n if(dataClk == 0):\n dataClkEdge.next = True\n decimationSum.next = decimationSum + concat(\"00000000\", dataIn)\n if(dataIn > maxValue):\n maxValue.next = dataIn\n if(dataIn < minValue):\n minValue.next = dataIn\n \n @always(clk.posedge)\n def fillBuffer():\n \"\"\"This process fills in buffer for dithering mode of decimation\"\"\"\n buff[bufferCounter].next = dataIn\n\n return fillBuffer, lfsr_proc, output, bufferCnt, outputConnect, newData"
] | [
"0.57551074",
"0.5633371",
"0.55631614",
"0.5548981",
"0.54104066",
"0.53518677",
"0.52544314",
"0.52046525",
"0.5193554",
"0.510433",
"0.50876915",
"0.507961",
"0.5076772",
"0.5050112",
"0.5047182",
"0.5028232",
"0.50279135",
"0.50186217",
"0.49814168",
"0.49547577",
"0.4953381",
"0.49449524",
"0.4944649",
"0.48872858",
"0.4882484",
"0.48626977",
"0.48329118",
"0.4827547",
"0.4807877",
"0.47981092"
] | 0.65121347 | 0 |
Creates a dictionary of parameters for the layout class LevelShifter | def _get_lvl_shift_params_dict(pinfo: Any, seg_p: int, seg_n: int, seg_inv_p: int,
seg_inv_n: int, seg_in_inv_p: int, seg_in_inv_n: int,
out_inv_m: int, has_rst: bool, dual_output: bool,
is_ctrl: bool = False,
skew_out: bool = False, out_pseg_off: int = 0) -> Dict[str, Any]:
tech_info = get_tech_global_info('bag3_digital')
wn = tech_info['w_minn'] if is_ctrl else 2 * tech_info['w_minn']
wp = tech_info['w_minp'] if is_ctrl else 2 * tech_info['w_minp']
if has_rst:
seg_dict = dict(pd=seg_n, pu=seg_p, rst=int(np.ceil(seg_n / 2)), prst=seg_p)
w_dict = dict(pd=wn, pu=wp, rst=wn)
else:
seg_dict = dict(pd=seg_n, pu=seg_p)
w_dict = dict(pd=wn, pu=wp)
lv_params = dict(
cls_name=LevelShifter.get_qualified_name(),
draw_taps=True,
pwr_gnd_list=[('VDD_in', 'VSS'), ('VDD', 'VSS')],
params=dict(
pinfo=pinfo,
lv_params=dict(
seg_dict=seg_dict,
w_dict=w_dict,
has_rst=has_rst,
in_upper=has_rst,
dual_output=dual_output,
),
in_buf_params=dict(segp_list=[seg_in_inv_p, seg_inv_p],
segn_list=[seg_in_inv_n, seg_inv_n],
w_p=wp, w_n=wn),
export_pins=True,
)
)
# Note that setting stack_p = 2 actually changes the topology of the level shifter to include PMOS devices
# tied to the input and in series with the cross-coupled PMOS pull-ups.
if has_rst:
lv_params['params']['lv_params']['stack_p'] = 2
if skew_out:
lv_params['params']['lv_params']['buf_segn_list'] = [out_inv_m]
lv_params['params']['lv_params']['buf_segp_list'] = [out_inv_m + out_pseg_off]
else:
lv_params['params']['lv_params']['buf_seg_list'] = [out_inv_m]
return lv_params | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def layout_method_mapper(self):\n return {\n \"kamada_kawai_layout\": kamada_kawai_layout,\n \"fruchterman_reingold_layout\": fruchterman_reingold_layout,\n \"spectral_layout\": spectral_layout,\n }",
"def init_pos_parms(self):\n\n ## init_pos_parms()\n parms = {}\n\n # length axis\n parms['length_attribute_road'] = ('top', 'cl', 'bottom')[self.segment]\n parms['length_attribute_artifact'] = ('top', 'cl', 'bottom')[self.pos_length]\n\n # width axis\n if type(self.pos_width) is int:\n parms['width_road_rect'] = self.road.lanes[self.pos_width]\n parms['width_attribute_road'] = 'cw'\n parms['width_attribute_artifact'] = 'cw'\n else:\n parms['width_road_rect'] = self.road\n pos_parms = {'l': ('left', 'right'),\n 'c': ('cw', 'cw'),\n 'r': ('right', 'left')}[self.pos_width]\n parms['width_attribute_road'] = pos_parms[0]\n parms['width_attribute_artifact'] = pos_parms[1]\n return parms",
"def get_params_info(cls):\n return dict(\n config='laygo configuration dictionary.',\n threshold='transistor threshold flavor.',\n draw_boundaries='True to draw boundaries.',\n num_blk='number of driver segments.',\n show_pins='True to draw pin geometries.',\n )",
"def _get_lvl_shift_core_params_dict(pinfo: Any, seg_p: int, seg_n: int,\n has_rst: bool, is_ctrl: bool = False) -> Dict[str, Any]:\n global_info = get_tech_global_info('bag3_digital')\n wn = global_info['w_minn'] if is_ctrl else 2 * global_info['w_minn']\n wp = global_info['w_minp'] if is_ctrl else 2 * global_info['w_minp']\n\n if has_rst:\n seg_dict = dict(pd=seg_n, pu=seg_p, rst=int(np.ceil(seg_n / 2)), prst=seg_p)\n w_dict = dict(pd=wn, pu=wp, rst=wn)\n else:\n seg_dict = dict(pd=seg_n, pu=seg_p)\n w_dict = dict(pd=wn, pu=wp)\n lv_params = dict(\n cls_name=LevelShifterCore.get_qualified_name(),\n draw_taps=True,\n params=dict(\n pinfo=pinfo,\n seg_dict=seg_dict,\n w_dict=w_dict,\n has_rst=has_rst,\n in_upper=has_rst,\n )\n )\n\n if has_rst:\n lv_params['params']['lv_params']['stack_p'] = 2\n\n return lv_params",
"def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []",
"def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }",
"def init_params():\r\n\r\n p = OrderedDict()\r\n p['startYear'] = 1855\r\n p['num5YearAgeClasses'] = 25\r\n p['numCareLevels'] = 5\r\n p['pixelsInPopPyramid'] = 2000\r\n p['pixelsPerTown'] = 16 # 56\r\n p['mapGridXDimension'] = 20\r\n p['mapGridYDimension'] = 25\r\n p['careLevelColour'] = ['deepskyblue','green','yellow','orange','red']\r\n p['careDemandInHours'] = [ 0.0, 12.0, 24.0, 48.0, 96.0 ]\r\n p['unmetNeedColor'] = ['deepskyblue','green','yellow','orange','red', 'mediumorchid']\r\n p['houseSizeColour'] = ['deepskyblue','green','yellow','orange','red', 'mediumorchid']\r\n p['mainFont'] = 'Helvetica 18'\r\n p['fontColour'] = 'white'\r\n p['dateX'] = 70\r\n p['dateY'] = 20\r\n p['popX'] = 70\r\n p['popY'] = 50\r\n p['delayTime'] = 0.0\r\n p['maxTextUpdateList'] = 12\r\n \r\n return p",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict",
"def params(self):\n return {'shape': self.shape,\n 'name': self.name}",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict[f'OutStream Available {self.dim}D :'] = self.availableOutStreamTypes[self.dim]\n paramDict['Plot is '] = str(self.dim) + 'D'\n for index in range(len(self.sourceName)):\n paramDict['Source Name ' + str(index) + ' :'] = self.sourceName[index]\n\n return paramDict",
"def _get_constructor_parameters(self) -> Dict[str, Any]:\n return dict(\n obs_space=self.obs_space,\n action_space=self.action_space,\n scale_imgs=self.scale_imgs,\n )",
"def getInitParams(self):\n paramDict = {}\n paramDict['upperBoundUsed' ] = self.upperBoundUsed\n paramDict['lowerBoundUsed' ] = self.lowerBoundUsed\n paramDict['hasInfiniteBound'] = self.hasInfiniteBound\n paramDict['upperBound' ] = self.upperBound\n paramDict['lowerBound' ] = self.lowerBound\n paramDict['adjustmentType' ] = self.__adjustmentType\n paramDict['dimensionality' ] = self.dimensionality\n return paramDict",
"def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}",
"def create_layout( self ):",
"def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)",
"def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}",
"def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict",
"def _getAttributes(self):\n self._params = {}\n if self.interp is not None:\n # Initialize interpolation function :\n self['x'] = np.arange(0, self.pixels, 1)\n self['y'] = np.arange(0, self.pixels, 1)\n # Define newaxis :\n self['xnew'] = np.arange(0, self.pixels, self.interp)\n self['ynew'] = np.arange(0, self.pixels, self.interp)\n self['csize'] = len(self['xnew'])\n else:\n self['csize'] = self.pixels\n # Variables :\n l = int(self['csize'] / 2)\n self['l'] = l\n y, x = np.ogrid[-l:l, -l:l]\n disc = x**2 + y**2\n self['mask'] = disc < l**2\n self['nmask'] = np.invert(self['mask'])\n # self['image'] = np.tile(self.bgcolor[np.newaxis, ...], (2*l, 2*l, 1))",
"def _build_param_dict(self):\n # Add parameter handlers to parameter dict.\n self._param_dict = ProtocolParameterDict()\n \n self._param_dict.add(Parameter.CYCLE_TIME,\n r'(\\d+)\\s+= Cycle Time \\(.*\\)\\r\\n(0|1)\\s+= Minutes or Seconds Cycle Time',\n lambda match : self._to_seconds(int(match.group(1)),\n int(match.group(2))),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_WRITE,\n startup_param=True,\n direct_access=False,\n default_value=20,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"1\", Prompt.CYCLE_TIME_PROMPT]])\n \n self._param_dict.add(Parameter.VERBOSE,\n r'', # Write-only, so does it really matter?\n lambda match : None,\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=1,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"2\", Prompt.VERBOSE_PROMPT]])\n \n self._param_dict.add(Parameter.METADATA_POWERUP,\n r'(0|1)\\s+= Metadata Print Status on Power up',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"3\", Prompt.METADATA_PROMPT]])\n\n self._param_dict.add(Parameter.METADATA_RESTART,\n r'(0|1)\\s+= Metadata Print Status on Restart Data Collection',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"4\", Prompt.METADATA_PROMPT]])\n \n self._param_dict.add(Parameter.RES_SENSOR_POWER,\n r'(0|1)\\s+= Res Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"1\"]])\n\n self._param_dict.add(Parameter.INST_AMP_POWER,\n r'(0|1)\\s+= Thermocouple & Hydrogen Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"2\"]])\n\n self._param_dict.add(Parameter.EH_ISOLATION_AMP_POWER,\n r'(0|1)\\s+= eh Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"3\"]])\n \n self._param_dict.add(Parameter.HYDROGEN_POWER,\n r'(0|1)\\s+= Hydrogen Sensor Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"4\"]])\n \n self._param_dict.add(Parameter.REFERENCE_TEMP_POWER,\n r'(0|1)\\s+= Reference Temperature Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n 
menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"5\"]])",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['p'] = self.p\n return paramDict",
"def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()",
"def smp_dict():\n out = base_dict()\n out['mro']['current'] = ['Sample']\n out['name']['current'] = 'Sample'\n ao(out, 'idx', 'Integer', attr=['Hidden'])\n ao(out, 'ii', 'Integer', attr=['Hidden'])\n ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')\n return out",
"def _parse_kwargs(kwargs):\n layout_kwargs = {}\n # For the layout object\n if \"dim\" in kwargs:\n layout_kwargs[\"dim\"] = kwargs.pop(\"dim\")\n if \"center\" in kwargs:\n layout_kwargs[\"center\"] = kwargs.pop(\"center\")\n if \"scale\" in kwargs:\n layout_kwargs[\"scale\"] = kwargs.pop(\"scale\")\n\n placement_kwargs = {}\n # For the placement object\n if \"scale_ratio\" in kwargs:\n placement_kwargs[\"scale_ratio\"] = kwargs.pop(\"scale_ratio\")\n # For closest strategy\n if \"subset_size\" in kwargs:\n placement_kwargs[\"subset_size\"] = kwargs.pop(\"subset_size\")\n if \"num_neighbors\" in kwargs:\n placement_kwargs[\"num_neighbors\"] = kwargs.pop(\"num_neighbors\")\n\n return layout_kwargs, placement_kwargs",
"def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }",
"def __init__(self, scale_factor=128):\n # due to the way the WAD works, for keeping track of the lumps size for filling the dictionary the byte representation is\n # needed. But for checking duplicated vertices/linedefs/etc it would be needed to convert back each lump before the\n # check. For avoiding this problem, a set of lumps is stored in the writer and written only when the level is\n # fully specified.\n self.wad = WAD('W')\n self.current_level = None\n self.lumps = {'THINGS':Things(), 'LINEDEFS':Linedefs(), 'VERTEXES':Vertexes(),'SIDEDEFS': Sidedefs(), 'SECTORS':Sectors()} # Temporary lumps for this level\n self.scale_factor = scale_factor",
"def buildParamsDict(self):\n self.params_dict = {\n \"img_dir\": self.savePathJoin(\"Images\"),\n \"depth_dir\": self.savePathJoin(\"Depth\"),\n \"back_of_dir\": self.savePathJoin(\"Back_Of\"),\n \"of_dir\": self.savePathJoin(\"Of\"),\n \"save_dir\": self.user[\"Save\"],\n \"high\": self.high,\n \"low\": self.low,\n \"run_dict\": self.run_dict,\n \"of_model\": self.app.get_resource(\n os.path.join(\"of_models\", \"network-default.pytorch\")\n ),\n \"depth_model\": self.app.get_resource(\n os.path.join(\"depth_models\", \"model_city2kitti.meta\")\n ),\n \"yolo_weights\": self.app.get_resource(\n os.path.join(\"yolo\", \"yolov3.weights\")\n ),\n \"yolo_v\": self.app.get_resource(os.path.join(\"yolo\", \"yolov3.cfg\")),\n \"coco_names\": self.app.get_resource(os.path.join(\"yolo\", \"coco.names\")),\n \"object_detection_dir\": self.savePathJoin(\"ObjectDetection\"),\n \"plot_speed_dir\": PLOT_SPEED_DIR,\n \"plot_crash_dir\": PLOT_CRASH_DIR,\n \"numbers_dir\": NP_DIR,\n \"plot_error_dir\": PLOT_ERROR_DIR,\n \"speed_gt\": self.user[\"GT\"],\n \"vid_path\": self.user[\"Video\"],\n \"super_pixel_method\": self.super_pixel_method,\n \"super_pixel_dir\": SUPER_PIXEL_DIR,\n \"send_video_frame\": False,\n \"create_csv\": self.ui.c_csv.isChecked(),\n \"create_draw\": self.ui.c_draw.isChecked(),\n \"create_velocity\": self.ui.c_velocity.isChecked(),\n \"create_video_fps\": int(self.ui.t_fps.text()),\n \"optimize_params\": self.ui.c_optimize.isChecked(),\n \"super_pixel_label_dir\": os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n ),\n }",
"def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. '\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params"
] | [
"0.617329",
"0.61104554",
"0.60987556",
"0.6021736",
"0.5993616",
"0.596282",
"0.59227943",
"0.5847736",
"0.5747626",
"0.5723274",
"0.57080036",
"0.5674384",
"0.565163",
"0.56308",
"0.5629004",
"0.5618874",
"0.56178457",
"0.56168747",
"0.56168747",
"0.5608124",
"0.5590386",
"0.5548011",
"0.5535021",
"0.5530335",
"0.55171686",
"0.5498826",
"0.5490759",
"0.54892915",
"0.5488858",
"0.54730225"
] | 0.6311791 | 0 |
Handle mocked API request for repo existence check. | def callback_repo_check(self, request, uri, headers, status_code=404):
self.assertEqual(
request.headers['Authorization'],
'token {0}'.format(self.OAUTH2_TOKEN)
)
# Handle the new "rerun" repo differently
if self.TEST_RERUN_REPO in uri:
status_code = 404
return (status_code, headers, json.dumps({'message': 'testing'})) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_github_repos_info_positive(self):\n self.assertIsNotNone(app.get_github_repos_info(\"dhh\")[\"repo_info\"])",
"def test_github_api_exists():\n p = github_api.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0')\n with enable_api_call():\n assert p.exists()\n assert not (p / 'unnknown_dir').exists()\n\n readme = p / 'README.md'\n core = p / 'tensorflow_datasets' / 'core'\n with enable_api_call():\n assert readme.is_file()\n assert core.is_dir()\n\n # Data should have been cached (no API calls required)\n assert not readme.is_dir()\n assert not core.is_file()\n assert readme.exists()\n assert core.exists()\n # Recreating a new Path reuse the cache\n assert (core.parent.parent / 'README.md').is_file()\n assert (core.parent.parent / 'README.md')._metadata is readme._metadata",
"def test_api_repo_status_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params2()\n path, method = default_api.api_repo_status_get(params)\n self.assertEqual(path, '/api/repo_status')\n self.assertEqual(method, 'GET')",
"def test_returns_cloned_repo_by_name(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = settings.REPO_URL + u'test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')",
"def test_returns_cloned_repo_by_name_auto_host(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n\n del settings.REPO_URL\n\n response = self.client.get(url, HTTP_HOST='test-host')\n\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = 'git://test-host/test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')",
"def register_repo_check(self, body):\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\n '^{url}repos/{org}/({repo}|{repo_rerun})$'.format(\n url=self.URL,\n org=self.ORG,\n repo=re.escape(self.TEST_REPO),\n repo_rerun=re.escape(self.TEST_RERUN_REPO)\n )\n ),\n body=body\n )",
"def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")",
"def api_repo_get(access_key):\n repo = Repo.query.get(access_key)\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n \n if repo.is_private and 'working_repo' not in session:\n return jsonify(error=\"Unauthorized\"), 401\n elif repo.is_private and session['working_repo'] != repo.access_key:\n return jsonify(error=\"Unauthorized\"), 403\n elif repo.is_private and session['working_repo'] == repo.access_key:\n return jsonify(repo.to_json())\n else:\n return jsonify(repo.to_json())",
"def test_fetch_valid_github_repo(self):\n url = 'https://github.com/ivacf/archi'\n repo = GitHubRepoFetcher().fetch(url)\n self.assertEqual('archi', repo['name'])",
"def test_get_github_repos_info_negative(self):\n self.assertEqual(app.get_github_repos_info(\"undefined_user12345\")[\"status\"], 500)",
"def test_get_repo_false(self):\n repo = Repository(\"https://repo.com/fake.git\")\n self.assertFalse(repo.get_repo())\n if (os.path.exists(MEDIA_ROOT+\"/fake\")):\n shutil.rmtree(MEDIA_ROOT+\"/fake\")",
"def test_does_nothig_if_it_exists(self):\n repo = gnome.gh.repo_from_callback(MockCallback())\n repo.milestone_exists = MagicMock(return_value=True)\n repo.create_milestone = MagicMock()\n repo.ensure_milestone_exists('foo')\n repo.create_milestone.assert_not_called()",
"def test_00_package_exists_returns_false(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n # Resource that exists\r\n out, e = self.ckan.package_exists(name='not-found')\r\n assert out is False, \"It should return False as pkg does not exist\"\r\n # Handle error in CKAN server\r\n Mock.return_value = self.server_error\r\n try:\r\n pkg, e = self.ckan.package_exists(name=\"something-goes-wrong\")\r\n if e:\r\n raise e\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert status_code == 500, \"status_code should be 500\"\r\n assert type == \"CKAN: the remote site failed! package_show failed\"\r\n # Now with a broken JSON item\r\n Mock.return_value = FakeRequest(\"simpletext\", 200,\r\n {'content-type': 'text/html'})\r\n out, e = self.ckan.package_exists(name='not-found')\r\n assert out is False, \"It should return False as pkg does not exist\"\r\n # Handle error in CKAN server\r\n try:\r\n pkg, e = self.ckan.package_exists(name=\"something-goes-wrong\")\r\n if e:\r\n raise e\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert status_code == 200, \"status_code should be 200\"\r\n assert type == \"CKAN: JSON not valid\"",
"def test_no_release_wrong_repo():\n response_404 = Response()\n response_404.status_code = 404\n with pytest.raises(HTTPError) as ex, patch(\n 'github.requests.get', return_value=response_404\n ):\n get_release_pr('access_token', 'org', 'repo')\n\n assert ex.value.response.status_code == 404",
"def test_get_missing_svn_repo(self):\n repo = 'testgetmissingrepo'\n svn = SpokeSVN(self.org_name, self.user_id)\n self.assertFalse(svn.get(repo)['data'])",
"def test_01_package_exists_returns_pkg(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n # Resource that exists\r\n out, e = self.ckan.package_exists(name='urbanpark')\r\n assert out is not False, \"It should return a pkg\"\r\n err_msg = \"The pkg id should be the same\"\r\n assert out['id'] == self.pkg_json_found['result']['id'], err_msg",
"def mock_github_get(url):\n mock_repo_key = url.split(\"/\")[-1]\n\n result = requests.Response()\n result.status_code = 200\n result.encoding = \"utf-8\"\n result._content = repos[mock_repo_key].encode()\n\n return result",
"def test_get_name_from_any_remote(self, mock_get_repo): # pylint: disable=unused-argument\n self.assertEqual(_get_repo_name(self.project, ''), 'pybuilder_semver_git_tag')",
"def test_get_repositories_by_username_by_repo_slug_pullrequests_by_pull_request_id_statuses(self):\n pass",
"def test_fetch_repositories(self):\n self.maxDiff = None\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.response.json'][1]\n )\n )\n ]\n ):\n index = self.client.get(\"/\").data.decode()\n self.assertNotIn(\"Sign-in\", index, \"We are logged in\")\n self.assertIn(\"Hi ponteineptique!\", index, \"We are logged in\")\n\n # We check\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n self.assertEqual(repositories, {\"repositories\": []}, \"No repository on first get\")\n\n # We refresh by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'}\n ]\n },\n \"Github API is parsed correctly\"\n )\n\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt.response.json'][1]\n )\n )\n ]\n ):\n # We check it was saved\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'}\n ]\n },\n \"When logging in back, we should have the same old repos\"\n )\n\n # We update by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"Github API is parsed correctly\"\n )\n\n # We check it was saved and cleared before\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = 
sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"Old repos should have been cleared, new ones should be there !\"\n )\n\n with self.logged_in(\n access_token=\"nbiousndegoijubdognlksdngndsgmngds\",\n extra_mocks=[\n (\n \"get\",\n \"https://api.github.com/user/repos\",\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.response.json'][1]\n )\n ),\n (\n \"get\",\n re.compile(\"https://api.github.com/user/repos\\?.*page=2\"),\n dict(\n json=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt2.response.json'][0],\n headers=self.fixtures['./tests/fixtures/repos_ponteineptique.page2.alt2.response.json'][1]\n )\n )\n ]\n ):\n # We check it was saved\n repositories = loads(self.client.get(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n \"repositories\": [\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'octodog', 'owner': 'octocat'},\n {'name': 'oneKGreek', 'owner': 'ponteineptique'}\n ]\n },\n \"When logging in back, we should have the same old repos\"\n )\n\n # We update by posting\n repositories = loads(self.client.post(\"/api/hook/v2.0/user/repositories\").data.decode())\n repositories[\"repositories\"] = sorted(repositories[\"repositories\"], key=lambda x: x[\"name\"])\n self.assertEqual(\n repositories,\n {\n 'repositories': [\n {'name': 'Ahab', 'owner': 'Capitains'},\n {'name': 'Capitains.github.io', 'owner': 'Capitains'},\n {'name': 'Cavern', 'owner': 'Capitains'},\n {'name': 'ahab-legacy-existdb', 'owner': 'Capitains'},\n {'name': 'ahab-legacy-python', 'owner': 'Capitains'},\n {'name': 'alignment-editor', 'owner': 'alpheios-project'},\n {'name': 'alpheios-docs', 'owner': 'alpheios-project'},\n {'name': 'alpheios-flask', 'owner': 'alpheios-project'},\n {'name': 'alpheios5', 'owner': 'alpheios-project'},\n {'name': 'angular-nemo', 'owner': 'Capitains'},\n {'name': 'arethusa', 'owner': 'alpheios-project'},\n {'name': 'arethusa-cli', 'owner': 'alpheios-project'},\n {'name': 'arethusa-configs', 'owner': 'alpheios-project'},\n {'name': 'arethusa-example-data', 'owner': 'alpheios-project'},\n {'name': 'arethusa-experiments', 'owner': 'alpheios-project'},\n {'name': 'arethusa-ngdocs-generator', 'owner': 'alpheios-project'},\n {'name': 'arethusa-server', 'owner': 'alpheios-project'},\n {'name': 'basic-reader', 'owner': 'alpheios-project'},\n {'name': 'canonical-greekLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-latinLit', 'owner': 'PerseusDL'},\n {'name': 'canonical-norseLit', 'owner': 'PerseusDL'},\n {'name': 'chrome-wrapper', 'owner': 'alpheios-project'},\n {'name': 'cookiecutter-guidelines', 'owner': 'Capitains'},\n {'name': 'cts-api', 'owner': 'alpheios-project'},\n {'name': 'ctsworklist', 'owner': 'alpheios-project'},\n {'name': 'dummy1', 'owner': 'alpheios-project'},\n {'name': 'edit-utils', 'owner': 'alpheios-project'},\n {'name': 
'inflection-analysis-prototype', 'owner': 'alpheios-project'},\n {'name': 'morphlib', 'owner': 'alpheios-project'},\n {'name': 'morphwrappers', 'owner': 'alpheios-project'},\n {'name': 'nemo_arethusa_plugin', 'owner': 'alpheios-project'},\n {'name': 'schemas', 'owner': 'alpheios-project'},\n {'name': 'tei-digital-age', 'owner': 'alpheios-project'}\n ]},\n \"Github API is parsed correctly\"\n )",
"def test_get_repo_pulled(self):\n repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.assertTrue(repo.get_repo())\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/gitload_test\"))\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")",
"def test_get_release_pr_no_pulls():\n with patch(\n 'github.requests.get', return_value=Mock(json=Mock(return_value=[OTHER_PR]))\n ):\n assert get_release_pr('access_token', 'org', 'repo-missing') is None",
"def test_get_existing_issue_passes(self):\n response = self.client.get(self.url)\n response_json = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_json[\"name\"], TEST_ISSUE_NAME)",
"def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))",
"def test_returns_none_if_not_exists(self):\n repo = gnome.gh.repo_from_callback(MockCallback())\n repo._milestones = (MockFooMilestoneWrapper(),)\n found = repo.get_milestone('bar') # not 'foo'\n self.assertFalse(found)",
"def test_find_nonexistent_data(self):\n with app.app_context():\n\n resp = self.client.get(\"/tracking?repo=aa&branch=aa\")\n\n resp_dict = json.loads(resp.data)\n\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.SUCCESS, resp_dict.get(\"code\"), msg=\"Error in status code return\")\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.SUCCESS),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertIsNotNone(resp_dict.get(\"data\"), msg=\"Error in data information return\")\n self.assertEqual(resp_dict.get(\"data\"), [], msg=\"Error in data information return\")",
"def _handle_repository(self, repo):\n\n logger.debug(\"Loading configuration for repository: '%s' from '%s'.\"\n % (repo['name'],\n 'repositories-%s' % self._type))\n\n if 'id' in repo:\n logger.warning(\"Repository '%s' is defined as plain. It must be available \"\n \"inside the image as Cekit will not inject it.\"\n % repo['name'])\n return False\n\n if 'content_sets' in repo:\n self._fetch_repos = True\n return self._prepare_content_sets(repo)\n\n elif 'rpm' in repo:\n self._prepare_repository_rpm(repo)\n return False\n\n elif 'url' in repo:\n return True\n\n return False",
"def test_public_repos(self, mock_json):\n\n Response_payload = [{\"name\": \"Google\"}]\n mock_json.return_value = Response_payload\n\n with patch('client.GithubOrgClient._public_repos_url',\n new_callable=PropertyMock) as mock_public:\n\n mock_public.return_value = \"hello/world\"\n test_class = GithubOrgClient('test')\n result = test_class.public_repos()\n\n check = [rep[\"name\"] for rep in Response_payload]\n self.assertEqual(result, check)\n\n mock_public.assert_called_once()\n mock_json.assert_called_once()",
"def api_repo_patch(access_key):\n repo = Repo.query.get(access_key)\n data = request.get_json()\n errors = {}\n\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n\n validate = api_auth_validate(request, access_key)\n if not validate == True:\n return jsonify(error=validate['error']), validate['code']\n \n # Update\n if 'is_private' in data:\n if isinstance(data['is_private'], bool):\n repo.is_private = data['is_private']\n else:\n errors['is_private'] = 'Must be of type boolean.'\n \n if 'title' in data:\n if not isinstance(data['title'], str):\n errors['title'] = 'Must be of type string. '\n elif len(data['title']) > 50:\n err = errors.get('title', '')\n err = err + 'Must be 50 characters or less'\n errors['title'] = err\n else:\n repo.title = data['title']\n \n if 'description' in data:\n if not isinstance(data['description'], str):\n errors['description'] = 'Must be of type string. '\n elif len(data['description']) > 300:\n err = errors.get('description', '')\n err = err + 'Must be 300 characters or less'\n errors['description'] = err\n else:\n repo.description = data['description']\n \n # Abort if errors in request\n if len(errors.keys()) > 0:\n db.session.rollback()\n return jsonify(errors=errors), 400\n else:\n db.session.commit()\n return jsonify(message='success', repo=repo.to_json())",
"def test_do_nothing_if_already_exists(self):\n # I call that Haiku \"then nothing happens\"\n\n repo = gnome.gh.repo_from_callback(MockCallback())\n repo._milestones = (MockFooMilestoneWrapper(),)\n create_milestone_mock = MagicMock()\n repo._repo.create_milestone = create_milestone_mock\n repo.milestone_exists = MagicMock(return_value=True)\n repo.create_milestone('foo')\n repo._repo.create_milestone.assert_not_called()"
] | [
"0.6614973",
"0.6548657",
"0.6510613",
"0.6494966",
"0.6306936",
"0.6299396",
"0.6290716",
"0.6259003",
"0.625318",
"0.6162342",
"0.6154876",
"0.609253",
"0.6088747",
"0.607778",
"0.60644174",
"0.6063622",
"0.6023541",
"0.600028",
"0.60001606",
"0.59103364",
"0.58970094",
"0.58942693",
"0.5874921",
"0.58710086",
"0.5868439",
"0.58303046",
"0.5818303",
"0.5794545",
"0.5779266",
"0.5773733"
] | 0.6812977 | 0 |
Manage both add and delete of team membership. ``action_list`` is a list of tuples with (``username``, ``added (bool)``) to track the state of membership, since this will get called multiple times in one library call. | def callback_team_membership(
request, uri, headers, success=True, action_list=None
):
# pylint: disable=too-many-arguments
username = uri.rsplit('/', 1)[1]
if not success:
status_code = 500
if request.method == 'DELETE':
if success:
status_code = 204
action_list.append((username, False))
if request.method == 'PUT':
status_code = 200
action_list.append((username, True))
return (status_code, headers, '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actions(self, request, action_list, group):\n return action_list",
"def add_list(action, user):\n \n userprofile = user.get_profile()\n \n board = userprofile.get_board(action['boardId'])\n \n # Create the list\n l = List()\n l.title = action['what']['title']\n l.color = action['what']['color']\n l.creator = user\n l.save()\n \n # Add the list to the user's lists\n \n board.lists.append(l.id)\n userprofile.save()\n \n return l;",
"def update_member_list(request, **kwargs):\n data = request.DATA\n loadbalancer_id = data.get('loadbalancer_id')\n pool_id = kwargs.get('pool_id')\n existing_members = kwargs.get('existing_members')\n members_to_add = kwargs.get('members_to_add')\n members_to_delete = kwargs.get('members_to_delete')\n\n if members_to_delete:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n remove_member(request, **kwargs)\n elif members_to_add:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n add_member(request, **kwargs)\n elif data.get('monitor'):\n args = (request, loadbalancer_id, update_monitor)\n thread.start_new_thread(poll_loadbalancer_status, args)",
"def move_list(action, user):\n try:\n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n # Move the list\n userprofile = user.get_profile()\n \n new_board = userprofile.get_board(action['what']['newBoardId'])\n new_board.lists.append(action['listId'])\n \n board = userprofile.get_board(action['boardId'])\n board.lists.remove(action['listId'])\n \n userprofile.save()\n except:\n # the list or the board doesn't exist.\n pass",
"def grant_access(acl, list_to_edit):\n if request.POST[list_to_edit]:\n datastore_object = None\n if request.POST[list_to_edit].startswith('user'):\n datastore_object = models.UserProfile.load(request.POST[list_to_edit])\n else:\n datastore_object = models.UserGroup.get_by_id(\n int(request.POST[list_to_edit]))\n if datastore_object.key() not in acl.__getattribute__(list_to_edit):\n acl.__getattribute__(list_to_edit).append(datastore_object.key())",
"def rem_list(action, user):\n \n try:\n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n l.delete()\n \n # Remove\n userprofile = user.get_profile()\n board = userprofile.get_board(action['boardId'])\n board.lists.remove(action['listId'])\n userprofile.save()\n except:\n # the list or the board doesn't exist.\n pass",
"def test_changelist_actions(self):\n user = User.objects.create(username='test')\n url = reverse('admin:prescription_prescription_changelist')\n request = self._mocked_authenticated_request(url, user)\n admin = PrescriptionAdmin(Prescription, site)\n\n self.assertFalse(user.has_perm('prescription.can_delete'))\n self.assertFalse(user.has_perm('prescription.can_delete_approval'))\n\n actions = admin.get_actions(request)\n\n self.assertTrue('delete_selected' not in actions)\n self.assertTrue('delete_approval_endorsement' not in actions)\n\n content_type = ContentType.objects.get(app_label='prescription',\n model='prescription')\n delete = Permission.objects.get(codename='delete_prescription',\n content_type=content_type)\n approval = Permission.objects.get(codename='can_delete_approval',\n content_type=content_type)\n permissions = [\n (delete, 'delete_selected'),\n (approval, 'delete_approval_endorsement')]\n\n for permission, action in permissions:\n # ensure that for each permission and action name, the user is\n # able to perform that action from the action dropdown.\n user.user_permissions.add(permission)\n user = User.objects.get(username='test')\n request = self._mocked_authenticated_request(url, user)\n actions = admin.get_actions(request)\n self.assertTrue(action in actions)",
"async def edit_list(\n self,\n\t\tlist_id: int,\n\t\tname: Optional[str] = None,\n\t\tuser_ids: Optional[List[int]] = None,\n\t\tadd_user_ids: Optional[List[int]] = None,\n\t\tdelete_user_ids: Optional[List[int]] = None,\n\t\t**kwargs\n ) -> base.OkResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.editList\", params)\n model = base.OkResponse\n return model(**response).response",
"def process_actions(actions, user):\n \n process = {\n 'add_task': add_task,\n 'rem_task': rem_task,\n 'add_list': add_list,\n 'rem_list': rem_list,\n 'move_list': move_list,\n 'edit_list': edit_list,\n 'edit_item': edit_item,\n 'add_board': add_board,\n 'rem_board': rem_board,\n 'edit_board': edit_board,\n }\n \n #TODO: handle errors\n modified_lists = dict()\n tmp_id_to_new_ids = [] # [(<tmp id>, <new id>), ..]\n \n while actions:\n \n action = actions.pop(0)\n \n try:\n fn = process.get(action['type'])\n if not fn: raise ActionDoesNotExist # the demanded action does not exist\n \n returned_list = fn(action, user)\n except (InsufficientPermissions, ActionDoesNotExist, List.DoesNotExist, Item.DoesNotExist):\n # Cannot modify this list\n # Add errors in the response\n continue\n \n # If there's a return value\n if returned_list and isinstance(returned_list, List): modified_lists[returned_list.id] = returned_list\n \n if action['type'] == 'add_list':\n # Change the temporary id to the new id in all the remaining actions\n for x in actions: \n if x.get('listId') == action['listId']: \n x['listId'] = returned_list.id\n tmp_id_to_new_ids.append((action['listId'], returned_list.id))\n \n return modified_lists, tmp_id_to_new_ids",
"def edit_list(action, user):\n \n editable_attributes = ('title', 'color', 'collapse')\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n for key, value in action['what'].iteritems():\n if key in editable_attributes:\n l.__setattr__(key, value)\n l.save()\n \n return l",
"def user_list_update(self):\n\t\tclient_log.debug(f'Запрос списка известных пользователей {self.username}')\n\t\treq = {\n\t\t\tACTION: USERS_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: self.username\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tself.database.add_users(ans[LIST_INFO])\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список известных пользователей.')",
"def fetch_members_for_list(self, list_id):\n def save_member(mbr):\n Member.objects(member_id=mbr[\"member_id\"]).update_one(upsert=True, set__email=mbr[\"email\"],\n set__updated_at=datetime.utcnow())\n return mbr[\"member_id\"]\n members_euid = [save_member(mbr) for mbr in self.mw.get_members(list_id)]\n List.objects(list_id=list_id).update(set__members_euid=members_euid,\n set__updated_at=datetime.utcnow())",
"def actionlist(username):\n username = username.strip()\n print('Hi, %s' % username)\n filename = '%s.actionlist' % username\n action_list = []\n if os.path.exists(filename):\n with open(filename, 'r') as file_actionlist:\n action_list = [line.strip() for line in file_actionlist]\n else:\n # add clockwise/counterCW gestures ROUNDLEN * SLIDE_SAMPLEN,\n # start position randomly generated\n nonslides = [\n '%s_%s' % (username, item)\n for item in GESTURES_DICT\n #if item not in ('click', 'clockwise', 'countercw')\n for i in range(SAMPLEN)]\n \"\"\"\n clockwises = slides('clockwise', username)\n countercws = slides('countercw', username)\n singleclicks = clicks(username)\n if 'click' in GESTURES_DICT:\n nonslides.extend(singleclicks)\n if 'clockwise' in GESTURES_DICT:\n action_list.extend(clockwises)\n if 'countercw' in GESTURES_DICT:\n action_list.extend(countercws) \n \"\"\" \n random.shuffle(nonslides)\n random_insert_seq(action_list, nonslides)\n action_list = ['%s_%d'% (item, ind)\n for ind, item in enumerate(action_list)]\n return action_list",
"def update_members(self, new_member_list):\n updated_members = 0\n request_list = list()\n\n # stale_members contains all old members at first, all current\n # members get then removed so that the remaining can get deleted\n stale_members = set(self.members)\n\n for member in new_member_list:\n m = Persona.query.get(member[\"id\"])\n\n if m is None:\n m = Persona(id=member[\"id\"], _stub=True)\n\n if m._stub is True:\n request_list.append(member[\"id\"])\n\n try:\n # Old and new member; remove from stale list\n stale_members.remove(m)\n except KeyError:\n # New member\n self.members.append(m)\n updated_members += 1\n\n # Remove old members that are not new members\n for member in stale_members:\n self.members.remove(member)\n\n app.logger.info(\"Updated {}'s members: {} added, {} removed, {} requested\".format(\n self.username, updated_members, len(stale_members), len(request_list)))\n\n return request_list",
"def removeActorList(self, actorList):\n pass",
"def remove_access(acl, list_to_edit):\n post_key = '%s_remove_' % list_to_edit\n removal_keys = [k for k in request.POST.keys() if k.startswith(post_key)]\n for key in removal_keys:\n model_type = models.UserGroup\n if list_to_edit.startswith('user'):\n model_type = models.UserProfile\n key_id = int(key.replace(post_key, ''))\n datastore_object = model_type.get_by_id(key_id)\n acl.__getattribute__(list_to_edit).remove(datastore_object.key())",
"def test_permission_add_multiple_actions_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission add test_user LOG_VIEW FILE_VIEW')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )",
"def test_permission_remove_action_for_all_users(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission add anonymous TICKET_CREATE')\n self._execute('permission remove * TICKET_CREATE')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def _add(self, signup_form_id, group_ids):\n path = '/members/add'\n data = self.extract()\n if group_ids:\n data['group_ids'] = group_ids\n if signup_form_id:\n data['signup_form_id'] = signup_form_id\n\n outcome = self.account.adapter.post(path, data)\n self['member_status_id'] = outcome['status']\n if outcome['added']:\n self['member_id'] = outcome['member_id']",
"def add_action(self, action):\n if action in self.actions:\n return\n elif self.actions == Action.actor_idle:\n self.actions = [action]\n elif action == Action.idle:\n self.actions = Action.actor_idle\n elif action not in self.actions:\n self.actions += [action]",
"def create_action_set(number_of_actions=4, add_no_change=True):\n # TO DO:\n # - check if the number of actions is equal.\n split_num = number_of_actions/2\n action_list = []\n for x in range(int(split_num)):\n action_list.append(\"lb\"+\"_\"+str(x))\n action_list.append(\"ub\"+\"_\"+str(x))\n\n if add_no_change == True:\n action_list.append(\"no_change\")\n return action_list",
"def add_members(id): # pylint: disable=I0011,W0622\n\n l = Legacy.query.get_or_404(id)\n\n if current_app.config.get('IGNORE_AUTH') is not True: # pragma: no cover\n if l.owner_id != g.user.id:\n raise Http403('Access denied')\n\n if not l.can_modify(g.user.id):\n raise Http403('Access denied')\n\n if request.json is None or 'members' not in request.json:\n raise NoData('\"members\" was not specified')\n\n member_list = request.json['members']\n\n if not isinstance(member_list, list):\n raise IncorrectData('\"members\" was not a valid list')\n\n for member in member_list:\n member = str(member)\n\n if not bool(re.match(r'^\\d+$', member)):\n member = Person.query.filter_by(email=member).first()\n\n if member is None:\n # TODO: Invite member\n # TODO: Queue a task to assign member on signup\n\n continue\n else:\n member = Person.query.get(member)\n\n if member is None:\n raise IncorrectData('Member not found')\n\n # TODO: Queue a task to assign member on acceptance\n\n # TODO: Remove following after member confirmation is done\n l.members.append(member)\n\n # TODO: Remove following after member confirmation is done\n l.save()\n\n return {}",
"def admin_actions():\n\n create_default_admin()\n return response('Admin account has been created', 201)",
"def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l",
"def test_permission_remove_multiple_actions_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous WIKI_CREATE WIKI_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def callback_team_members(\n self, request, uri, headers,\n status_code=200, members=None\n ):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument,too-many-arguments\n if members is None:\n members = self.TEST_TEAM_MEMBERS\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n return (status_code, headers, json.dumps(\n [dict(login=x) for x in members]\n ))",
"def actions(self, state):\n myActionList= (1,2);\n return myActionList",
"def test_add_action(self):\n mdp = MDP()\n\n mdp.add_action(0)\n self.assertEqual(mdp.num_actions(), 1)\n self.assertEqual(type(mdp.get_action(0)), Action)\n self.assertIn(mdp.get_action(0), mdp.get_action_list())",
"def on_modified(l):\n actors.clear()\n for e in self:\n actor = e.create_actor()\n if can_delete:\n actor.new_action(\"Delete\").connect('activated', on_delete)\n actors.append(actor)"
] | [
"0.5766691",
"0.5531757",
"0.5485128",
"0.54238945",
"0.53260785",
"0.5270922",
"0.5267349",
"0.52224904",
"0.51530606",
"0.51122624",
"0.4995376",
"0.4994134",
"0.49654278",
"0.49381015",
"0.49172172",
"0.4907861",
"0.4896086",
"0.48836443",
"0.48314428",
"0.48299542",
"0.48076153",
"0.4807085",
"0.480566",
"0.4798372",
"0.47981933",
"0.4780226",
"0.4768681",
"0.47659272",
"0.47579235",
"0.47571442"
] | 0.63153493 | 0 |
Register repo check URL and method. | def register_repo_check(self, body):
httpretty.register_uri(
httpretty.GET,
re.compile(
'^{url}repos/{org}/({repo}|{repo_rerun})$'.format(
url=self.URL,
org=self.ORG,
repo=re.escape(self.TEST_REPO),
repo_rerun=re.escape(self.TEST_RERUN_REPO)
)
),
body=body
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))",
"def addRepository(self, uri):\n pass",
"def register(self, hook_url):\n raise NotImplementedError()",
"def add_repo(repo_name, url):\n\n # First, validate the URL\n if not utils.is_valid_url(url):\n log.e(TAG, \"Invalid URL provided (missing http/s)\")\n return -2\n\n # Next, make sure this repo doesnt exist\n if __has_repo(repo_name):\n log.e(TAG, \"Repo name '%s' already exists!\" % repo_name)\n return -3\n\n return __add_repo(repo_name, url)",
"def repository_create_hosted():\n pass",
"def repository_create_proxy():\n pass",
"def init_hook(conduit):\n repos = conduit.getRepos()\n for repo in repos.listEnabled():\n if len(repo.baseurl) == 0:\n continue\n bucket, path = parse_url(repo.baseurl[0])\n if bucket and isinstance(repo, YumRepository):\n check_base_url(repo.baseurl)\n replace_repo(repos, repo)",
"def addRepository(self, name, url):\n sslVerify = \"yes\" if url.startswith(\"https\") else \"no\"\n self.manager.addKickstartRepository(self.currentProject, baseurl=url,\n name=name,\n ssl_verify=sslVerify)\n self.manager.saveKickstartFile(self.currentProject)\n self.refresh()",
"def register_repo_create(self, body):\n httpretty.register_uri(\n httpretty.POST,\n '{url}orgs/{org}/repos'.format(\n url=self.URL,\n org=self.ORG,\n ),\n body=body\n )",
"def callback_repo_check(self, request, uri, headers, status_code=404):\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n # Handle the new \"rerun\" repo differently\n if self.TEST_RERUN_REPO in uri:\n status_code = 404\n return (status_code, headers, json.dumps({'message': 'testing'}))",
"def repo_add(self, name, url, **kwargs):\n\n self.helm_client.repo_add(name, url, **kwargs)",
"def __add_repo(repo_name, url):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n entry = [(repo_name, url)]\n\n sql = ('INSERT INTO repos (repo_name, url)'\n 'VALUES (?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return 0",
"def test_repo_config_basic_port() -> None:\n repo = RepositoryConfiguration(name=\"pypi\", base_url=\"https://pypi.org:443/pypi\")\n assert repo.get_access_url() == \"https://pypi.org:443/pypi\"\n assert repo.name == \"pypi\"",
"def test_register():\n repobee.try_register_plugin(\n sanitizer, sanitizer.SanitizeRepo, sanitizer.SanitizeFile\n )",
"def __init__(self, repo_url, creds, branch, repo_path=None, validate=True):\n parsed = urlparse(repo_url)\n self.scheme = parsed.scheme\n self.hostname = parsed.hostname\n self.org, self.repo = parsed.path.strip('/').split('/')\n self.creds = creds\n self.branch = branch\n self.repo_path = repo_path\n self.git_repo = None\n self.validate = validate",
"def register(id, name, get_packages, find_installed_packages):\n repo = Repository()\n repo.id = id\n repo.name = name\n repo.get_packages = get_packages\n repo.find_installed_packages = find_installed_packages\n repositories.append(repo)",
"def _add_repo(repo_name, repo_url, index):\n\n package_manager = _get_package_manager()\n package_manager.add_repo(repo_name, repo_url, index)\n\n return 0",
"def register_git_repository(args, namespace, notifier=None):\n\n tempdir = tempfile.mkdtemp()\n subprocess.check_call(\n \"\"\"\n cd {} &&\n git clone {} user_code\n \"\"\".format(tempdir, args.git_repository), shell=True)\n return register(Service, args, namespace,\n os.path.join(tempdir, 'user_code'), notifier)",
"async def add(self, ctx: Context, url: str):\n if url not in self.urls:\n self.urls.append(url)\n self.write_vac()\n await ctx.channel.send('Registered <{}> to checker.'.format(url))\n else:\n await ctx.channel.send('<{}> is already registered to checker.'.format(url))",
"def _AddRepository(url):\n name = url.split('/')[-1]\n\n if ndb.Key(Repository, name).get():\n raise AssertionError(\"Attempted to add a repository that's already in the \"\n 'Datastore: %s: %s' % (name, url))\n\n Repository(id=name, urls=[url]).put()\n return name",
"def git():\n pass",
"def checkGit(directory):",
"def add_addon_repository(self, repo: str) -> None:\n if repo in self._data[ATTR_ADDONS_CUSTOM_LIST]:\n return\n\n self._data[ATTR_ADDONS_CUSTOM_LIST].append(repo)",
"def _checkServiceURL(self, serviceName, options):\n url = self._getURL(serviceName, options)\n system = options['System']\n module = options['Module']\n self.log.info(\"Checking URLs for %s/%s\" % (system, module))\n urlsConfigPath = os.path.join('/Systems', system, self.setup, 'URLs', module)\n urls = gConfig.getValue(urlsConfigPath, [])\n self.log.debug(\"Found configured URLs for %s: %s\" % (module, urls))\n self.log.debug(\"This URL is %s\" % url)\n runitStatus = options['RunitStatus']\n wouldHave = 'Would have ' if not self.commitURLs else ''\n if runitStatus == 'Run' and url not in urls:\n urls.append(url)\n message = \"%sAdded URL %s to URLs for %s/%s\" % (wouldHave, url, system, module)\n self.log.info(message)\n self.accounting[serviceName + \"/URL\"][\"Treatment\"] = message\n self.csAPI.modifyValue(urlsConfigPath, \",\".join(urls))\n if runitStatus == 'Down' and url in urls:\n urls.remove(url)\n message = \"%sRemoved URL %s from URLs for %s/%s\" % (wouldHave, url, system, module)\n self.log.info(message)\n self.accounting[serviceName + \"/URL\"][\"Treatment\"] = message\n self.csAPI.modifyValue(urlsConfigPath, \",\".join(urls))",
"def register_team_repo_add(self, body):\n httpretty.register_uri(\n httpretty.PUT,\n re.compile(\n r'^{url}teams/\\d+/repos/{org}/({repo}|{rerun_repo})$'.format(\n url=self.URL,\n org=self.ORG,\n repo=re.escape(self.TEST_REPO),\n rerun_repo=re.escape(self.TEST_RERUN_REPO)\n )\n ),\n body=body\n )",
"def register_hook_create(self, body, status):\n test_url = '{url}repos/{org}/{repo}/hooks'.format(\n url=self.URL,\n org=self.ORG,\n repo=self.TEST_REPO\n )\n # Register for hook endpoint\n httpretty.register_uri(\n httpretty.POST,\n test_url,\n body=body,\n status=status\n )",
"def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))",
"def test_api_repo_status_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params2()\n path, method = default_api.api_repo_status_get(params)\n self.assertEqual(path, '/api/repo_status')\n self.assertEqual(method, 'GET')",
"def __init__(self, repository):\n self.__repo = repository",
"def nexus_register(self):\n pylon = self.pylons[0] # Temporary hard code. Ideally this would pick a random valid pylon.\n\n if pylon[\"cm\"] == \"http\":\n return self._http_nexus_register()"
] | [
"0.648579",
"0.60714537",
"0.58604896",
"0.5853189",
"0.58087116",
"0.5801936",
"0.5749618",
"0.5737087",
"0.5730165",
"0.571998",
"0.5632852",
"0.5463911",
"0.54567236",
"0.53939337",
"0.53857875",
"0.5376803",
"0.53699636",
"0.52942485",
"0.5257235",
"0.5177173",
"0.5175245",
"0.51313245",
"0.5128633",
"0.5118998",
"0.5108654",
"0.50820893",
"0.5032565",
"0.50199455",
"0.5007233",
"0.5000883"
] | 0.7127913 | 0 |
Register URL for repo create. | def register_repo_create(self, body):
httpretty.register_uri(
httpretty.POST,
'{url}orgs/{org}/repos'.format(
url=self.URL,
org=self.ORG,
),
body=body
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))",
"def repository_create_hosted():\n pass",
"def repo_add(self, name, url, **kwargs):\n\n self.helm_client.repo_add(name, url, **kwargs)",
"def addRepository(self, uri):\n pass",
"def addRepository(self, name, url):\n sslVerify = \"yes\" if url.startswith(\"https\") else \"no\"\n self.manager.addKickstartRepository(self.currentProject, baseurl=url,\n name=name,\n ssl_verify=sslVerify)\n self.manager.saveKickstartFile(self.currentProject)\n self.refresh()",
"def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url",
"def repository_create_proxy():\n pass",
"def create_repo_cli(api_client, url, provider, path):\n content = ReposApi(api_client).create(url, provider, path)\n click.echo(pretty_format(content))",
"def newrepo():\n form = AddRepoForm()\n if form.validate_on_submit():\n\n # make the directory for this package\n os.mkdir(DATA + form.name.data)\n\n flash('Repo created successfully')\n\n # redirect to the login page\n return redirect(url_for('home.dashboard'))\n\n # load registration template\n return render_template('home/add.html', form=form, title='Local Repo', target=\"add\")",
"def new(url):\n from grit import Repo\n return Repo.new(url=url, bare=True)",
"def __add_repo(repo_name, url):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n entry = [(repo_name, url)]\n\n sql = ('INSERT INTO repos (repo_name, url)'\n 'VALUES (?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return 0",
"def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))",
"def _AddRepository(url):\n name = url.split('/')[-1]\n\n if ndb.Key(Repository, name).get():\n raise AssertionError(\"Attempted to add a repository that's already in the \"\n 'Datastore: %s: %s' % (name, url))\n\n Repository(id=name, urls=[url]).put()\n return name",
"def command_new_repo(self):\n repoinit.new_repo(*self.args())",
"def create_url(self):\n self.base_url = self.base + self.strs[jpn.path_latest]",
"def add_repo(repo_name, url):\n\n # First, validate the URL\n if not utils.is_valid_url(url):\n log.e(TAG, \"Invalid URL provided (missing http/s)\")\n return -2\n\n # Next, make sure this repo doesnt exist\n if __has_repo(repo_name):\n log.e(TAG, \"Repo name '%s' already exists!\" % repo_name)\n return -3\n\n return __add_repo(repo_name, url)",
"def _make_url(self):\n ...",
"def api_repo_create():\n form = NewRepoForm()\n if form.validate_on_submit():\n # On the miniscule chance we generate a non-unique access key, loop and try again.\n success = False\n while not success:\n new_repo = Repo.create(\n pass_phrase = form.pass_phrase.data,\n title = form.title.data,\n description = form.description.data,\n is_private = form.is_private.data\n )\n db.session.add(new_repo)\n try:\n db.session.commit()\n success = True\n except:\n db.session.rollback()\n success = False\n session['working_repo'] = new_repo.access_key\n return jsonify(message='success', created=new_repo.access_key)\n else:\n return jsonify(message=\"failed\", errors=form.errors_to_json()), 400",
"def _create_repo(\n self,\n repo_id: str,\n private: Optional[bool] = None,\n token: Optional[Union[bool, str]] = None,\n repo_url: Optional[str] = None,\n organization: Optional[str] = None,\n ) -> str:\n if repo_url is not None:\n warnings.warn(\n \"The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` \"\n \"instead.\"\n )\n if repo_id is not None:\n raise ValueError(\n \"`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`.\"\n )\n repo_id = repo_url.replace(f\"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/\", \"\")\n if organization is not None:\n warnings.warn(\n \"The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your \"\n \"organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`).\"\n )\n if not repo_id.startswith(organization):\n if \"/\" in repo_id:\n repo_id = repo_id.split(\"/\")[-1]\n repo_id = f\"{organization}/{repo_id}\"\n\n url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True)\n return url.repo_id",
"def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))",
"def add_repo_url(image, repository, repositories):\n try:\n path = repositories[repository]\n path = path.strip(\"/\").replace(\"https://\", \"\").replace(\"http://\", \"\")\n image = \"/\".join([path, image])\n except KeyError:\n raise KeyError(f\"Repository {repository} not defined!\")\n return image",
"def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"",
"def create_remote(self, name: str, url: str, **kwargs: Any) -> Remote:\n return Remote.create(self, name, url, **kwargs)",
"def register_team_repo_add(self, body):\n httpretty.register_uri(\n httpretty.PUT,\n re.compile(\n r'^{url}teams/\\d+/repos/{org}/({repo}|{rerun_repo})$'.format(\n url=self.URL,\n org=self.ORG,\n repo=re.escape(self.TEST_REPO),\n rerun_repo=re.escape(self.TEST_RERUN_REPO)\n )\n ),\n body=body\n )",
"def infocalypse_create(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n insert_uri = opts['uri']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --uri.\\n\")\n return\n\n set_target_version(ui_, repo, opts, params,\n \"Only inserting to version(s): %s\\n\")\n params['INSERT_URI'] = insert_uri\n execute_create(ui_, repo, params, stored_cfg)",
"def repository_create_hosted_recipe(ctx: click.Context, **kwargs):\n _create_repository(ctx, 'hosted', **kwargs)",
"def register_repo_check(self, body):\n httpretty.register_uri(\n httpretty.GET,\n re.compile(\n '^{url}repos/{org}/({repo}|{repo_rerun})$'.format(\n url=self.URL,\n org=self.ORG,\n repo=re.escape(self.TEST_REPO),\n repo_rerun=re.escape(self.TEST_RERUN_REPO)\n )\n ),\n body=body\n )",
"def _generate_url(self, **kwargs):\n path = self.url_path.format(**kwargs)\n return self.poolbot.generate_url(path)",
"def register(self, hook_url):\n raise NotImplementedError()",
"async def publish_endpoint(repo_name):\n config = config_module.Config.get_instance()\n if repo_name not in config.repos:\n abort(400)\n\n repo = config.repos[repo_name]\n handler = handler_dispatcher(repo)\n\n resp = await handler.handle_request(request)\n\n config.save()\n return resp"
] | [
"0.7228415",
"0.69657815",
"0.66111845",
"0.6596335",
"0.6379819",
"0.6372043",
"0.6295349",
"0.62912536",
"0.62566435",
"0.62522775",
"0.6242981",
"0.61664414",
"0.6139451",
"0.60608256",
"0.6047676",
"0.60390604",
"0.60049415",
"0.59894115",
"0.5982883",
"0.5956164",
"0.5931266",
"0.5856418",
"0.58166295",
"0.58113885",
"0.5788459",
"0.57634056",
"0.57560223",
"0.5739194",
"0.5709762",
"0.5661997"
] | 0.76181614 | 0 |
Simple hook list URL. | def register_hook_list(self, body=None, status=200):
if body is None:
body = json.dumps(
[{
'url': '{url}repos/{org}/{repo}/hooks/1'.format(
url=self.URL, org=self.ORG, repo=self.TEST_REPO
)
}]
)
test_url = '{url}repos/{org}/{repo}/hooks'.format(
url=self.URL,
org=self.ORG,
repo=self.TEST_REPO
)
# Register for hook endpoint
httpretty.register_uri(
httpretty.GET,
test_url,
body=body,
status=status
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def url(self):\n return reverse('snippet-list')",
"def getURLs():",
"def list(self):\n return self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.HOOKS.value,\n ).get('hooks')",
"def url_list(path):\n match = re.match(r'^.*(/wa/[A-Za-z0-9/-]+)([A-Za-z-]+)/([0-9]+/)?$', path)\n return u'%s%s%s/' % (match.group(1), match.group(2), \n settings.ACTION_ADMIN_LIST_SUFFIX)",
"def _get_changelist_named_url(self):\n return 'admin:fobi_formwizardhandler_changelist'",
"def hook_name(self) -> str:",
"def _get_changelist_named_url(self):\n return 'admin:fobi_formhandler_changelist'",
"def list_urls(self, prefix: str = \"\", etl_name: str = None) -> Iterable[str]:",
"def get_webhooks(ctx, url_match):\n if url_match:\n url_match_re = re.compile(url_match)\n org = ctx.obj.org\n for repo in org.iter_repos():\n for hook in repo.iter_hooks():\n for k, v in hook.config.items():\n if (url_match and url_match_re.search(v)) or not url_match:\n print(\"{org}.{repo}.{name}.{k}: {v}\".format(\n org=org.login, repo=repo.name,\n name=hook.name, k=k, v=v))",
"def url():\n ...",
"def get_urls():\r\n return []",
"def list(\n self,\n name,\n ):\n pass",
"def _get_post_list_url(self):\n return urljoin(self.BASE_URL, self.POST_LIST_URL)",
"def url(self):\n ...",
"def urls(self) -> list[str]:\r\n ...",
"def cli(context):\r\n\tclick.echo('getting webhookinfo for', context.config.API)\r\n\trp = tornado.httpclient.HTTPClient().fetch(context.config.API % 'getWebhookInfo')\r\n\tclick.echo(rp.body)",
"def url_event_listener():\n track_template = \"<a href=\\\"{0}\\\" target=\\\"_blank\\\" onclick=\\\"trackOutboundLink('{0}'); return false;\\\"\"\n if request.method == 'POST':\n urls = request.form['url_textbox']\n track_urls = [track_template.format(url.strip()) for url in urls.split('\\n')]\n return render_template('link_tracking.html', links=track_urls)\n return render_template('link_tracking.html', links=[])",
"def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):",
"def api_list(resource_name):\n if ':' in resource_name:\n api_name, resource_name = resource_name.split(':', 1)\n else:\n api_name = 'v1'\n return reverse('api_dispatch_list', kwargs={\n 'api_name': api_name,\n 'resource_name': resource_name,\n }) + '?format=json'",
"def _get_changelist_named_url(self):\n return 'admin:fobi_formelement_changelist'",
"def list():",
"def list():",
"def menu_blender_python_tutorial(self, event=None):\n self.link('http://jmsoler.free.fr/didacticiel/blender/tutor/english/index_prog_python.htm')",
"def test_urllist_command(self):\n # TODO: Write tests for when there is no superuser.\n # This seemed to not work when using this command on PythonAnywhere the first time\n pass",
"def on_hook(self) -> None:",
"def url_shortner(self):",
"def appurl( instkey, name, **matchdict ) :",
"def get_action_url(checklist, index):\r\n return checklist['items'][index]['action_url']",
"def _get_changelist_named_url(self):\n raise NotImplementedError(\n \"You should implement `_get_changelist_named_url`\"\n )",
"async def list(self, ctx):\n message = '\\n'.join(sorted(self.etrigs['etrigs'].keys(), key=str.lower))\n message = '```http\\n{}\\n```'.format(message)\n await ctx.send(message)"
] | [
"0.61078",
"0.60590255",
"0.60329723",
"0.60213965",
"0.6001832",
"0.5835626",
"0.5812988",
"0.5810151",
"0.575069",
"0.568352",
"0.56398803",
"0.5614848",
"0.5573087",
"0.54942304",
"0.54856974",
"0.54843545",
"0.5479251",
"0.54742557",
"0.5471129",
"0.54698616",
"0.5446562",
"0.5446562",
"0.5443403",
"0.54043907",
"0.53927535",
"0.5383529",
"0.53331417",
"0.5323027",
"0.53119564",
"0.5307244"
] | 0.6599427 | 0 |
Register team repo addition. | def register_team_repo_add(self, body):
httpretty.register_uri(
httpretty.PUT,
re.compile(
r'^{url}teams/\d+/repos/{org}/({repo}|{rerun_repo})$'.format(
url=self.URL,
org=self.ORG,
repo=re.escape(self.TEST_REPO),
rerun_repo=re.escape(self.TEST_RERUN_REPO)
)
),
body=body
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))",
"def _RegisterAmberRepository(self, tuf_repo, remote_port):\n\n # Extract the public signing key for inclusion in the config file.\n root_keys = []\n root_json_path = os.path.join(tuf_repo, 'repository', 'root.json')\n root_json = json.load(open(root_json_path, 'r'))\n for root_key_id in root_json['signed']['roles']['root']['keyids']:\n root_keys.append({\n 'Type': root_json['signed']['keys'][root_key_id]['keytype'],\n 'Value': root_json['signed']['keys'][root_key_id]['keyval']['public']\n })\n\n # \"pm serve\" can automatically generate a \"config.json\" file at query time,\n # but the file is unusable because it specifies URLs with port\n # numbers that are unreachable from across the port forwarding boundary.\n # So instead, we generate our own config file with the forwarded port\n # numbers instead.\n config_file = open(os.path.join(tuf_repo, 'repository', 'repo_config.json'),\n 'w')\n json.dump({\n 'ID': _MANAGED_REPO_NAME,\n 'RepoURL': \"http://127.0.0.1:%d\" % remote_port,\n 'BlobRepoURL': \"http://127.0.0.1:%d/blobs\" % remote_port,\n 'RatePeriod': 10,\n 'RootKeys': root_keys,\n 'StatusConfig': {\n 'Enabled': True\n },\n 'Auto': True\n }, config_file)\n config_file.close()\n\n # Register the repo.\n return_code = self._target.RunCommand(\n [('amberctl rm_src -n %s; ' +\n 'amberctl add_src -f http://127.0.0.1:%d/repo_config.json')\n % (_MANAGED_REPO_NAME, remote_port)])\n if return_code != 0:\n raise Exception('Error code %d when running amberctl.' % return_code)",
"def add(orgname, pat, reponame, branchname):\n g = Github(pat)\n repo = g.get_organization(orgname).get_repo(reponame)\n all_files = []\n contents = repo.get_contents(\"\")\n while contents:\n file_content = contents.pop(0)\n if file_content.type == \"dir\":\n contents.extend(repo.get_contents(file_content.path))\n else:\n file = file_content\n all_files.append(str(file)\n .replace('ContentFile(path=\"', '')\n .replace('\")', ''))\n\n with open('./CODEOWNERS', 'r') as file:\n content = file.read()\n\n # Upload to github\n git_prefix = '.github/'\n git_file = git_prefix + 'CODEOWNERS'\n if git_file in all_files:\n contents = repo.get_contents(git_file)\n repo.update_file(contents.path,\n \"updating CODEOWNERS\",\n content,\n contents.sha,\n branch=branchname)\n print(git_file + ' updated for: ' + reponame)\n else:\n repo.create_file(git_file,\n \"adding CODEOWNERS\",\n content,\n branch=branchname)\n print(git_file + ' created for: ' + reponame)",
"def test_add_team_manager_to_team(self):\n pass",
"def register(id, name, get_packages, find_installed_packages):\n repo = Repository()\n repo.id = id\n repo.name = name\n repo.get_packages = get_packages\n repo.find_installed_packages = find_installed_packages\n repositories.append(repo)",
"def _add_repo(repo_name, repo_url, index):\n\n package_manager = _get_package_manager()\n package_manager.add_repo(repo_name, repo_url, index)\n\n return 0",
"def add_addon_repository(self, repo: str) -> None:\n if repo in self._data[ATTR_ADDONS_CUSTOM_LIST]:\n return\n\n self._data[ATTR_ADDONS_CUSTOM_LIST].append(repo)",
"def register_repo_create(self, body):\n httpretty.register_uri(\n httpretty.POST,\n '{url}orgs/{org}/repos'.format(\n url=self.URL,\n org=self.ORG,\n ),\n body=body\n )",
"def repo_add(self, name, url, **kwargs):\n\n self.helm_client.repo_add(name, url, **kwargs)",
"def __add_repo(repo_name, url):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n entry = [(repo_name, url)]\n\n sql = ('INSERT INTO repos (repo_name, url)'\n 'VALUES (?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return 0",
"def process_repo(vb, options):\n if not options.repo:\n return\n\n vb.add_repo(options.repo_os, options.repo_id, options.repo_name, options.repo_url,\n options.unique, options.repo_tags)",
"def fusion_api_add_repository(self, body, api=None, headers=None):\n return self.repository.post(body, api, headers)",
"def addRepository(self, name, url):\n sslVerify = \"yes\" if url.startswith(\"https\") else \"no\"\n self.manager.addKickstartRepository(self.currentProject, baseurl=url,\n name=name,\n ssl_verify=sslVerify)\n self.manager.saveKickstartFile(self.currentProject)\n self.refresh()",
"def __gitSubmoduleAdd(self):\n self.vcs.gitSubmoduleAdd(self.project.getProjectPath())",
"def addRepository(self, uri):\n pass",
"def __gitAddRemote(self):\n self.vcs.gitAddRemote(self.project.getProjectPath())",
"def add_team(inp_to_add, type_to_add, host, root, password):\r\n team_name = \"\"\r\n\r\n if type_to_add == \"url\":\r\n team_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n team_site = inp_to_add\r\n else:\r\n team_soup, team_site = get_first_search_result(\r\n SOCCER_URL + \"/search/teams/?q=\" + inp_to_add)\r\n\r\n if team_soup:\r\n # Need to examine if league already exists, if not - add it. Then, get its LEAGUE_ID\r\n league_url = SOCCER_URL + team_soup.find('div', id=\"page_team_1_block_team_table_9-wrapper\").h2.a[\"href\"]\r\n find_league({league_url}, \"url\", host, root, password)\r\n\r\n team_name = team_soup.find(\"table\", class_=\"leaguetable sortable table\").tbody.find_all(\r\n 'tr', class_=[\"odd highlight team_rank\", \"even highlight team_rank\"])[0].find(\r\n 'td', class_=\"text team large-link\").a.text\r\n\r\n return team_name",
"def add_repo(self, id, user, repo):\n request = self.request_builder('orgs.teams.add_repo',\n id=id, user=user, repo=repo)\n return self._put(request)",
"def register_team(self, agents_on_team):\n self.agents_on_team = agents_on_team",
"def team_add(token_user):\n if not json_param_exists('name') or \\\n not json_param_exists('type'):\n abort(400, \"one or more required parameter is missing\")\n name = request.json['name']\n team_type = TeamType.query.filter_by(name=request.json['type']).first()\n if not team_type:\n abort(400, \"invalid team type\")\n\n if team_type.name == 'other_team':\n if not token_user.has_permission('team.create') and \\\n not token_user.has_permission('team.create.elevated'):\n abort(403, 'team creation is not permitted')\n else: # creating any team other than 'other_team' requires elevated\n if not token_user.has_permission('team.create.elevated'):\n abort(403, 'insufficient permissions to create a team of this type')\n\n team = Team(name=name)\n team.team_type = team_type\n\n try:\n get_db().add(team)\n get_db().commit()\n except IntegrityError:\n abort(409, 'team name is already in use')\n\n return '', 201",
"def register(project_id, runner):\n pass",
"def add(self,path):\n out, err, code = self.command( [\"git\", \"add\", path], self.directory )",
"def add_repo(repo_name, url):\n\n # First, validate the URL\n if not utils.is_valid_url(url):\n log.e(TAG, \"Invalid URL provided (missing http/s)\")\n return -2\n\n # Next, make sure this repo doesnt exist\n if __has_repo(repo_name):\n log.e(TAG, \"Repo name '%s' already exists!\" % repo_name)\n return -3\n\n return __add_repo(repo_name, url)",
"def add(self, team):\n ### INVARIANT: team is a Team class object.\n if team not in self._conf_teams:\n self._conf_teams.append(team)",
"async def add_to_team(self, player : Player, team):\r\n if player in self.remaining:\r\n self.teams[team].append(player)\r\n self.remaining.remove(player)\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"{} has been drafted to team {}\".format(get_member_name(player,lower=False), \":a:\" if team == \"A\" else \":b:\"))\r\n else:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",description=\"Sorry, {} is already drafted\".format(get_member_name(player)))",
"def addTag(self, repoType, txt, addSlash=True, project=0):\n # remove slash\n while txt.startswith(\"/\"):\n txt = txt[1:]\n \n # add fix to support & in filename, ampersand is used \n # as a shortcut for the tab by pyqt\n txt = txt.replace(\"&\", \"&&\")\n # end of fix\n \n if repoType == UCI.REPO_TESTS_LOCAL:\n repo = \"local-tests\"\n elif repoType == UCI.REPO_TESTS:\n repo = \"remote-tests\"\n project_name = self.iRepo.remote().getProjectName(project=project)\n repo += '(%s)' % project_name\n elif repoType == UCI.REPO_ADAPTERS:\n repo = \"remote-adapters\"\n elif repoType == UCI.REPO_LIBRARIES:\n repo = \"remote-libraries\"\n elif repoType == UCI.REPO_UNDEFINED:\n repo = \"undefined\"\n else:\n repo = \"unknown\"\n self.error( \"repo unknown: %s\" % repoType )\n if addSlash:\n if repoType == UCI.REPO_TESTS_LOCAL:\n ret = \"%s:%s\" % (repo, txt) \n else:\n ret = \"%s:/%s\" % (repo, txt)\n else:\n ret = \"%s: %s\" % (repo, txt) \n return ret",
"def gitAdd(filename, repo_dir):\n file_path = \"%s/%s\" % (repo_dir, filename)\n git(\"add\", file_path)",
"def registerTeam(self, agentsOnTeam):\n\n self.agentsOnTeam = agentsOnTeam",
"def commit_changes(repo, project = ''):\n cmd = repo.git\n cmd.add(all=True)\n try:\n cmd.commit(m=\"redcap2mysql.py data sync for project \" + project)\n except git.exc.GitCommandError, err:\n logging.info([traceback.format_exc(limit=1).splitlines()[-1]])",
"def add(self, organisation: Organisation) -> None:\n ..."
] | [
"0.6898738",
"0.6489705",
"0.64307415",
"0.6304983",
"0.6265113",
"0.6264515",
"0.6212561",
"0.61818516",
"0.6177951",
"0.61501896",
"0.60574985",
"0.5968487",
"0.58513224",
"0.57982254",
"0.57399786",
"0.5735879",
"0.5716812",
"0.5707634",
"0.5682229",
"0.5664063",
"0.56495",
"0.5591028",
"0.5558522",
"0.5552845",
"0.5550953",
"0.5547198",
"0.5539031",
"0.5496612",
"0.54952914",
"0.5492456"
] | 0.75826085 | 0 |
Return tables of cells in neuronPop population name connected to mitrals specified in args, via neuronProj projection name; if args is not specified, get all. | def exportTable(network, neuronProj, neuronPop, colours, \
args={}, spikes=True, allcells=True):
exportDict = {'spikes':spikes,'data_tables':[]}
if array(colours).shape == (3,): coloursList = False
else: coloursList = True
## get cells connected to mitrals specified in args. if not specified get arbitrary.
celllist = getCellsByMitralConnection(args, network, neuronProj, neuronPop, allcells)
for i,cell in enumerate(celllist):
cellname = cell.path.split('/')[-1]
## assumes at least one soma and takes the first!
cell.soma = moose.Compartment(get_matching_children(cell,['Soma','soma'])[0])
cellTablePath = cell.soma.path+"/data/vmTableSoma"
if moose.context.exists(cellTablePath):
cell._vmTableSoma = moose.Table(cellTablePath)
else:
print "SimError: Did not find "+cellTablePath
sys.exit(1)
if coloursList: colour=colours[i]
else: colour=colours
exportDict['data_tables'].append((cellname,colour,array(cell._vmTableSoma)))
return exportDict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getCellsByMitralConnection(args, network, projection, population, allcells=False):\n cellList = []\n cellUniques = []\n if args.has_key('mitrals'):\n for mitid in args['mitrals']:\n mitpath = 'mitrals_'+str(mitid)\n cellnum = 0\n if projection in network.projectionDict:\n for conn in network.projectionDict[projection][2]:\n if mitpath in conn[2]: # if mitrals_<mitid> is substring of post_seg_path of this connection\n cellpath = string.split(conn[1],'/')[1] # take out cellname from '/cellname/segmentname\n ## Take only those cells that have not been taken before.\n if cellpath not in cellUniques:\n cell = moose.Cell(cellpath)\n cellList.append(cell)\n cellUniques.append(cellpath)\n cellnum += 1\n if not allcells and cellnum == 30: break\n else:\n if allcells: cellList = network.populationDict[population][1].values()\n else: cellList = network.populationDict[population][1].values()[0:60]\n return cellList",
"def run_simulation(env, pop, nr):\n pop_f = []\n pop_pl = []\n pop_el = []\n\n\n for individual in pop:\n fitness, player_life, enemy_life, time = env.play(pcont=individual)\n pop_f.append(fitness)\n pop_pl.append(player_life)\n pop_el.append(enemy_life)\n\n return pop_f, pop_pl, pop_el",
"def personnel_projects_table(personnel_search_table_selected_indices, selected_funding, selected_year, rows):\n personnel_outcome_data, personnel_name = personnel_outcomes_helper(personnel_search_table_selected_indices, selected_funding, selected_year, rows)\n if personnel_outcome_data is not None:\n personnel_outcome_data = personnel_outcome_data[[\"type\", \"pub_title\"]]\n return personnel_outcome_data.to_dict('records')\n\n return pd.DataFrame(data=None, columns=[\"type\", \"pub_title\"]).to_dict('records') # None cannot be passed back as it will cause an error.",
"def tsne_projection(train_file, gen_file, i):\n train_data = pd.read_csv(train_file)\n def get_ori_prom(row):\n if row['gaps'] <= 2 and row['dips'] <=2:\n return 'Model'+str(i)+'_train_promising'\n else:\n return 'Model'+str(i)+'_train'\n train_data['label'] = train_data.apply(get_ori_prom, axis=1)\n train_data = train_data.drop(['id', 'gaps', 'dips'], axis=1)\n gen_prom_smiles = pd.read_csv(gen_file)\n gen_prom_smiles = gen_prom_smiles.drop(['Group', 'Gaps', 'Dips'], axis=1)\n gen_prom_smiles['label'] = 'Generated_promising'\n all_smi = pd.concat([train_data, gen_prom_smiles])\n mols = get_mols(all_smi.SMILES)\n fps, _ = get_fingerprints(mols)\n fp_embeded = TSNE(n_components=2, perplexity=100).fit_transform(fps)\n all_smi['tsne1'] = fp_embeded[:, 0]\n all_smi['tsne2'] = fp_embeded[:, 1]\n return all_smi, len(train_data)",
"def population(params, n=100):\r\n pops = []\r\n for i in range(n):\r\n pop = []\r\n for param in params:\r\n pop.append(np.random.choice(param))\r\n\r\n individuale = Individuale(pop)\r\n pops.append(individuale)\r\n # print(\"No.{} : {} : {}\".format(i, individuale, individuale.x))\r\n return pops",
"def population_projection(self):\n population_projection = Worldometers.soup.find_all(class_='years')[1]\n year_rows = population_projection.find_all('tr')\n\n population_projection_dict = {}\n for x in range(1, len(year_rows)):\n columns = year_rows[x].find_all('td')\n population_projection_dict[x-1] = {\n 'year': columns[0].getText().replace(',', ''),\n 'population': columns[1].getText().replace(',', ''),\n 'growth': columns[2].getText().replace(' %', '')\n }\n\n return population_projection_dict",
"def get_chained_proj(self):\n #non recursive call\n N = self.N_in\n projs = []\n for layer in self.layers:\n proj,N_out = self.get_deprojecter(layer,N)\n projs.append(proj)\n N = N_out\n return projs",
"def printPop(population: list):\n for p in population:\n print(p)",
"def run_projections(self):\n self.projection_collection100 = DonorCollection()\n self.projection_collection50 = DonorCollection()\n double_over_100 = dict(list(\n (name, list(map(lambda x: x * 2, donations.donations))) for name, donations in self.donors.items()))\n triple_under_50 = dict(list(\n (name, list(map(lambda x: x * 3, donations.donations))) for name, donations in self.donors.items()))\n for donor, donations in double_over_100.items():\n self.projection_collection100.add(donor, donations)\n for donor, donations in triple_under_50.items():\n self.projection_collection50.add(donor, donations)\n return self.projection_collection50, self.projection_collection100",
"def get_grid_data_popdiag(grid):\n# indir = '/CESM/bgcwg/obgc_diag/mapping/grids'\n indir = '/glade/p/cesm/bgcwg/obgc_diag/mapping/grids'\n infile = os.path.join(indir, grid + '.nc')\n fp = Nio.open_file(infile,'r')\n nlon, nlat = fp.variables['grid_dims'][:]\n tlat = fp.variables['grid_center_lat'][:]\n tlon = fp.variables['grid_center_lon'][:]\n fp.close()\n tlat = N.reshape(tlat,(nlat,nlon))[:,0]\n tlon = N.reshape(tlon,(nlat,nlon))[0,:]\n return nlon, nlat, tlon, tlat",
"def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]",
"def _projections(self, nvar):\n min_var = self.proje_var.argsort()[:nvar]\n add_coeffs = 1 / self.proje_var[min_var]\n indp_est_proje = np.dot(add_coeffs, self.sep_proje_eval[min_var]) /\\\n np.sum(add_coeffs)\n\n # consider covariance\n coverr = []\n try:\n proje_cov_inv = np.linalg.inv(self.proje_cov[min_var][:, min_var])\n cov_weight = np.sum(proje_cov_inv, axis=0) / np.sum(proje_cov_inv)\n cov_est_proje = np.dot(cov_weight, self.sep_proje_eval[min_var])\n coverr.append(1/np.sum(proje_cov_inv))\n except:\n cov_est_proje = np.ones(self.sep_proje_eval.shape[1])\n cov_est_proje[:] = np.nan\n coverr.append(np.nan)\n return np.array([indp_est_proje, cov_est_proje])",
"def project_pop(self):\n M = self.N[0:2]\n for x in range(10):\n M.append(self.run_step(M))\n split_N = split_list(M)\n \n fig = self.make_figure(split_N)\n fig.update_layout(title='Projected Fish Population')\n\n return fig",
"def select(self, m, population):\n pass",
"def intrapop_connections(n_mitral, n_granule, n_subpop, n_mitral_per_subpop):\n resmat = np.zeros((n_mitral, n_granule))\n for i_subpop in xrange(n_subpop):\n start = i_subpop*n_mitral_per_subpop\n stop = start + n_mitral_per_subpop\n resmat[start:stop, i_subpop] = 1.\n return resmat",
"def initPopulation(self, task):\n\t\tSol, Fitness, d = Algorithm.initPopulation(self, task)\n\t\tA, S, Q, v = np.full(self.NP, self.A), np.full([self.NP, task.D], 0.0), np.full(self.NP, 0.0), np.full([self.NP, task.D], 0.0)\n\t\td.update({'A': A, 'S': S, 'Q': Q, 'v': v})\n\t\treturn Sol, Fitness, d",
"def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):\n\n if kwargs.get(\"C\") is None:\n raise GMTInvalidInput(\"The `center` parameter must be specified.\")\n if kwargs.get(\"G\") is None and data is None:\n raise GMTInvalidInput(\n \"The `data` parameter must be specified unless `generate` is used.\"\n )\n if kwargs.get(\"G\") is not None and kwargs.get(\"F\") is not None:\n raise GMTInvalidInput(\n \"The `convention` parameter is not allowed with `generate`.\"\n )\n\n with GMTTempFile(suffix=\".csv\") as tmpfile:\n if outfile is None: # Output to tmpfile if outfile is not set\n outfile = tmpfile.name\n with Session() as lib:\n if kwargs.get(\"G\") is None:\n # Choose how data will be passed into the module\n table_context = lib.virtualfile_from_data(\n check_kind=\"vector\", data=data, x=x, y=y, z=z, required_z=False\n )\n\n # Run project on the temporary (csv) data table\n with table_context as infile:\n arg_str = build_arg_string(kwargs, infile=infile, outfile=outfile)\n else:\n arg_str = build_arg_string(kwargs, outfile=outfile)\n lib.call_module(module=\"project\", args=arg_str)\n\n # if user did not set outfile, return pd.DataFrame\n if outfile == tmpfile.name:\n if kwargs.get(\"G\") is not None:\n column_names = list(\"rsp\")\n result = pd.read_csv(tmpfile.name, sep=\"\\t\", names=column_names)\n else:\n result = pd.read_csv(tmpfile.name, sep=\"\\t\", header=None, comment=\">\")\n # return None if outfile set, output in outfile\n elif outfile != tmpfile.name:\n result = None\n\n return result",
"def atlas_projects():\n pass",
"def FindGrid(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...",
"def allJPDs(net):\r\n \r\n onodes = net.Outputnodes()\r\n combinations = [i for i in itertools.combinations(onodes,2)]\r\n JPDdata = [] \r\n for c in combinations:\r\n priorA = getPrior(net,c[0])\r\n priorB = getPrior(net,c[1]) \r\n jpd = JPD(net,c)\r\n cov = Covariance (priorA,priorB,jpd)\r\n cor = Correlation (priorA,priorB,cov) \r\n JPDdata.append([c[0],c[1],jpd,cov,cor])\r\n return JPDdata",
"def get_population_names (f,a,s):\r\n ## a, s not used but needed for function to match general function format\r\n global gv\r\n aa = f.readline().strip()\r\n popnamelist = []\r\n i = 0\r\n foundghost = False\r\n while aa.find(\"Population\") >= 0:\r\n popname = aa.split()[3]\r\n if popname.upper() == \"GHOST\":\r\n foundghost = True\r\n popnamelist.append(popname)\r\n i += 1\r\n aa = f.readline().strip()\r\n if gv[\"useghost\"] == True and foundghost == False: # for compatibility with older output files\r\n popnamelist.append('ghost')\r\n anames = []\r\n if gv[\"usealtnames\"]:\r\n for line in open(gv[\"altnamefilename\"],\"r\"):\r\n temp = line.strip()\r\n if len(temp) > 0:\r\n anames.append(temp)\r\n anames = anames[0:len(popnamelist)]\r\n gv[\"altpopnames\"] = list(anames)\r\n return popnamelist",
"def population_observer(population, num_generations, num_evaluations, args):\r\n population.sort(reverse=True)\r\n print('----------------------------------------------------------------------------')\r\n print(' Current Population')\r\n print('----------------------------------------------------------------------------')\r\n for ind in population:\r\n print(str(ind))\r\n print('----------------------------------------------------------------------------')",
"def createGridcells(mapdata, listOfP):\n new_gridcells = GridCells()\n new_gridcells.header = mapdata.header\n new_gridcells.cell_width = mapdata.info.resolution\n new_gridcells.cell_height = mapdata.info.resolution\n new_gridcells.cells = []\n for p in listOfP:\n new_gridcells.cells.append(PathPlanner.grid_to_world(mapdata, p[0], p[1]))\n return new_gridcells",
"def JPD(net,nodeTuple): \r\n\r\n priorA = getPrior(net,nodeTuple[0]) \r\n statesA = net.NodeStates(nodeTuple[0],naming = 'titlename') \r\n statesB = net.NodeStates(nodeTuple[1],naming = 'titlename')\r\n numstatesA = [float(i) for i in statesA] \r\n numstatesB = [float(i) for i in statesB] \r\n output = np.zeros((len(statesA)+1,len(statesB)+1))\r\n output[0,1:] = numstatesB\r\n output[1:,0] = numstatesA\r\n for n,i in enumerate(statesA):\r\n net.RetractFinding() \r\n output[n+1][1:] = np.array(net.Finding(nodeTuple[0],i,nodeTuple[1],output = 'name'))*priorA[0][n]\r\n return output",
"def query_settlement_layer(grid):\n path = os.path.join(SHAPEFILE_DIR, f'{COUNTRY_ABBRV}.tif')\n\n grid['population'] = pd.DataFrame(\n zonal_stats(vectors=grid['geometry'], raster=path, stats='sum'))['sum']\n\n grid = grid.replace([np.inf, -np.inf], np.nan)\n\n return grid",
"def default_selection(random, population, args):\r\n return population",
"def proj(self, X, G):\n raise NotImplementedError",
"def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)",
"def get_null_proj(self,nsing=None):\n if nsing is None:\n nsing = self.get_nsing()\n if nsing is None:\n raise Exception(\"nsing is None\")\n print(\"using {0} singular components\".format(nsing))\n self.log(\"forming null space projection matrix with \" +\\\n \"{0} of {1} singular components\".format(nsing,self.jco.shape[1]))\n\n v2_proj = (self.xtqx.v[:,nsing:] * self.xtqx.v[:,nsing:].T)\n self.log(\"forming null space projection matrix with \" +\\\n \"{0} of {1} singular components\".format(nsing,self.jco.shape[1]))\n\n return v2_proj",
"def generate_population(population_size, nn_architecture):\n population = []\n for _ in range(population_size):\n population.append(nn.create_nn_from_arch(nn_architecture))\n\n return population"
] | [
"0.5612893",
"0.5285766",
"0.5185361",
"0.5045113",
"0.5007588",
"0.49609387",
"0.49419475",
"0.48437467",
"0.48161486",
"0.48034397",
"0.47943878",
"0.4792616",
"0.4761036",
"0.4739973",
"0.4637608",
"0.45978764",
"0.45735255",
"0.45684016",
"0.45425737",
"0.45364887",
"0.45320395",
"0.45264566",
"0.45124856",
"0.45041758",
"0.44981605",
"0.44906053",
"0.4479938",
"0.44782987",
"0.4456257",
"0.44456545"
] | 0.5903073 | 0 |
We record the position in S of the first occurrence of a letter. If we encounter the letter a second time, we check their spacing. | def well_spaced(S, D):
seen = [None] * 26
for i, c in enumerate(S):
if seen[ord(c) - ord("a")] is None:
seen[ord(c) - ord("a")] = i
else:
if i - seen[ord(c) - ord("a")] != D[ord(c) - ord("a")] + 1:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alphabet_position(letter):\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n pos = 0\n for ltr in alphabet:\n if ltr == letter.lower():\n return pos\n pos += 1\n return pos",
"def find_letter_in_dics(self,letter):\r\n if str.isupper(letter)==True and letter not in self.special_letters_dic and letter not in self.special_characters_dic: #taken from above\r\n position=self.general_upper_word_list[letter]\r\n elif str.islower(letter)==True and letter not in self.special_letters_dic and letter not in self.special_characters_dic:\r\n position=self.general_lower_word_list[letter]\r\n elif self.special_characters_dic!=None and letter in self.special_characters_dic:\r\n position=self.special_characters_dic[letter]\r\n elif letter in self.special_letters_dic:\r\n position=self.special_letters_dic[letter]\r\n elif letter in self.general_numbers_dic:\r\n position=self.general_numbers_dic[letter]\r\n return position",
"def alphabet_position(char):\n if type(char) != type(''):\n return -1\n if len(char) != 1:\n return -1\n if char.isalpha():\n return lowerLetters.find(char.lower())\n return -1",
"def alphabet_position(letter):\n\n alphabet='abcdefghijklmnopqrstuvwxyz'\n ALPHABET='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n if letter.isupper() == True:\n for x in ALPHABET:\n if letter == x:\n position = ALPHABET.index(x)\n return position\n\n else:\n for x in alphabet:\n if letter == x:\n position = alphabet.index(x)\n return position",
"def firstNotRepeatingCharacter(s):\n\n # even positions = number of characters\n # odd positions = last occurrence of that character\n scounter = [0] * 52\n\n for i in range(len(s)):\n char_pos = (ord(s[i]) - 97) * 2\n scounter[char_pos] += 1\n scounter[char_pos + 1] = i\n\n last_occurrence = len(s)\n for i in range(0, 52, 2):\n if scounter[i] == 1 and scounter[i + 1] < last_occurrence:\n last_occurrence = scounter[i + 1]\n\n if last_occurrence < len(s):\n return s[last_occurrence]\n\n return '_'",
"def successive(S):\n\t\n\tL = len(S)-1 \n\t# L is length of string S minus 1, so that final letter is preceded by 0 spaces\n\n\tfor letter in S:\n\t# a 'for' loop prints each letter of string \"S\" on new line\n\t\t\n\t\tprint(\" \"*L,letter)\n\t\t# prints each letter with L spaces preceding it \n\n\t\tL = L - 1\n\t\t# L decrements by 1 when each letter is printed to evoke the \"slant\" effect from right to left",
"def check_diagonal_1(self, given_letter):\n count = 0\n avalible_pos = []\n for i in range(self.size):\n if self.positions[self.letters[i] + self.numbers[i]] == given_letter:\n count += 1\n else:\n avalible_pos.append(self.letters[i] + self.numbers[i])\n return count, avalible_pos",
"def theLoveLetterMystery(s):\n mincount = 0\n for i in range(len(s) // 2):\n mincount += abs(ord(s[i]) - ord(s[-1 - i]))\n\n return mincount",
"def before_space(s):\n\n assert type(s) == str, repr(s)+' is not a string.'\n assert introcs.count_str(s,' ') >= 1, repr(s)+' does not contain at least one space.'\n\n #find location of first space\n space = introcs.find_str(s,' ')\n\n #get string before first space\n result = s[:space]\n\n #return the result\n return result",
"def check_diagonal_2(self, given_letter):\n count = 0\n avalible_pos = []\n for i in range(self.size):\n if self.positions[self.letters[self.size - 1 - i] + self.numbers[i]] == given_letter:\n count += 1\n else:\n avalible_pos.append(\n self.letters[self.size - 1 - i] + self.numbers[i])\n return count, avalible_pos",
"def first_recurring_char(s: str) -> str:\n h = {} # using dictionary as hash\n for ch in s:\n if ch in h:\n return ch\n\n h[ch] = 0\n return None",
"def letterToIndex(letter):\n if letter not in all_letters:\n letter = remove_tone_marks(letter)\n\n return all_letters.find(letter) + 1",
"def find_word(self,word):\r\n self.start_pos = []\r\n #check each row\r\n for i in range(0,len(self.wordsearch)):\r\n #check each column\r\n for j in range(0, len(self.wordsearch[i])):\r\n #find all coordinates which have the first letter of the word and store them\r\n if self.wordsearch[i][j] == self.word[0]:\r\n self.start_pos.append([i,j])\r\n \r\n \r\n #print(count)\r\n for pos in self.start_pos:\r\n if self.check_start(self.word, pos):\r\n \r\n return",
"def getFisrtCharThatAppearsOnce(myString):\n myString = \"\".join(myString.lower().split())\n charDict = {key:[0, 0] for key in string.ascii_lowercase}\n for pos, char in enumerate(myString):\n charDict[char][0] += 1\n charDict[char][1] = pos\n charDict = {key:values for key, values in charDict.items() if values[0] == 1}\n sortedCharDict = sorted(charDict.items(), key=operator.itemgetter(1))\n strOut = sortedCharDict[0][0] if sortedCharDict else False\n return strOut",
"def first_not_repeating_character(string):\n letters = {}\n order = {}\n for s in xrange(string):\n letters.setdefault(ord(string[s]) - 97, 0)\n letters[ord(string[s]) - 97] += 1\n order[s] = ord(string[s]) - 97\n for i in xrange(len(order)):\n if letters[order[i]] == 1:\n return chr(order[i] + 97)\n return '_'",
"def test_starts_letter(x):\n return x[0].isalpha()",
"def sac(self):\n if self.index >= self.length:\n return False\n \n self._sac, n = self.parse_sac()\n if self._sac is not None:\n self.idx_sac = self.index\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"SAC\", self._sac, self.idx_sac)\n return True\n return False",
"def first_not_repeating_character(string):\n counter = Counter(string)\n for key, value in counter.items():\n if value <= 1:\n return key\n break\n return '_'",
"def getFirstChar(self):\n if self.i1 is None:\n self.firstChar = None\n else:\n chrNum = int(self.i1 // 10)\n if chrNum < 26:\n # should result in something like A4 for 4, B6 for 16\n self.firstChar = chr(ASCII_LETTER_A + chrNum) + str(self.i1 % 10)\n else:\n runLog.warning(\n \"invalid location. ring {0} is too many rings!\".format(self.i1),\n self,\n )",
"def listPosition(word):\n if len(word) == 1: return 1\n pos = 0\n for c in set(word):\n if c < word[0]:\n letters = list(word)\n letters.remove(c)\n pos += arrangements(letters)\n pos += listPosition(word[1:])\n return pos",
"def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += alpha_map[s[i]]\n i += 1\n return res",
"def first_non_repeating_letter(string: str) -> str:\n result = ''\n string_lower = string.lower()\n\n for i, s in enumerate(string_lower):\n if string_lower.count(s) == 1:\n return string[i]\n\n return result",
"def parse_sac(self):\n \n index = self.index\n start = self.index \n \n try:\n v = sttype[self.words[index]['word']]\n if Vocabulary.SAC not in v['tag']:\n return None, 0 \n itag = v['tag'].index(Vocabulary.SAC)\n addr2 = v['lemma'][itag]\n \n index += 1\n if index == self.length:\n return None, 0\n \n # e.g., Apt #3\n if self.words[index]['word'] == '#':\n index += 1\n if index == self.length:\n return None, 0\n \n addr2 += ' ' + self.words[index]['word']\n \n # e.g., Apt D-13\n if index + 1 < self.length and self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n addr2 += self.words[index]['word']\n \n return addr2, index - start + 1\n \n except:\n return None, 0",
"def step1ab(self):\n\t\tif self.b[self.k] == 's':\n\t\t\tif self.ends(\"sses\"):\n\t\t\t\tself.k = self.k - 2\n\t\t\telif self.ends(\"ies\"):\n\t\t\t\tself.setto(\"i\")\n\t\t\telif self.b[self.k - 1] != 's':\n\t\t\t\tself.k = self.k - 1\n\t\tif self.ends(\"eed\"):\n\t\t\tif self.m() > 0:\n\t\t\t\tself.k = self.k - 1\n\t\telif (self.ends(\"ed\") or self.ends(\"ing\")) and self.vowelinstem():\n\t\t\tself.k = self.j\n\t\t\tif self.ends(\"at\"): self.setto(\"ate\")\n\t\t\telif self.ends(\"bl\"): self.setto(\"ble\")\n\t\t\telif self.ends(\"iz\"): self.setto(\"ize\")\n\t\t\telif self.doublec(self.k):\n\t\t\t\tself.k = self.k - 1\n\t\t\t\tch = self.b[self.k]\n\t\t\t\tif ch == 'l' or ch == 's' or ch == 'z':\n\t\t\t\t\tself.k = self.k + 1\n\t\t\telif (self.m() == 1 and self.cvc(self.k)):\n\t\t\t\tself.setto(\"e\")",
"def theLoveLetter(s):\n\n alphabets = 'abcdefghijklmnopqrstuvwxyz'\n\n l = len(s) # length of input string\n\n # Sub divide the input string into two equal parts\n str1 = s[:l//2]\n\n # handle odd input string length\n str2 = s[l//2 if l % 2 == 0 else l//2 + 1:]\n\n # handle first edge case\n # if input string is already a palindrome return\n if s == s[::-1]: \n return 0\n \n # initialize variables to hold totals of \n # string manipulations\n part1 = 0\n part2 = 0\n\n # traverse through first part of the string \n # from the string's beginning\n # while comparing current char with char from\n # second part of the string from the end\n\n \n\n for i in range(1,len(str1)+1):\n if str1[i-1] != str2[len(str2)-i]:\n # check the positions of the chars in alphabets\n if alphabets.find(str1[i-1]) < alphabets.find(str2[len(str2)-i]):\n part1 += (alphabets.find(str2[len(str2)-i]) - alphabets.find(str1[i-1]) )\n else:\n part2 += (alphabets.find(str1[i-1]) - alphabets.find(str2[len(str2)-i]))\n return part1+part2",
"def _validate_input_sequence(self, seq:str) -> str:\n if not \"$\" in seq:\n # add sentinal letter which is unique and lexicographically smaller than any other character\n if self.debug: print(f\"Sentinal letter is added to input sequence: {seq + '$'}\")\n return seq + \"$\"\n else:\n if seq[-1:] == \"$\" and seq.count(\"$\") == 1:\n if self.debug: print(f\"Input sequnce ({seq}) already contains sentinal letter at last position.\")\n return seq\n else:\n if self.debug: print(f\"Sentinal letter at wrong position: {seq}\")\n raise ValueError(\"Input sequence sequence may only contain the sentinal letter '$' in the last position.\")",
"def initial_finder(self, seq, ins):\n# print('call initial_finder, input = '+seq)\n letter=seq[0]\n if letter in ins:\n if letter in ['д','т','ц','с']:\n next_letter=seq[:2]\n if next_letter in ins:\n initial=next_letter\n len_init=2\n else:\n initial=letter\n len_init=1\n else:\n initial=letter\n len_init=1 \n else:\n initial='_'\n len_init=0\n# print(initial)\n return initial, len_init",
"def first_non_rep(string):\n if len(string) == 0:\n return None\n\n if len(string) == 1:\n return string\n\n letter_dict = {}\n # Letters in the string will get appended to the list, so order is maintained.\n one_char = []\n\n for letter in string:\n letter = letter.lower() \n if letter in letter_dict:\n letter_dict[letter] += 1\n if letter in one_char:\n one_char.remove(letter)\n else:\n letter_dict[letter] = 1\n one_char.append(letter)\n if not one_char:\n return None\n else:\n return one_char[0]",
"def test_match_start_check_at_beginning_of_string(self):\n first_letter = \"a\"\n s = \"abcdef\"\n self.assertEqual(__, re.search(first_letter, s).group())",
"def minAddToMakeValid(self, S):\n stack = []\n for char in S:\n if char == '(':\n stack.append(char)\n if char == ')':\n if len(stack):\n if stack[-1] == '(':\n stack.pop()\n else:\n stack.append(char)\n else:\n stack.append(char)\n return len(stack)"
] | [
"0.63146746",
"0.60987276",
"0.6056158",
"0.60332364",
"0.6029894",
"0.60163057",
"0.59288514",
"0.59279585",
"0.58409476",
"0.5798788",
"0.5759058",
"0.5749839",
"0.5723557",
"0.5687024",
"0.56397504",
"0.5600619",
"0.55635214",
"0.556004",
"0.5554876",
"0.5525632",
"0.5519316",
"0.55033195",
"0.54968286",
"0.5495098",
"0.5467568",
"0.54656166",
"0.54533535",
"0.54185236",
"0.5413595",
"0.5384992"
] | 0.6559219 | 0 |
Register publisher on nameserver. This works for PUBSUB only | def register_publisher(self, hostname, expire=-1): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def registerEvent(eventName, publisher, msgInterface, exclusive=FALSE):",
"def register_topic(self, name, command):\n topic_name = command['topic_name']\n try:\n topic_type = self.get_interface_type(command['interface_type'], '.msg')\n self.pubs[topic_name] = self.create_publisher(topic_type, topic_name, 1)\n except JoyTeleopException as e:\n self.get_logger().error(\n 'could not register topic for command {}: {}'.format(name, str(e)))",
"def setup_subscriber(publisher_address):\n print(\"Subscribing to server on {}\".format(publisher_address))\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.connect(publisher_address)\n filter = \"\"\n # the following two lines are for Python2 compatability\n if isinstance(filter, bytes):\n filter = filter.decode(\"ascii\")\n socket.setsockopt_string(zmq.SUBSCRIBE, filter)\n return socket",
"def publishEvent(eventName,publisher, msg):",
"def _registerPublisher(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterPublishedTopic:\n self.__docWriter.addPub(callerId, topic, topicType)",
"def register(self, target, hostname, listener_type, expire=-1):",
"def register(self):\n self._log.debug(\"Registering Nsr op data path %s as publisher\",\n NsrOpDataDtsHandler.XPATH)\n\n hdl = rift.tasklets.DTS.RegistrationHandler()\n with self._dts.group_create() as group:\n self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,\n handler=hdl,\n flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)",
"def publish():\n pass",
"def register_server(self, server_name):\n if server_name not in self.server_queues.keys():\n print(\"Server '{}' registered itself.\".format(server_name))\n else:\n print(\"Server '{}' re-registered itself.\".format(server_name))\n # Remove the old queue, just to be safe\n del self.server_queues[server_name]\n\n dq = DispatcherQueue()\n self.server_queues[server_name] = dq",
"def publisher(self, publisher):\n self._publisher = publisher",
"def publisher(self, iTag, msgType, addr):\r\n return ROSPublisher(self, iTag, msgType, addr)",
"def set_publisher (self, publisher):\n self.publisher = publisher",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)",
"def unregister_publisher(self, hostname):",
"def publishService(self, name, stype, port, domain=\"\", host=\"\"):\n if name in self.published:\n return\n\n if not self.bus:\n self.bus = dbus.SystemBus()\n\n server = dbus.Interface(\n self.bus.get_object(\n avahi.DBUS_NAME,\n avahi.DBUS_PATH_SERVER),\n avahi.DBUS_INTERFACE_SERVER)\n\n g = dbus.Interface(\n self.bus.get_object(avahi.DBUS_NAME,\n server.EntryGroupNew()),\n avahi.DBUS_INTERFACE_ENTRY_GROUP)\n\n g.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC,dbus.UInt32(0),\n name, stype, domain, host,\n dbus.UInt16(port), \"\")\n\n g.Commit()\n self.published[name] = g",
"def on_publish(unused_client, unused_userdata, unused_mid):\n\tprint('on_publish')",
"def register_router(self, hostname, expire=-1):",
"def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)",
"async def register_pub_sub_proxies(self):\n for topic in self.published_topics_proxy:\n\n class PubSubProxy:\n\n def __init__(self, remote_session, topic):\n self._remote_session = remote_session\n self._topic = topic\n\n async def __call__(self, *args, **kwargs):\n self._remote_session.publish(self._topic, *args, **kwargs)\n\n # subscribe to the local topics published so they can be \"re-published\" on the remote session\n await self.local_session.subscribe(PubSubProxy(self.remote_session, topic), topic)",
"def publisher(self, publisher):\r\n return publishers.Publisher(self, publisher)",
"def register (method, event):\n Publisher.subscribe (method, event)",
"def on_publish(client, userdata, mid):\n print(\"Message Published.\")",
"def _create_subscriber(self, topic_name):\n if self._sub:\n self._sub.unregister()\n self._sub = rospy.Subscriber(topic_name, Image, self._image_callback)\n rospy.loginfo(\"Listening to %s -- spinning ..\" % self._sub.name)\n self._widget.setWindowTitle(\"Label plugin, listening to (%s)\" % self._sub.name)",
"def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )",
"def create_subscriber():\n rospy.init_node(\"hello_world_sub_node\")\n rospy.Subscriber(\"hello_world\", String, process_hello_world_msg)",
"def registerServer(srv):\n srv.setListenAddress(hostname)\n srv.setMachine(getMBean('/Machines/'+machineName))",
"def test_registration(self):\n models = [BlogEntry, BlogRoll]\n pubsub.register(models)\n self.assertTrue(set(models).issubset(pubsub.registry))",
"def set_pub_sub(self):\n\n # Set trap check service client\n self.trap_cheq_srv = rospy.ServiceProxy(\"check_for_trap\", TrapCheck)\n\n # Set mix initiave controller output\n self.mix_cmd_pub = rospy.Publisher(\"mix_cmd\", Bool, queue_size=50)\n\n # Set agent TS state subscriber\n rospy.Subscriber(\"ts_state\", TransitionSystemStateStamped, self.ts_state_callback, queue_size=50)\n\n # Set human input planner\n rospy.Subscriber(\"key_cmd\", Bool, self.teleop_cmd_callback, queue_size=50)\n\n # Set planner input subscriber\n rospy.Subscriber(\"planner_cmd\", Bool, self.planner_cmd_callback, queue_size=50)",
"def register_pep(self, name, stanza):\n pubsub_stanza = self.xmpp['xep_0060'].stanza\n register_stanza_plugin(pubsub_stanza.EventItem, stanza)\n\n self.add_interest(stanza.namespace)\n self.xmpp['xep_0030'].add_feature(stanza.namespace)\n self.xmpp['xep_0060'].map_node_event(stanza.namespace, name)"
] | [
"0.6365265",
"0.62822574",
"0.6280103",
"0.62570566",
"0.6152319",
"0.60816693",
"0.6038326",
"0.6014652",
"0.5980777",
"0.58707476",
"0.582609",
"0.58243114",
"0.5799215",
"0.5795977",
"0.5795272",
"0.57344294",
"0.5730708",
"0.5672222",
"0.56624746",
"0.5653363",
"0.56333864",
"0.55717725",
"0.55689114",
"0.55281824",
"0.5501058",
"0.5478911",
"0.54678285",
"0.5452631",
"0.54216903",
"0.54194856"
] | 0.7828177 | 0 |
Unregister publisher on nameserver. This works for PUBSUB only | def unregister_publisher(self, hostname): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unregister(self):\n self._executor.unregister_publisher(self)",
"def unregisterEvent(eventName, publisher):",
"def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )",
"def unpublishService(self, name):\n self.published[name].Reset()\n del self.published[name]",
"def unregister(self, target, hostname, listener_type):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()",
"def unregister_router(self, hostname):",
"def unsubscribe(self):\r\n self._unregister()",
"def unsubscribeFromEvent(eventName,subscriber):",
"def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)",
"def unlisten(cls, name: str):\r\n cls.Unlisten(name)",
"def unlisten(self, prefix: str) -> None:\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)",
"def unsubscribe(self, item_name):\n self.subscribed = None",
"def unregister_server():\n (code, message) = rest_api.unregister_server(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def unregisterProducer():",
"def unregister(self, pollster):\n pollster.unregister(self._router_socket)",
"def _remove_sub(sub):\n # counting publisher instance per topic name\n TopicBack.sub_instance_count[sub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return sub.unregister()",
"async def unregister(websocket):\n app['websockets'].discard(websocket)\n await notify_users()",
"def unregister(self, name):\r\n raise NotImplementedError",
"def unsubscribe(self):\n pass # pragma: no cover",
"def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()",
"def unsubscribe(self, chanel_name):\n name = 'unsubscribe'\n\n self._send_websocket_request(name, chanel_name)",
"def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]",
"def remove_subscriber(self, subscriber):\n try:\n self.subscribers.remove(subscriber)\n # Silenty ignore of the subscriber was not registered\n except ValueError:\n pass",
"def unsubscribe(callback):\n if callback in _subscribers:\n del _subscribers[callback]"
] | [
"0.7360216",
"0.7246476",
"0.6685116",
"0.66366076",
"0.6556128",
"0.64936596",
"0.64936596",
"0.64936596",
"0.64936596",
"0.64936596",
"0.6339355",
"0.6295946",
"0.6200138",
"0.61934596",
"0.6149258",
"0.61001545",
"0.60975444",
"0.60746765",
"0.60650426",
"0.6057191",
"0.6052558",
"0.60420954",
"0.603342",
"0.6032517",
"0.60060453",
"0.5974066",
"0.5927142",
"0.59148085",
"0.5887629",
"0.5873658"
] | 0.84743935 | 0 |
Register router on the nameserver. This works for ROUTER proxy only | def register_router(self, hostname, expire=-1): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_router(self, router):\n for prefix, viewset, basename in router.registry:\n self.register(prefix, viewset, base_name=basename)",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def register(name, func):\n WebSocketRouter.funcmap[name] = func",
"def add_router(self):\n router = OrderedDict({self.router_name: {\n 'type': 'OS::Neutron::Router',\n 'properties': {\n 'name': self.router_name,\n 'external_gateway_info': {\n 'network': { 'get_param': 'public_net' }\n }\n }\n }})\n self.template['resources'].update(router)",
"def add_new(self, name):\n if name not in self.routers:\n self.routers[name] = Router(name)\n return True\n return False",
"def registerServer(srv):\n srv.setListenAddress(hostname)\n srv.setMachine(getMBean('/Machines/'+machineName))",
"def extend(self, router):\n self.registry.extend(router.registry)",
"def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)",
"def mountRouterOnly(self, router):\n pass",
"def __init__(self, router):\n\n self.router = router",
"def create_router(self, router_name=\"test_router\"):\n LOG_OBJ.debug(\n \"Creating router in tenant %s\" %\n self.project_info[\"project_id\"])\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/routers.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n\n _router_info = {\n \"router\": {\n \"tenant_id\": self.project_info[\"project_id\"],\n \"name\": router_name,\n \"admin_state_up\": True}}\n\n _body = json.dumps(_router_info)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create router Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Router Details : %s \" % output)\n\n return output['router']['id']",
"def __init__(self, router):\n self._router = router",
"def register_server():\n (code, message) = rest_api.register_server(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def router(paramstring):\r\n channels()",
"def mvcRouter(self, router):\n pass",
"def create_router(self, body=None):\r\n return self.post(self.routers_path, body=body)",
"def mountRouter(self, predicate, router):\n pass",
"def mountRouterPath(self, path, router):\n pass",
"def register_server(self, server_name):\n if server_name not in self.server_queues.keys():\n print(\"Server '{}' registered itself.\".format(server_name))\n else:\n print(\"Server '{}' re-registered itself.\".format(server_name))\n # Remove the old queue, just to be safe\n del self.server_queues[server_name]\n\n dq = DispatcherQueue()\n self.server_queues[server_name] = dq",
"def unregister_router(self, hostname):",
"def add_interface_router(self, router, body=None):\r\n return self.put((self.router_path % router) + \"/add_router_interface\",\r\n body=body)",
"def register_route(self, route, app):\n assert route not in self.routes\n self.routes[route] = app",
"def add_gateway_router(self, router, body=None):\r\n return self.put((self.router_path % router),\r\n body={'router': {'external_gateway_info': body}})",
"def register(self):\n for _, member in inspect.getmembers(self):\n if isinstance(member, Route):\n member.set_parent(self)\n member.register(self.core)",
"def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")",
"def registerExistingServer():\n cd('/')\n cd('/Servers/'+managedServername)\n registerServer(cmo)",
"def register_proxy(self, proxy):\n self.__proxy = proxy",
"def RemoteRouter(services):\n return PublicController(services)",
"def add_routes(self):\n pass",
"def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))"
] | [
"0.69818735",
"0.6787208",
"0.62229717",
"0.6108167",
"0.60634494",
"0.6024237",
"0.5897947",
"0.5891949",
"0.57765454",
"0.5680079",
"0.56769615",
"0.5665641",
"0.5664951",
"0.5652085",
"0.5566623",
"0.5562512",
"0.553475",
"0.5525856",
"0.5513387",
"0.54793245",
"0.54617864",
"0.54565257",
"0.53972936",
"0.53816843",
"0.53641814",
"0.5363288",
"0.53517467",
"0.53466105",
"0.53459823",
"0.5319597"
] | 0.74578744 | 0 |
Unregister router on the nameserver. This works for the ROUTER proxy only. | def unregister_router(self, hostname): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unregister(self, pollster):\n pollster.unregister(self._router_socket)",
"def unregister_server():\n (code, message) = rest_api.unregister_server(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def _unregister_from_server(self):\n self.remote_controller.unregister()",
"def delete_router(self, router):\r\n return self.delete(self.router_path % (router))",
"def unregister(self, target, hostname, listener_type):",
"def remove_gateway_router(self, router):\r\n return self.put((self.router_path % router),\r\n body={'router': {'external_gateway_info': {}}})",
"async def deregister_route(self, handle: str) -> None:\n await self.AD.http.deregister_route(handle, self.name)",
"def unregister_server(self, request):\n\n name = request.form['name']\n token = request.form['token']\n\n rest_client = RestClient.instance()\n user_info = rest_client.user_info(token)\n\n if (user_info is None):\n return (401, 'Unauthorized')\n\n try:\n removed = GameServers.instance().unregister_server(\n name, user_info.get('username'))\n if (not removed):\n return (403, 'Forbidden')\n except:\n pass\n return (200, 'OK')",
"def unlisten(cls, name: str):\r\n cls.Unlisten(name)",
"def _stopping(self, sender, **kwargs):\n for v in self._platform_connections.values():\n v.kill()\n\n self._platform_connections.clear()\n\n self.vip.rpc.call(MASTER_WEB, 'unregister_all_agent_routes',\n self.core.identity).get(timeout=30)",
"def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )",
"def disable(self):\n self.registrar.unregister_service(\"map\", namespace=__name__)\n self.registrar.unregister_service(\"directions\", namespace=__name__)",
"def removeProxyManagerConnection(address=None):\n global __mgr_cache__\n #: :type: ProxyManager\n if hasattr(__mgr_cache__[address], 'shutdown'):\n __mgr_cache__[address].shutdown()\n del __mgr_cache__[address]",
"def remote_destroy(self):\r\n if self._receivers:\r\n for interface in reduce(set.union, self._receivers.itervalues()):\r\n interface.unregisterProtocol(self)\r\n\r\n self._receivers = None\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None",
"async def unregister(websocket):\n app['websockets'].discard(websocket)\n await notify_users()",
"def destroyTunnel(self, name, targetIP):\r\n return self._ref.callRemote('destroyTunnel', name, targetIP)",
"def unregister(self):\r\n self._unregister()",
"def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]",
"def remove_interface_router(self, router, body=None):\r\n return self.put((self.router_path % router) +\r\n \"/remove_router_interface\", body=body)",
"def unregister(self, dbus_path):\n self._media_proxy.proxy.UnregisterEndpoint(dbus_path)",
"def router_gateway_clear(mgr_or_client, router_id, *args, **kwargs):\n net_client = _g_router_client(mgr_or_client)\n router = router_show(mgr_or_client, router_id)\n router_id = router['id']\n body = net_client.update_router(router_id,\n external_gateway_info=dict())\n return body['router']",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def unregister_publisher(self, hostname):",
"def terminate(self):\n set_sysctl(self, 'net.ipv4.ip_forward', 0)\n set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)\n super(LinuxRouter, self).terminate()",
"def destroy(self):\r\n # TODO: WHY ???\r\n if not self._endpoint:\r\n return\r\n\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None\r\n\r\n # Endpoint should destroy all connections\r\n assert len(self._connections) == 0\r\n\r\n super(Protocol, self).destroy()",
"def unregister(self, name):\r\n raise NotImplementedError",
"async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)",
"def unregister(url):\n return Client.get_client().unregister(url)",
"def stop_server(self, server, name):\n # Spin down the requested server\n server.stop()",
"def delete(self, oid):\n path = '%s/routers/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack router: %s' % truncate(res))\n return res[0]"
] | [
"0.7407919",
"0.6835906",
"0.6531206",
"0.6473747",
"0.6360587",
"0.6355955",
"0.62724245",
"0.6237078",
"0.62182945",
"0.6144307",
"0.61080134",
"0.6107907",
"0.60263836",
"0.60021615",
"0.59701866",
"0.5969365",
"0.5967524",
"0.5956805",
"0.59442115",
"0.59275377",
"0.5907844",
"0.5890464",
"0.5866178",
"0.58628505",
"0.5825473",
"0.582434",
"0.5747195",
"0.5720289",
"0.56991136",
"0.5687797"
] | 0.84506893 | 0 |
Register target on the nameserver. If the record already exists and has an expiration timeout, it will be updated. Existing records without a timeout will stay untouched. | def register(self, target, hostname, listener_type, expire=-1): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_router(self, hostname, expire=-1):",
"def touch_member(self, data, ttl=None, permanent=False):",
"def register_publisher(self, hostname, expire=-1):",
"def _set_target_info_by_name(self, targets, port, target_name, iqn):\n host_iqn_registered_in_target = (\n self._get_host_iqn_registered_in_target_by_name(\n port, target_name, iqn))\n if host_iqn_registered_in_target:\n gid = host_iqn_registered_in_target['hostGroupNumber']\n storage_iqn = self.client.get_host_grp(port, gid)['iscsiName']\n targets['info'][port] = True\n targets['list'].append((port, gid))\n targets['iqns'][(port, gid)] = storage_iqn\n return True\n return False",
"def update(self, *args):\n\n gmtexpires = None\n (name, ip, expires) = args[:3]\n\n for arg in args:\n if arg.lower().startswith('expires='):\n gmtexpires = arg[8:]\n\n if gmtexpires is None:\n if len(args) == 3:\n gmtexpires = expires\n else:\n if args[2] == 'NEVER':\n gmtexpires = args[2]\n else:\n gmtexpires = args[3]\n\n self.name = name # \"www.example.com\"\n self.ip = maybe_ip_addr(ip) # IPV4Address instance, or string\n\n if self.ip == '<error>':\n self._expire()\n return\n\n fmt = \"%Y-%m-%d %H:%M:%S\"\n\n # if we already have expiry times, etc then we want to\n # properly delay our timeout\n\n oldexpires = self.expires\n\n if gmtexpires.upper() == 'NEVER':\n # FIXME can I just select a date 100 years in the future instead?\n self.expires = None\n else:\n self.expires = datetime.datetime.strptime(gmtexpires, fmt)\n self.created = datetime.datetime.utcnow()\n\n if self.expires is not None:\n if oldexpires is None:\n if self.expires <= self.created:\n diff = datetime.timedelta(seconds=0)\n else:\n diff = self.expires - self.created\n self.expiry = self.map.scheduler.callLater(diff.seconds,\n self._expire)\n\n else:\n diff = self.expires - oldexpires\n self.expiry.delay(diff.seconds)",
"def registerExistingServer():\n cd('/')\n cd('/Servers/'+managedServername)\n registerServer(cmo)",
"def _set_delete_timer(self, key_name, timeout):\n if key_name is not None:\n #print(\"(%d) _set_delete_timer:\" % int(time.time()), key_name.hex()[:10], timeout)\n query_management.QueryEntry(expire_after=timeout, callback_expire=remove_old_key,\n data={KeyType.hint: key_name}, retry_count=0)",
"def register(self, service_name, service_addr, service_ttl):\n raise NotImplementedError",
"def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)",
"def test_updatednsrecord(kasserver, kasapi):\n kasserver.add_dns_record(\"test.example.com\", \"CNAME\", \"www.example2.com\")\n assert kasapi.requests_contains(\"update_dns_settings\")",
"def set_ttl(self, ttl):",
"def save(subdomain):\n\tglobal Counter\n\tif db.session.query(Target.id).filter_by(subdomain=subdomain).scalar() is None :\n\t\tdb.session.add(Target(subdomain,str(uuid.uuid4())))\n\t\tdb.session.commit()\n\t\tCounter += 1\n\t\tlogger.log('INFO',f'[+] {subdomain} added to database')\n\telse:\n\t\tlogger.log('ERROR',f'[-] {subdomain} already exists')",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def add_node(self, nodename: str, timeout: int=3600):\n if nodename not in self._d:\n self._d[nodename] = ({}, timeout)\n else:\n self._d[nodename] = (self._d[nodename][0], timeout)",
"def register(url, interval=300):\n return Client.get_client().register(url, interval=interval)",
"def regen_id(self):\n data = self.todict()\n Slate.expire(self)\n self._test_id()\n self.update(data)\n self._update_cookie = True",
"def _extend_expiration_time(self, fname=None):\n if fname is None:\n fname = self.uniquefile\n future = datetime.now() + timedelta(seconds=self._lockduration)\n expires = time.mktime(future.timetuple())\n try:\n os.utime(fname, (expires, expires))\n except OSError as e:\n if e.errno not in self.NOT_EXIST_ERRORS:\n raise",
"def add_target(self, target):\n\n # pass specified target parameters to the PED-RPC server.\n target.pedrpc_connect()\n target.set_fuzz_data_logger(fuzz_data_logger=self._fuzz_data_logger)\n\n # add target to internal list.\n self.targets.append(target)",
"async def _register(self, name, source):\n self._last[name] = {}\n\n self._srcTaskList[name] = asyncio.create_task(\n self._monitor(name, source)\n )",
"def upsert_record(route53_zone, record_name, ip):\n\n # Only upsert the dns record if it doesn't resolve to us.\n try:\n record_ip = socket.gethostbyname(record_name)\n except socket.error:\n # Ignore if we can't connect to the host\n pass\n else:\n if ip == record_ip:\n return\n\n print str(dt.now()), \"Registering host as\", record_name\n record = route53_zone.get_a(record_name)\n\n if record and ip not in record.resource_records:\n route53_zone.update_a(record_name, ip)\n elif not record:\n route53_zone.add_a(record_name, ip)",
"def target_name(self, target_name):\n\n self._target_name = target_name",
"def target_name(self, target_name):\n\n self._target_name = target_name",
"async def store(self, api_name: str, schema: Dict, ttl_seconds: int):\n raise NotImplementedError()",
"def register_device(self, expiry: int) -> str:\n # pylint: disable=c0103\n sr = self._id_scope + \"%2Fregistrations%2F\" + self._device_id\n sig_no_encode = DeviceRegistration.compute_derived_symmetric_key(self._key, sr + \"\\n\" + str(expiry))\n sig_encoded = parse.quote(sig_no_encode, \"~()*!.'\")\n auth_string = \"SharedAccessSignature sr=\" + sr + \"&sig=\" + sig_encoded + \"&se=\" + str(expiry) + \"&skn=registration\"\n\n headers = {\n \"content-type\": \"application/json; charset=utf-8\",\n \"user-agent\": \"iot-central-client/1.0\",\n \"Accept\": \"*/*\",\n }\n\n if auth_string is not None:\n headers[\"authorization\"] = auth_string\n\n body = {\"registrationId\": self._device_id}\n\n uri = \"https://%s/%s/registrations/%s/register?api-version=%s\" % (\n self._dps_endpoint,\n self._id_scope,\n self._device_id,\n self._dps_api_version,\n )\n target = parse.urlparse(uri)\n\n self._logger.info(\"Connecting...\")\n self._logger.info(\"URL: \" + target.geturl())\n self._logger.info(\"body: \" + json.dumps(body))\n print(\"headers: \" + json.dumps(headers))\n\n response = self.__run_put_request_with_retry(target.geturl(), body, headers)\n\n data = None\n try:\n data = response.json()\n except Exception as e:\n err = \"ERROR: non JSON is received from \" + self._dps_endpoint + \" => \" + str(response) + \" .. message : \" + str(e)\n self._logger.error(err)\n raise DeviceRegistrationError(err)\n\n if \"errorCode\" in data:\n err = \"DPS => \" + str(data)\n self._logger.error(err)\n raise DeviceRegistrationError(err)\n\n time.sleep(1)\n return self._loop_assign(data[\"operationId\"], headers)",
"def _expire(self):\n del self.map.addr[self.name]\n self.map.notify(\"addrmap_expired\", *[self.name], **{})",
"def test_ttl_included_on_create(self):\r\n with mock.patch.object(ConnectionPool, 'execute') as m:\r\n TestTTLModel.ttl(60).create(text=\"hello blake\")\r\n\r\n query = m.call_args[0][0]\r\n self.assertIn(\"USING TTL\", query)",
"def add_target(self, target, ra, dec, uid, dmax=10):\n\n if target in self.lnames:\n raise hcam.HipercamError(\n 'Target = {target} name = {name} already has an entry'\n )\n\n if uid in self:\n entry = self[uid]\n ora, odec = entry['ra'], entry['dec']\n dist = np.sqrt((15*(ra-ora))**2+(np.cos(np.radians(dec))*(dec-odec))**2)\n if 3600*dist > dmax:\n raise hcam.HipercamError(\n f'Target id = {uid} already entered but the new position is {3600*dist}\" (>{dmax}) way from first position'\n )\n\n # add target to the list of names associated with ID\n self[uid]['names'].append(target)\n\n else:\n # new entry\n self[uid] = {'ra' : ra, 'dec' : dec, 'names' : [target]}\n\n # ensure target maps to the id\n self.lnames[target] = uid",
"def set(self, key, value, ttl=0):\n pass",
"def register_endpoint(self, **kwargs):\n self._database.update('endpoint', kwargs, kwargs, upsert=True)",
"def register(\n self, name, service_id=None, port=None,\n tags=None, check=None, interval=None, ttl=None):\n\n payload = {\n 'id': service_id,\n 'name': name,\n 'port': port,\n 'tags': tags,\n 'check': {\n 'script': check,\n 'interval': interval,\n 'ttl': ttl, }}\n\n return self.agent.http.put(\n lambda x: x.code == 200,\n '/v1/agent/service/register',\n data=json.dumps(payload))"
] | [
"0.59071237",
"0.57393044",
"0.56628335",
"0.54713863",
"0.5383242",
"0.5357558",
"0.5356139",
"0.5307397",
"0.5276759",
"0.52725244",
"0.52632517",
"0.523391",
"0.520452",
"0.51832664",
"0.51574975",
"0.51390576",
"0.5082721",
"0.5064394",
"0.50146466",
"0.50041485",
"0.4989615",
"0.4989615",
"0.49835986",
"0.4982552",
"0.4895431",
"0.48950756",
"0.48709083",
"0.4861517",
"0.48526382",
"0.48438936"
] | 0.7020003 | 0 |
Unregister target from nameserver. | def unregister(self, target, hostname, listener_type): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unregister(target: str) -> bool:\n ...",
"def _unregister_from_server(self):\n self.remote_controller.unregister()",
"def unregister_server(self, request):\n\n name = request.form['name']\n token = request.form['token']\n\n rest_client = RestClient.instance()\n user_info = rest_client.user_info(token)\n\n if (user_info is None):\n return (401, 'Unauthorized')\n\n try:\n removed = GameServers.instance().unregister_server(\n name, user_info.get('username'))\n if (not removed):\n return (403, 'Forbidden')\n except:\n pass\n return (200, 'OK')",
"def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]",
"def unregister_router(self, hostname):",
"def unlisten(cls, name: str):\r\n cls.Unlisten(name)",
"def unregister(url):\n return Client.get_client().unregister(url)",
"def unregister_server():\n (code, message) = rest_api.unregister_server(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def unregister(self):\r\n self._unregister()",
"def unregister(self, name):\r\n raise NotImplementedError",
"def unregister(self, service_name, service_addr):\n raise NotImplementedError",
"def stop_server(self, server, name):\n # Spin down the requested server\n server.stop()",
"def destroyTunnel(self, name, targetIP):\r\n return self._ref.callRemote('destroyTunnel', name, targetIP)",
"def unregister(self):\n idaapi.unregister_action(self.get_name())",
"def remote_destroyTunnel(self, name, targetIP):\r\n if name not in self._bridges:\r\n raise InternalError('Bridge does not exist.')\r\n\r\n key = (name, targetIP)\r\n\r\n if key not in self._uid:\r\n raise InternalError('Tunnel deos not exist.')\r\n\r\n return execute(('/usr/bin/ovs-vsctl', 'del-port',\r\n 'gre-{0}'.format(self._uid.pop(key))),\r\n reactor=self._reactor)",
"def unregister_publisher(self, hostname):",
"def remove_target(self, target):\n # type: (LoadBalancerTarget) -> List[BoundAction]\n return self._client.remove_target(self, target)",
"def unregister(self, pollster):\n pollster.unregister(self._router_socket)",
"def remove_target(self, target):\r\n if target in self.target_roots:\r\n self.target_roots.remove(target)\r\n self._targets.discard(target)",
"def DeleteTarget(self, target_instance_id):",
"def unregister_service(self, name):\n self._services.remove(name)",
"async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)",
"def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )",
"def cmd_unregister(self, app_name=None):\n rc = self.socket_command_with_project('unregister', app_name)\n return rc",
"def stop(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.stop_server(server)\n return r",
"def unsubscribe(self):\r\n self._unregister()",
"def delete_server(ServerName=None):\n pass",
"def unregister(self, dbus_path):\n self._media_proxy.proxy.UnregisterEndpoint(dbus_path)",
"def deregister_instance(InstanceId=None):\n pass",
"def stop(self, context):\n # Unregister the service\n self.__registration.unregister()\n self.__registration = None"
] | [
"0.7585173",
"0.68528515",
"0.6826376",
"0.68002725",
"0.67341775",
"0.6664987",
"0.6569992",
"0.6551973",
"0.6536063",
"0.65019625",
"0.64468646",
"0.6361532",
"0.63340855",
"0.6244407",
"0.6225373",
"0.6179231",
"0.6150409",
"0.6149765",
"0.6141956",
"0.6136862",
"0.61327934",
"0.6112071",
"0.6100557",
"0.6097349",
"0.60632557",
"0.6058105",
"0.60032856",
"0.5968318",
"0.59670043",
"0.5952486"
] | 0.7942518 | 0 |
Get all hosts from nameserver by target. | def get_hosts(self, target, listener_type): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getHosts(self):\n raise \"not implemented\"",
"def get_hosts(self):\n\n raise NotImplementedError",
"def all_hosts(self):\n ...",
"def get_list_hosts(self, path, params):\n eth_src = params.get('eth_src')\n host = self._extract_url_base(path)\n reply = self._faucet_collector.get_list_hosts(host, eth_src)\n self._augment_state_reply(reply, path)\n return reply",
"def getHosts(**options):\n return search.HostSearch.byOptions(**options)",
"def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]",
"def _target_hosts(self, paths):\n for path in paths:\n response = self.api_client.get(path)\n self.assertHttpOK(response)\n content = json.loads(response.content)\n (volume_node,) = content[\"volume\"][\"volume_nodes\"]\n yield volume_node[\"host_label\"]",
"def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host",
"def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)",
"def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)",
"def get_all_hosts_checkmk():\n\n checkmk_api_url = config['checkmk_api_url']\n\n req_params = { 'action': 'get_all_hosts',\n '_username': config['checkmk_api_username'],\n '_secret': config['checkmk_api_secret'],\n 'effective_attributes': '1',\n 'output_format': 'json' }\n r = requests.post(checkmk_api_url, req_params)\n\n hosts = {}\n for host in r.json()['result'].items():\n hostname = host[0]\n hostlabels = host[1]['attributes']['labels']\n hosts[hostname] = hostlabels\n\n logging.info('got %s hosts from checkmk', len(hosts))\n\n return hosts",
"def get_all_hosts(self, view='summary'):\n return self._get(endpoint='{}/hosts'.format(self.api_version),\n params=dict(view=view)).json()",
"def handle_args(args: Namespace) -> list:\n # If no targets provided, assume were finding them on network.\n # Once we have targets, if no test given, port/service scan them.\n if not args.target:\n low(\"Target not supplied, running host scan.\")\n hosts = get_hosts(verify_subnet(args.subnet))\n else:\n low(\"Target supplied: {}\".format(args.target))\n hosts = [Host(host) for host in args.target]\n\n if args.user and args.passwd:\n low(\"Username and Password supplied for tests, {}:{}\".format(args.user, args.passwd))\n for host in hosts:\n host.credentials = {'user': args.user, 'passwd': args.passwd}\n\n return hosts",
"def hosts(self) -> List[str]:\n if self.head_host:\n return [self.head_host]\n else:\n return [replica.host for replica in self.pod_args['pods'][0]]",
"def get_hosts(self):\n\n hosts = self.client.service.getHosts()\n return hosts",
"def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']",
"def all_hosts(*args, **kwargs):\n return True",
"def select_host_ids():\n return IMPL.select_host_ids()",
"def get_hosts_fanout(self, target, listener_type):",
"def alias_all(self, host_names, target, raise_on_not_found=True):\n self.set_all(host_names, self.get_one(target, raise_on_not_found))",
"def hostnames(self) -> Sequence[str]:\n return pulumi.get(self, \"hostnames\")",
"def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret",
"def get_hosts(self):\n if self._scanned:\n return self._scanner.all_hosts()\n else:\n raise ScannerError(\"ERROR: A scan has not yet been conducted!\")",
"def hosts(self):\n return self._hosts",
"def hosts(self):\n return self._hosts",
"def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError",
"def get_chunk_hosts_for_index_servers(self, host):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM index_server WHERE is_host = %s;\", (host,))\n results = cur.fetchall()\n\n temp = []\n for chunk in results:\n temp_dict = {}\n chunk_id = chunk['chunk_id']\n temp_dict['chunk_id'] = chunk_id\n temp_dict['hosts'] = {}\n temp_dict['hosts']['c_host'] = self.get_relation_for_chunk_id('crawler', chunk_id)[0]['c_host']\n temp_dict['hosts']['ib_host'] = self.get_relation_for_chunk_id('index_builder', chunk_id)[0]['ib_host']\n temp.append(temp_dict)\n cur.close()\n return temp\n except Exception as e:\n print(e)",
"def targets(tgt, tgt_type=\"glob\"):\n\n ssh_known_hosts_file = __opts__.get(\"ssh_known_hosts_file\")\n\n if not os.path.isfile(ssh_known_hosts_file):\n log.error(\"Cannot find SSH known_hosts file\")\n raise OSError(\"Cannot find SSH known_hosts file\")\n if not os.access(ssh_known_hosts_file, os.R_OK):\n log.error(\"Cannot access SSH known_hosts file: %s\", ssh_known_hosts_file)\n raise OSError(\n \"Cannot access SSH known_hosts file: {}\".format(ssh_known_hosts_file)\n )\n\n with salt.utils.files.fopen(ssh_known_hosts_file, \"r\") as hostfile:\n raw = _parse_ssh_known_hosts([line.rstrip() for line in hostfile])\n\n return __utils__[\"roster_matcher.targets\"](raw, tgt, tgt_type, \"ipv4\")",
"def hostnames(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"hostnames\")",
"def get_all_servers(self) -> List[Server]:\n pass"
] | [
"0.6609943",
"0.65855324",
"0.65804106",
"0.64846325",
"0.63959503",
"0.6352378",
"0.63149804",
"0.62216705",
"0.6152733",
"0.60675144",
"0.6000524",
"0.598171",
"0.5977664",
"0.5963793",
"0.5939155",
"0.590416",
"0.5887089",
"0.58860886",
"0.5873426",
"0.58689386",
"0.58522797",
"0.5851957",
"0.58455396",
"0.5833825",
"0.5833825",
"0.5818766",
"0.58119524",
"0.58115035",
"0.5809508",
"0.58042854"
] | 0.72865254 | 0 |
Retry if no hosts are found; used on the client's first connection. | def get_hosts_retry(self, target, listener_type): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reconnect(self):\n self.test_cmd()\n if not self.check_network: \n self.reset()\n attempt=0\n while not self.check_network and attempt<self.retries:\n self.full_reset()\n attempt+=1",
"def _retry_occurred(self):",
"def retry_connect(redis_cfg, tries=300, base_delay=4.):\n for i in range(tries):\n try:\n r = redis.StrictRedis(**redis_cfg)\n r.ping()\n return r\n except redis.ConnectionError as e:\n if i == tries - 1:\n raise\n else:\n delay = base_delay * (1 + (os.getpid() % 10) / 9)\n print(f'WARNING: could not connect to {redis_cfg}. Retrying after {delay} sec ({i+2}/{tries}). Error {e}')\n time.sleep(delay)",
"def _retry_bootstrap_candidates(self):\n if __debug__: dprint(\"unable to resolve all bootstrap addresses\", level=\"warning\")\n for counter in count(1):\n yield 1.0 if counter < 30 else 30.0\n if __debug__: dprint(\"attempt #\", counter, level=\"warning\")\n candidates = get_bootstrap_candidates(self)\n for candidate in candidates:\n if candidate is None:\n break\n else:\n if __debug__: dprint(\"resolved all bootstrap addresses\")\n self._bootstrap_candidates = dict((candidate.sock_addr, candidate) for candidate in candidates if candidate)\n break",
"def testGoodRetry(self):\n self.p = start_short_timeout_app_process()\n gateway = JavaGateway()\n connections = gateway._gateway_client.deque\n try:\n # Call #1\n gateway.jvm.System.currentTimeMillis()\n str_connection = str(connections[0])\n\n # Call #2 after, should not create new connections if the system is\n # not too slow :-)\n gateway.jvm.System.currentTimeMillis()\n self.assertEqual(1, len(connections))\n str_connection2 = str(connections[0])\n self.assertEqual(str_connection, str_connection2)\n\n sleep(0.5)\n gateway.jvm.System.currentTimeMillis()\n self.assertEqual(1, len(connections))\n str_connection3 = str(connections[0])\n # A new connection was automatically created.\n self.assertNotEqual(str_connection, str_connection3)\n except Py4JError:\n self.fail(\"Should retry automatically by default.\")\n finally:\n gateway.shutdown()\n self.p.join()",
"def sniff_hosts(self):\n previous_sniff = self.last_sniff\n hosts = []\n try:\n # reset last_sniff timestamp\n self.last_sniff = time.time()\n try:\n hosts = self.get_es_node_addresses()\n except Exception:\n raise TransportError(\"N/A\", \"Unable to sniff hosts.\" + traceback.format_exc())\n except:\n # keep the previous value on error\n self.last_sniff = previous_sniff\n raise\n\n # we weren't able to get any nodes, maybe using an incompatible\n # transport_schema or host_info_callback blocked all - raise error.\n if not hosts:\n raise TransportError(\"N/A\", \"Unable to sniff hosts - no viable hosts found.\" + traceback.format_exc())\n\n self.set_connections(hosts)",
"def _recover_network_failure(self):\n if self.auto_reconnect and not self._is_closing:\n connected = False\n while not connected:\n log_msg = \"* ATTEMPTING RECONNECT\"\n if self._retry_new_version:\n log_msg = \"* RETRYING DIFFERENT DDP VERSION\"\n self.ddpsocket._debug_log(log_msg)\n time.sleep(self.auto_reconnect_timeout)\n self._init_socket()\n try:\n self.connect()\n connected = True\n if self._retry_new_version:\n self._retry_new_version = False\n else:\n self._is_reconnecting = True\n except (socket.error, WebSocketException):\n pass",
"def attempt_reconnect(self):\n time.sleep(self.reconnect_delay)\n self.connect_to()",
"def always_retry(e):\n return True",
"def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")",
"def connect(self, num_retry_attempts=1):\n pass",
"def decide_to_retry(error):\n return True",
"def test_retry_build_on_compute_error(self):\n # Now that the bug is fixed, we should assert that the server goes to\n # ACTIVE status and is on the second host after the retry operation.\n server = dict(\n name='retry-test',\n imageRef=self.image_id,\n flavorRef=self.flavor_id)\n server = self.admin_api.post_server({'server': server})\n self.addCleanup(self.admin_api.delete_server, server['id'])\n server = self._wait_for_instance_status(server['id'], 'ACTIVE')\n\n # Assert that the host is not the failed host.\n self.assertNotEqual(self.failed_host,\n server['OS-EXT-SRV-ATTR:host'])\n\n # Assert that we retried.\n self.assertEqual(2, self.attempts)",
"def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)",
"def refresh(self, tries=3):\n for _ in range(tries):\n try:\n if not self._rcon or not self._rcon.ping():\n self._rcon = self._connect()\n else:\n break\n except redis.ConnectionError as exc:\n LOG.error(\"Failed to connect to Redis Server: %s\", exc)\n else:\n raise exception.ArestorException(\n \"Failed to connect to Redis Server.\")\n\n return True",
"def retry(self):\n return False",
"def reconnect(self):\n while True:\n try:\n log.info('try to reconnect %s' % self._conf.hosts)\n self.close()\n self.connect()\n self.client().admin.command('ismaster')\n return\n except Exception as e:\n log.error('reconnect failed: %s' % e)\n time.sleep(1)",
"def get_hosts_fanout_retry(self, target, listener_type):",
"def test_retry_run(self):\n pass",
"async def _auto_reconnect(self):\n while True:\n await asyncio.sleep(10)\n try:\n await self.connect()\n return\n except CannotConnect:\n pass",
"def retry_if_connection_error(exception):\r\n # return True\r\n return isinstance(exception, HttpError)",
"def _monitor_for_zero_connected_peers(self):\n if len(self.Peers) == 0 and len(self.connection_queue) == 0:\n if self.peer_zero_count > 2:\n logger.debug(\"Peer count 0 exceeded max retries threshold, restarting...\")\n self.Restart()\n else:\n logger.debug(\n f\"Peer count is 0, allow for retries or queued connections to be established {self.peer_zero_count}\")\n self.peer_zero_count += 1",
"def _retry_on_connection_error(exc):\n\n if isinstance(exc, db_exception.DBConnectionError):\n LOG.warning(\"Connection error detected. Retrying...\")\n return True\n return False",
"def attempt_connection(self):\n self.connection_error = False\n sleep_exp = 1\n connect_count = 0\n\n while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max:\n for host_and_port in self.__hosts_and_ports:\n try:\n log.info(\"Attempting connection to websocket %s\", host_and_port)\n self.socket = websocket.WebSocket()\n proto, host, port, path = host_and_port[3], host_and_port[0], host_and_port[1], host_and_port[2]\n if port:\n ws_uri = '{}://{}:{}/{}'.format(proto, host, port, path)\n else:\n ws_uri = '{}://{}/{}'.format(proto, host, path)\n\n self.socket.connect(ws_uri,\n timeout=self.__timeout)\n\n self.current_host_and_port = host_and_port\n log.info(\"Established connection to %s\", ws_uri)\n break\n except WebSocketException:\n self.socket = None\n connect_count += 1\n log.warning(\"Could not connect to host %s, port %s\", host_and_port[0], host_and_port[1], exc_info=1)\n\n if self.socket is None:\n sleep_duration = (min(self.__reconnect_sleep_max,\n ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))\n * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))\n * (1.0 + random.random() * self.__reconnect_sleep_jitter))\n sleep_end = monotonic() + sleep_duration\n log.debug(\"Sleeping for %.1f seconds before attempting reconnect\", sleep_duration)\n while self.running and monotonic() < sleep_end:\n time.sleep(0.2)\n\n if sleep_duration < self.__reconnect_sleep_max:\n sleep_exp += 1\n\n if not self.socket:\n raise exception.ConnectFailedException()",
"def retry_on_refuse(f, *args, **kwargs):\n i = 0\n while True:\n try:\n i += 1\n f(*args, **kwargs)\n break\n except (OSError, socket.error) as e:\n if e.args[0] != socket.errno.ECONNREFUSED or i > 10000:\n raise\n else:\n time.sleep(0.001)",
"def online_check():\n try_first_ips = [\n \"216.58.213.238\", # google\n \"8.8.8.8\", # google\n \"8.8.4.4\", # google\n \"46.228.47.115\", # yahoo\n ]\n last_resort_ips = [ # dns root servers\n \"198.41.0.4\",\n \"192.228.79.201\",\n \"192.33.4.12\",\n \"128.8.10.90\",\n \"192.203.230.10\",\n \"192.5.5.241\",\n \"192.112.36.4\",\n \"128.63.2.53\",\n \"192.36.148.17\",\n \"192.58.128.30\",\n \"193.0.14.129\",\n \"198.32.64.12\",\n \"202.12.27.33\"\n ]\n\n iplists = []\n iplists.append(try_first_ips)\n iplists.append(rand_ips(max_num=50))\n iplists.append(last_resort_ips)\n\n return any(can_ping_host(ip) for ip in chain(*iplists))",
"def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)",
"def test_hostmgr_failover(self, failure_tester):\n hosts1 = self._get_hosts(failure_tester)\n\n leader1 = failure_tester.fw.get_leader_info(failure_tester.hostmgr)\n assert leader1\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n failure_tester.wait_for_leader_change(failure_tester.hostmgr, leader1)\n failure_tester.reset_client()\n\n # verify that we can query the new leader\n def check_hosts():\n hosts2 = self._get_hosts(failure_tester)\n return len(hosts1) == len(hosts2)\n\n failure_tester.wait_for_condition(check_hosts)",
"def retry_if_resetpeer_or_timeout(exception):\n return not ((not isinstance(exception, requests_exceptions.ConnectionError)\n and not isinstance(exception, requests_exceptions.ConnectTimeout))\n and not isinstance(exception, BadStatusLine or exception.errno == errno.ECONNRESET))",
"def retry_request(self, method, action, body=None,\r\n headers=None, params=None):\r\n max_attempts = self.retries + 1\r\n for i in range(max_attempts):\r\n try:\r\n return self.do_request(method, action, body=body,\r\n headers=headers, params=params)\r\n except exceptions.ConnectionFailed:\r\n # Exception has already been logged by do_request()\r\n if i < self.retries:\r\n _logger.debug(_('Retrying connection to Neutron service'))\r\n time.sleep(self.retry_interval)\r\n\r\n raise exceptions.ConnectionFailed(reason=_(\"Maximum attempts reached\"))"
] | [
"0.6595895",
"0.642362",
"0.6300115",
"0.62578905",
"0.62397426",
"0.62371826",
"0.61907387",
"0.61150855",
"0.6100826",
"0.60999596",
"0.60886294",
"0.6058668",
"0.6034286",
"0.59911746",
"0.59811133",
"0.59562373",
"0.5926307",
"0.5876836",
"0.5873153",
"0.5847975",
"0.58354205",
"0.58346975",
"0.5775218",
"0.5744007",
"0.5726481",
"0.571888",
"0.56798345",
"0.5677407",
"0.56643355",
"0.5652154"
] | 0.65693855 | 1 |
Get all hosts for fanout from nameserver by target. | def get_hosts_fanout(self, target, listener_type): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hosts(self, target, listener_type):",
"def all_hosts(self):\n ...",
"def getHosts(self):\n raise \"not implemented\"",
"def get_list_hosts(self, path, params):\n eth_src = params.get('eth_src')\n host = self._extract_url_base(path)\n reply = self._faucet_collector.get_list_hosts(host, eth_src)\n self._augment_state_reply(reply, path)\n return reply",
"def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]",
"def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass",
"def get_hosts_fanout_retry(self, target, listener_type):",
"def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host",
"def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']",
"def get_all_hosts(self, view='summary'):\n return self._get(endpoint='{}/hosts'.format(self.api_version),\n params=dict(view=view)).json()",
"def get_hosts(self):\n\n raise NotImplementedError",
"def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)",
"def getHosts(**options):\n return search.HostSearch.byOptions(**options)",
"def _target_hosts(self, paths):\n for path in paths:\n response = self.api_client.get(path)\n self.assertHttpOK(response)\n content = json.loads(response.content)\n (volume_node,) = content[\"volume\"][\"volume_nodes\"]\n yield volume_node[\"host_label\"]",
"def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)",
"def query(oquery='', sure='no', ovirt=None):\n hosts = oVirtObjectType.all_types['host'].query(ovirt, oquery)\n env.hosts = [host.address for host in hosts]\n puts(yellow(\n \"Got %d hosts: \\n\\t\" % len(env.hosts)\n + '\\n\\t'.join(env.hosts)\n ))\n if sure != 'yes' and not env.parallel:\n if prompt('Is what you expected? y|n', default='y').lower() == 'n':\n abort('Ended by user request.')\n return hosts",
"def all_hosts(*args, **kwargs):\n return True",
"def handle_args(args: Namespace) -> list:\n # If no targets provided, assume were finding them on network.\n # Once we have targets, if no test given, port/service scan them.\n if not args.target:\n low(\"Target not supplied, running host scan.\")\n hosts = get_hosts(verify_subnet(args.subnet))\n else:\n low(\"Target supplied: {}\".format(args.target))\n hosts = [Host(host) for host in args.target]\n\n if args.user and args.passwd:\n low(\"Username and Password supplied for tests, {}:{}\".format(args.user, args.passwd))\n for host in hosts:\n host.credentials = {'user': args.user, 'passwd': args.passwd}\n\n return hosts",
"def _scan_hosts(self):\n results = []\n for item in glob.glob(self._pattern):\n results.append(item)\n return results",
"def all_hosts(self):\n if not 'scan' in list(self._scan_result.keys()):\n return []\n listh = list(self._scan_result['scan'].keys())\n listh.sort()\n return listh",
"def getAllHosts(cluster):\n nics = []\n hosts = rhevGet(\"/api/hosts\")\n doc = libxml2.parseDoc(hosts)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/hosts/host[cluster[@id='\" + getClusterData(cluster ,\"id\") + \"']]\")\n for i in res:\n #hrefs.append(i.prop(\"href\"))\n nic = rhevGet(i.prop(\"href\")+\"/nics\")\n nicdoc = libxml2.parseDoc(nic)\n ctxt = nicdoc.xpathNewContext()\n res = ctxt.xpathEval(\"/host_nics/host_nic/name[text() = '%s']/parent::*\" %rhev_settings.NIC)\n for i in res:\n nics.append(i.prop(\"href\"))\n return nics",
"def get_hosts_retry(self, target, listener_type):",
"def collect_hosts(hosts):\n host_ports, chroot = hosts.partition(\"/\")[::2]\n chroot = \"/\" + chroot if chroot else None\n\n result = []\n for host_port in host_ports.split(\",\"):\n host, port = host_port.partition(\":\")[::2]\n port = int(port.strip()) if port else 2181\n result.append((host.strip(), port))\n return (RandomHostIterator(result), chroot)",
"def get_hosts(marathon_url, app_id):\n\n api_endpoint = '/v2/apps/'\n headers = {'Content-Type': 'application/json'}\n url = marathon_url + api_endpoint + app_id\n print(url)\n r = requests.get(url, headers=headers)\n print(r.status_code)\n hosts = []\n for h in r.json()['app']['tasks']:\n hosts.append(h['host'])\n return hosts",
"def get_hosts(self):\n if self._scanned:\n return self._scanner.all_hosts()\n else:\n raise ScannerError(\"ERROR: A scan has not yet been conducted!\")",
"def get_chunk_hosts_for_index_servers(self, host):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM index_server WHERE is_host = %s;\", (host,))\n results = cur.fetchall()\n\n temp = []\n for chunk in results:\n temp_dict = {}\n chunk_id = chunk['chunk_id']\n temp_dict['chunk_id'] = chunk_id\n temp_dict['hosts'] = {}\n temp_dict['hosts']['c_host'] = self.get_relation_for_chunk_id('crawler', chunk_id)[0]['c_host']\n temp_dict['hosts']['ib_host'] = self.get_relation_for_chunk_id('index_builder', chunk_id)[0]['ib_host']\n temp.append(temp_dict)\n cur.close()\n return temp\n except Exception as e:\n print(e)",
"def get_migrating_vms_to_host(self, node_id):\n result = []\n for server_id in self.__migrating_tasks.keys():\n if self.__migrating_tasks[server_id] == node_id:\n result.append(server_id)\n return result",
"def alias_all(self, host_names, target, raise_on_not_found=True):\n self.set_all(host_names, self.get_one(target, raise_on_not_found))",
"def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets",
"def get_stacking_standalone_hosts(self, *, filter=None, Range=None, fields=None, **kwargs):\n function_endpoint = urljoin(self._baseurl, 'stacking/standalone_hosts')\n return self._call('GET', function_endpoint, **kwargs)"
] | [
"0.7184551",
"0.6248219",
"0.6176217",
"0.61702764",
"0.6109676",
"0.6054905",
"0.60191494",
"0.6009551",
"0.59727305",
"0.5941708",
"0.591378",
"0.59014225",
"0.58759665",
"0.5767903",
"0.5579544",
"0.55613315",
"0.5546322",
"0.55437875",
"0.5537281",
"0.55069554",
"0.5466221",
"0.54537284",
"0.54090726",
"0.5390949",
"0.5386453",
"0.5377367",
"0.5376254",
"0.5344722",
"0.534365",
"0.5343118"
] | 0.7517761 | 0 |
Retry if no fanout hosts are found; used on the client's first connection. | def get_hosts_fanout_retry(self, target, listener_type): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reconnect(self):\n self.test_cmd()\n if not self.check_network: \n self.reset()\n attempt=0\n while not self.check_network and attempt<self.retries:\n self.full_reset()\n attempt+=1",
"def _retry_occurred(self):",
"def decide_to_retry(error):\n return True",
"def attempt_reconnect(self):\n time.sleep(self.reconnect_delay)\n self.connect_to()",
"def always_retry(e):\n return True",
"def retry_connect(redis_cfg, tries=300, base_delay=4.):\n for i in range(tries):\n try:\n r = redis.StrictRedis(**redis_cfg)\n r.ping()\n return r\n except redis.ConnectionError as e:\n if i == tries - 1:\n raise\n else:\n delay = base_delay * (1 + (os.getpid() % 10) / 9)\n print(f'WARNING: could not connect to {redis_cfg}. Retrying after {delay} sec ({i+2}/{tries}). Error {e}')\n time.sleep(delay)",
"def testGoodRetry(self):\n self.p = start_short_timeout_app_process()\n gateway = JavaGateway()\n connections = gateway._gateway_client.deque\n try:\n # Call #1\n gateway.jvm.System.currentTimeMillis()\n str_connection = str(connections[0])\n\n # Call #2 after, should not create new connections if the system is\n # not too slow :-)\n gateway.jvm.System.currentTimeMillis()\n self.assertEqual(1, len(connections))\n str_connection2 = str(connections[0])\n self.assertEqual(str_connection, str_connection2)\n\n sleep(0.5)\n gateway.jvm.System.currentTimeMillis()\n self.assertEqual(1, len(connections))\n str_connection3 = str(connections[0])\n # A new connection was automatically created.\n self.assertNotEqual(str_connection, str_connection3)\n except Py4JError:\n self.fail(\"Should retry automatically by default.\")\n finally:\n gateway.shutdown()\n self.p.join()",
"def retry_on_refuse(f, *args, **kwargs):\n i = 0\n while True:\n try:\n i += 1\n f(*args, **kwargs)\n break\n except (OSError, socket.error) as e:\n if e.args[0] != socket.errno.ECONNREFUSED or i > 10000:\n raise\n else:\n time.sleep(0.001)",
"def retry_if_connection_error(exception):\r\n # return True\r\n return isinstance(exception, HttpError)",
"def get_hosts_retry(self, target, listener_type):",
"def _recover_network_failure(self):\n if self.auto_reconnect and not self._is_closing:\n connected = False\n while not connected:\n log_msg = \"* ATTEMPTING RECONNECT\"\n if self._retry_new_version:\n log_msg = \"* RETRYING DIFFERENT DDP VERSION\"\n self.ddpsocket._debug_log(log_msg)\n time.sleep(self.auto_reconnect_timeout)\n self._init_socket()\n try:\n self.connect()\n connected = True\n if self._retry_new_version:\n self._retry_new_version = False\n else:\n self._is_reconnecting = True\n except (socket.error, WebSocketException):\n pass",
"def attempt_to_connect(self):\n if self.server_handler.attempt_connection:\n self.server_handler.attempt_connection = False\n else:\n self.server_handler.attempt_connection = True",
"def retry(self):\n return False",
"def retry_if_resetpeer_or_timeout(exception):\n return not ((not isinstance(exception, requests_exceptions.ConnectionError)\n and not isinstance(exception, requests_exceptions.ConnectTimeout))\n and not isinstance(exception, BadStatusLine or exception.errno == errno.ECONNRESET))",
"def test_retry_run(self):\n pass",
"def connect(self, num_retry_attempts=1):\n pass",
"def test_start_sameconnector_twice_with_noreconnecting_on_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n startRet = yield self.start(localConfig.id)\n\n self.assertEqual(True, startRet)\n\n yield self.stopall()\n\n # Give a grace time for stopping\n yield waitFor(0.2)",
"def retry_if_endpoint_error(exception) -> bool:\n is_endpoint_error: bool = isinstance(exception, EndpointConnectionError)\n return is_endpoint_error",
"def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")",
"async def _auto_reconnect(self):\n while True:\n await asyncio.sleep(10)\n try:\n await self.connect()\n return\n except CannotConnect:\n pass",
"def connect_never_retry():\n try:\n messaging_service = MessagingService.builder().from_properties(boot.broker_properties()) \\\n .with_reconnection_retry_strategy(RetryStrategy.never_retry()).build()\n future = messaging_service.connect_async()\n\n return future.result()\n\n except PubSubPlusClientError as exception:\n raise exception\n\n finally:\n messaging_service.disconnect_async()",
"def _monitor_for_zero_connected_peers(self):\n if len(self.Peers) == 0 and len(self.connection_queue) == 0:\n if self.peer_zero_count > 2:\n logger.debug(\"Peer count 0 exceeded max retries threshold, restarting...\")\n self.Restart()\n else:\n logger.debug(\n f\"Peer count is 0, allow for retries or queued connections to be established {self.peer_zero_count}\")\n self.peer_zero_count += 1",
"def attempt_connection(self):\n self.connection_error = False\n sleep_exp = 1\n connect_count = 0\n\n while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max:\n for host_and_port in self.__hosts_and_ports:\n try:\n log.info(\"Attempting connection to websocket %s\", host_and_port)\n self.socket = websocket.WebSocket()\n proto, host, port, path = host_and_port[3], host_and_port[0], host_and_port[1], host_and_port[2]\n if port:\n ws_uri = '{}://{}:{}/{}'.format(proto, host, port, path)\n else:\n ws_uri = '{}://{}/{}'.format(proto, host, path)\n\n self.socket.connect(ws_uri,\n timeout=self.__timeout)\n\n self.current_host_and_port = host_and_port\n log.info(\"Established connection to %s\", ws_uri)\n break\n except WebSocketException:\n self.socket = None\n connect_count += 1\n log.warning(\"Could not connect to host %s, port %s\", host_and_port[0], host_and_port[1], exc_info=1)\n\n if self.socket is None:\n sleep_duration = (min(self.__reconnect_sleep_max,\n ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))\n * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))\n * (1.0 + random.random() * self.__reconnect_sleep_jitter))\n sleep_end = monotonic() + sleep_duration\n log.debug(\"Sleeping for %.1f seconds before attempting reconnect\", sleep_duration)\n while self.running and monotonic() < sleep_end:\n time.sleep(0.2)\n\n if sleep_duration < self.__reconnect_sleep_max:\n sleep_exp += 1\n\n if not self.socket:\n raise exception.ConnectFailedException()",
"def auto_failover(method):\r\n\r\n @functools.wraps(method)\r\n def _decorator(self, *args, **kwargs):\r\n if self._in_fallback:\r\n pass_seconds = (datetime_now() - self._in_fallback_date).total_seconds()\r\n if pass_seconds > self._options.get(\"FAILOVER_TIME\", 30):\r\n print(\"Go to default connection\")\r\n self._client = self._old_client\r\n\r\n self._in_fallback = False\r\n self._in_fallback_date = None\r\n del self.fallback_client\r\n else:\r\n print(\"Mantain fallback connection\")\r\n\r\n try:\r\n print(\"Executing {0}\".format(method.__name__))\r\n return method(self, *args, **kwargs)\r\n except ConnectionInterrumped:\r\n if self._fallback and not self._in_fallback:\r\n print(\"raised ConnectionInterrumped\")\r\n print(\"Switching to fallback conection\")\r\n self._old_client = self._client\r\n self._client = self.fallback_client\r\n\r\n self._in_fallback = True\r\n self._in_fallback_date = timezone.now()\r\n\r\n return method(self, *args, **kwargs)\r\n return _decorator",
"def should_reconnect(self):\n if not self.by_remote:\n if self.code == 1006:\n if self.reason == 'Abnormal closure':\n return True\n \n return False",
"def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3",
"def test_startconnector_with_noretry_on_con_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # It takes a moment to stop the service after a connection failure\n while True:\n ssRet = yield self.service_status(localConfig.id)\n if ssRet != 1:\n break;\n else:\n time.sleep(1)\n\n self.assertEqual(0, ssRet)\n\n yield self.stop(localConfig.id)",
"def retryFileCopy(self):\n self.areCopiesValid = self.checkCopiedFiles()\n copyRetryCount = 5\n while(copyRetryCount > 1 and not self.areCopiesValid):\n print(\"Something failed in copy, retrying \" + str(copyRetryCount))\n self.firstTimeSetup()\n self.areCopiesValid = self.checkCopiedFiles()\n copyRetryCount -= 1",
"def refresh(self, tries=3):\n for _ in range(tries):\n try:\n if not self._rcon or not self._rcon.ping():\n self._rcon = self._connect()\n else:\n break\n except redis.ConnectionError as exc:\n LOG.error(\"Failed to connect to Redis Server: %s\", exc)\n else:\n raise exception.ArestorException(\n \"Failed to connect to Redis Server.\")\n\n return True",
"def connect(self, host):\n return False"
] | [
"0.6410929",
"0.6315029",
"0.6292759",
"0.6159722",
"0.61007214",
"0.60679835",
"0.60382396",
"0.598557",
"0.5985042",
"0.5976616",
"0.59539026",
"0.59280324",
"0.591002",
"0.59035325",
"0.5888258",
"0.5867851",
"0.5821885",
"0.5793011",
"0.5766104",
"0.57537436",
"0.57288796",
"0.572702",
"0.5699445",
"0.5684096",
"0.56547654",
"0.56129944",
"0.5525968",
"0.5520385",
"0.55194217",
"0.5519333"
] | 0.7007031 | 0 |
Each profile model should define the __init__ method. The __init__ method must take the grid as the first input parameter. All other input parameters can be specified to define the model. The grid input parameter is automatically added as an attribute of the profile model. This method should set all three components of the mean velocity field, '_u'. The components default to 0 if they are not set here. | def __init__(self, grid, coef_u, coef_w=[0.01, 0.2]):
# In this example, we set the u-component to increase linearly with height:
# Note: we are making use of the automatically added 'grid' attribute
self._u[0] = coef_u * self.grid.z[:, None]
# Arbitrarily chose a factor of 0.3
# Note that the 'grid' object of this TurbSim run is accessible in the
# profile model.
self.coef_w = coef_w # We can store variables for use in other methods.
self._u[2] = self.calc_vertical_velocity() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, velocity, vorticity, prof_coords, \n direction, beginMeanComput, **kwds):\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Profiles, self).__init__(variables=[velocity, vorticity],\n **kwds)\n ## velocity field\n self.velocity = velocity\n ## vorticity field\n self.vorticity = vorticity\n ## X and Y coordinates of the profile\n self.prof_coords = prof_coords\n ## profile direction (0, 1 or 2)\n self.direction = direction\n ## time at which the computation of mean profile must begin\n self.beginMeanComput = beginMeanComput\n self.input = [velocity, vorticity]\n self.output = []",
"def __init__(self):\n\n # The object is already initialised.\n if self._initialised: return\n\n # Execute the base class __init__ method.\n Param_list.__init__(self)\n\n # Add the model variables.\n self._add_model_info()\n\n # Add the base data.\n self._add_align_data()\n\n # Add the parameters of all models.\n self._add(\n 'pivot_x',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The pivot point position x coordinate',\n py_type = float,\n set = 'params',\n scaling = 1e2,\n grid_lower = pivot_x_lower,\n grid_upper = pivot_x_upper,\n err = True,\n sim = True\n )\n self._add(\n 'pivot_y',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The pivot point position y coordinate',\n py_type = float,\n set = 'params',\n scaling = 1e2,\n grid_lower = pivot_y_lower,\n grid_upper = pivot_y_upper,\n err = True,\n sim = True\n )\n self._add(\n 'pivot_z',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The pivot point position z coordinate',\n py_type = float,\n set = 'params',\n scaling = 1e2,\n grid_lower = pivot_z_lower,\n grid_upper = pivot_z_upper,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_x',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The average position x translation',\n py_type = float,\n set = 'params',\n grid_lower = -5,\n grid_upper = 5,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_y',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The average position y translation',\n py_type = float,\n set = 'params',\n grid_lower = -5,\n grid_upper = 5,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_z',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The average position z translation',\n py_type = float,\n set = 'params',\n grid_lower = -5,\n grid_upper = 5,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_alpha',\n scope = 'global',\n units = 'rad',\n desc = 'The average position alpha Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_beta',\n scope = 'global',\n units = 'rad',\n desc = 'The average position beta Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = pi,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_gamma',\n scope = 'global',\n units = 'rad',\n desc = 'The average position gamma Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'eigen_alpha',\n scope = 'global',\n units = 'rad',\n desc = 'The Eigenframe alpha Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'eigen_beta',\n scope = 'global',\n units = 'rad',\n desc = 'The Eigenframe beta Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = pi,\n err = True,\n sim = True\n )\n self._add(\n 'eigen_gamma',\n scope = 'global',\n units = 'rad',\n desc = 'The Eigenframe gamma Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'axis_theta',\n scope = 'global',\n units = 'rad',\n desc = 'The cone axis polar angle (for the isotropic cone model)',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = pi,\n err = True,\n sim = True\n )\n self._add(\n 'axis_phi',\n scope = 'global',\n units = 'rad',\n desc = 'The cone axis azimuthal angle (for 
the isotropic cone model)',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'axis_alpha',\n scope = 'global',\n units = 'rad',\n desc = 'The rotor axis alpha angle (the rotation angle out of the xy plane)',\n py_type = float,\n set = 'params',\n grid_lower = -pi,\n grid_upper = axis_alpha_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_theta_x',\n scope = 'global',\n units = 'rad',\n desc = 'The pseudo-ellipse cone opening half-angle for the x-axis',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_theta_y',\n scope = 'global',\n units = 'rad',\n desc = 'The pseudo-ellipse cone opening half-angle for the y-axis',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_theta',\n scope = 'global',\n units = 'rad',\n desc = 'The isotropic cone opening half-angle',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_s1',\n scope = 'global',\n units = '',\n desc = 'The isotropic cone order parameter',\n py_type = float,\n set = 'params',\n grid_lower = -0.125,\n grid_upper = 1.0,\n err = True,\n sim = True\n )\n self._add(\n 'cone_sigma_max',\n scope = 'global',\n units = 'rad',\n desc = 'The torsion angle',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n\n # Add minimisation structures.\n self._add_min_data(min_stats_global=True)\n\n # Set up the user function documentation.\n self._set_uf_title(\"Frame order parameters\")\n self._uf_param_table(label=\"table: frame order parameters\", caption=\"Frame order parameters.\", scope='global')\n self._uf_param_table(label=\"table: frame order parameter value setting with defaults\", caption=\"Frame order parameter value setting.\", scope='global', default=True)",
"def __init__(self, model, settings):\n super().__init__(model, settings)\n self.model_part = self.model.CreateModelPart(self.settings[\"model_part_name\"].GetString())\n self.model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, self.settings[\"domain_size\"].GetInt())\n self.model_part.ProcessInfo.SetValue(KM.GRAVITY_Z, self.settings[\"gravity\"].GetDouble())\n self.EstimateDeltaTimeUtility = SW.EstimateTimeStepUtility(self.GetComputingModelPart(), self.settings[\"time_stepping\"])",
"def __init__(self, grid, grid_bnds, **kwargs):\n super().__init__(dynamic=True, **kwargs)\n self.grid = tf.Variable(initial_value=grid, trainable=True)\n self.grid_bnds = grid_bnds",
"def __init__(self, n_uv=1000, n_ubins=30, uv_max=None, u_min=None, u_max=None, frequency_taper=np.blackman, n_obs=1,\n nrealisations=100, nthreads=1, model_uncertainty=0.15, eta_min=0, use_analytical_noise=False, ps_dim=2,\n **kwargs):\n\n super().__init__(**kwargs)\n\n self._n_uv = n_uv\n self.n_ubins = n_ubins\n self._uv_max = uv_max\n self.frequency_taper = frequency_taper\n self.nrealisations = nrealisations\n self.model_uncertainty = model_uncertainty\n self.eta_min = eta_min\n self._u_min, self._u_max = u_min, u_max\n self._nthreads = nthreads\n self.ps_dim = ps_dim\n self.n_obs = n_obs\n\n self.use_analytical_noise = use_analytical_noise\n \n self.kernel_weights = None # set this as None so we only do this once",
"def __init__(self, params):\n\n self.params = (params,)\n\n [setattr(self, key, value) for key, value in params.items()]\n\n self.speed_step = (self.speed_desire_mean - self.speed_min) / 3 # Average number of speeds to check\n\n # Batch Details\n\n self.time_id = 0\n\n self.step_id = 0\n\n if self.do_save:\n\n self.time_taken = []\n\n self.time_delayed = []\n\n # Model Parameters\n\n self.boundaries = np.array([[0, 0], [self.width, self.height]])\n\n self.pop_active = 0\n\n self.pop_finished = 0\n\n # Initialise\n\n self.initialise_gates()\n\n self.agents = list([Agent(self, unique_id) for unique_id in range(self.pop_total)])\n\n return",
"def __init__(self, n_components=5, user=\"user_0\", model=\"gaussian\"):\n self.user = user\n self.n_components = n_components\n\n if(model==\"gaussian\"):\n self.model = hmm.GaussianHMM(n_components=n_components, covariance_type=\"diag\", \\\n init_params=\"cm\", params=\"cmt\")\n elif(model==\"GMMHMM\"):\n self.model = hmm.GMMHMM(n_components=n_components, n_mix=3, covariance_type=\"diag\", \\\n init_params=\"cm\", params=\"cmt\")\n self.model.gmms_ = [sklearn.mixture.GaussianMixture()]*3\n\n self.model.startprob_ = np.concatenate(([1],np.zeros(n_components-1)))\n self.model.transmat_ = self.compute_trans_matrix( n_components )\n\n self.overall_accuracy = 0",
"def __init__(self, verbose=False):\n self.verbose = verbose\n self.Umean = None # reference velocity\n self.have_field = False # True after the velocity field has been read\n\n# self.needUpdateMean = False # set to true update the mean inflow at every time step. \n# self.timeseries = None # used if needUpdateMean is True\n# self.Useries = None # used if needUpdateMean is True\n# self.Tseries = None # used if needUpdateMean is True\n\n self.mean_flow_read = False\n self.variances_read = False\n\n # inflow plane coordinates\n self.y = None\n self.z = None\n\n # set by calcVariance\n self.uu_mean = None\n self.vv_mean = None\n self.ww_mean = None\n\n # constant profiles, set by readAllProfiles or readVarianceProfile)\n self.z_profile = None\n self.uu_profile = None\n self.vv_profile = None\n self.ww_profile = None",
"def __init__(self, model_info, alg_config, **kwargs):\n import_config(globals(), alg_config)\n super().__init__(\n alg_name=kwargs.get(\"name\") or \"muzero\",\n model_info=model_info[\"actor\"],\n alg_config=alg_config,\n )\n # self.buff = ReplayBuffer(BUFFER_SIZE)\n self.buff = PrioritizedReplayBuffer(BUFFER_SIZE, alpha=1)\n self.discount = GAMMA\n self.unroll_step = UNROLL_STEP\n self.td_step = TD_STEP\n self.async_flag = False",
"def __init__(self, grid_size=7, num_bboxes=2, num_classes=20):\r\n super(Loss, self).__init__()\r\n self.S = grid_size\r\n self.B = num_bboxes\r\n self.C = num_classes",
"def __init__(self, Model, settings):\n\n KratosMultiphysics.Process.__init__(self)\n\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n \"help\" : \"This process applies constraints to the particles in a certain submodelpart, for a certain time interval\",\n \"mesh_id\" : 0,\n \"model_part_name\" : \"please_specify_model_part_name\",\n \"velocity_constraints_settings\" : {\n \"constrained\" : [true,true,true],\n \"value\" : [10.0, \"3*t\", \"x+y\"]\n },\n \"angular_velocity_constraints_settings\" : {\n \"constrained\" : [true,true,true],\n \"value\" : [10.0, \"3*t\", \"x+y\"]\n },\n \"interval\" : [0.0, 1e30]\n }\n \"\"\"\n )\n #example of admissible values for \"value\" : [10.0, \"3*t\", \"x+y\"]\n\n ## Trick to ensure that if someone sets constrained as a single bool, it is transformed to a vector\n if settings[\"velocity_constraints_settings\"].Has(\"constrained\"):\n if settings[\"velocity_constraints_settings\"][\"constrained\"].IsBool():\n is_fixed = settings[\"velocity_constraints_settings\"][\"constrained\"].GetBool()\n settings[\"velocity_constraints_settings\"][\"constrained\"] = default_settings[\"velocity_constraints_settings\"][\"constrained\"]\n for i in range(3):\n settings[\"velocity_constraints_settings\"][\"constrained\"][i].SetBool(is_fixed)\n\n if settings[\"angular_velocity_constraints_settings\"].Has(\"constrained\"):\n if settings[\"angular_velocity_constraints_settings\"][\"constrained\"].IsBool():\n is_fixed = settings[\"angular_velocity_constraints_settings\"][\"constrained\"].GetBool()\n settings[\"angular_velocity_constraints_settings\"][\"constrained\"] = default_settings[\"angular_velocity_constraints_settings\"][\"constrained\"]\n for i in range(3):\n settings[\"angular_velocity_constraints_settings\"][\"constrained\"][i].SetBool(is_fixed)\n\n settings.ValidateAndAssignDefaults(default_settings)\n\n self.model_part = Model[settings[\"model_part_name\"].GetString()]\n self.cplusplus_version_of_this_process = DEM.ApplyKinematicConstraintsProcess(self.model_part, settings)",
"def set_grid(self,ug):\n self.grd=ug\n self.set_topology()",
"def __init__(self, name, variable, variable_info, grid, api, interval):\n super().__init__(name, variable, variable_info)\n self.city = grid['city']\n self.county = grid['county']\n self.village = grid['village']\n self.api = api\n self.update = Throttle(interval)(self.update)",
"def __init__(self):\n self.position = Vector2()\n self.velocity = Vector2()\n self.update_parameters()\n self.mass = 0.18 # Mass of Sphero robot in kilograms",
"def __init__(self, grid, estimator, parameter_search, **kwargs):\n self.kwargs = kwargs\n self.grid = grid\n self.estimator = estimator\n self.parameter_search = parameter_search",
"def __init__(self, mean, config):\n self.lb = config.get('lb', 0)\n self.ub = config.get('ub', sys.maxint)\n self.a = float(config['a'])",
"def __init__(self, model):\n self.model = model\n self.nproperties = 0\n\n #: stores PSHELL, PCOMP, PCOMPG\n self.properties_shell = model.properties_shell\n\n # shear\n #: stores PSHEAR\n self.pshear = model.pshear\n\n # spring\n self.pelas = model.pelas\n\n # bush\n self.pbush = model.pbush\n\n # rods\n #self.conrod = model.conrod\n #self.crod = model.crod\n self.prod = model.prod\n\n # mass\n #: stores CONM1, CONM2, CMASS1, CMASS2, CMASS3, CMASS4, CMASS5, PMASS\n self.mass = model.mass\n\n # bars\n #: stores PBAR, PBARL\n self.properties_bar = model.properties_bar\n\n # beams\n #: stores PBEAM, PBEAML\n self.properties_beam = model.properties_beam\n\n # solids\n #: stores PSOLID, PLSOLID\n self.properties_solid = model.properties_solid\n\n # created by this class\n self.property_ids = None\n self.n = None\n self.property_groups = None",
"def __init__(self, model, X_lower, X_upper):\n self.model = model\n self.X_upper = X_upper\n self.X_lower = X_lower",
"def __init__(self,model,alpha=0,head_min=0,head_max=1,k=1,\r\n variables=[],priors=[]):\r\n \r\n import numpy as np\r\n \r\n # Append the base to the elementlist\r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n # Set orientation value\r\n self.alpha = alpha\r\n \r\n # Set potential scaling variables\r\n self.head_min = head_min\r\n self.head_max = head_max\r\n \r\n # Assign the hydraulic conductivity of the base model\r\n self.k = k\r\n \r\n # The model requires the base flow in terms of hydraulic potential (phi)\r\n # The function head_to_potential extracts the following variables:\r\n # phi_min hydraulic potential corresponding to head_min\r\n # phi_max hydraulic potential corresponding to head_max\r\n self.head_to_potential()\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.variables += [var]\r\n self.model.priors += [self.priors[idx]]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']",
"def __init__(self, **kwargs):\n GaussBeam.__init__(self, **kwargs)\n self.scale = kwargs.get('scale',10.)\n self.mass = kwargs.get('mass', 6.0)\n self.s0 = kwargs.get('s0', 7.0)\n self.retro = kwargs.get('retro', 1.0)\n self.alpha = kwargs.get('alpha', 1.0)\n self.Er0 = Erecoil( self.l , self.mass) \n self.mW = 1000 * (self.s0 * self.Er0 ) \\\n * np.abs( np.pi / 8. / uL(self.l) )\\\n * self.w[0]*self.w[1] / self.retro",
"def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_recover\", 1. / param_dictionary[\"duration_infectious\"])",
"def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols",
"def __init__(\n self, grid, mean_fire_recurrence=1.0, shape_parameter=3.5, scale_parameter=None\n ):\n super().__init__(grid)\n self._mean_fire_recurrence = mean_fire_recurrence\n\n self._shape_parameter = shape_parameter\n\n if scale_parameter is None:\n self.get_scale_parameter()\n\n else:\n self._scale_parameter = scale_parameter",
"def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')",
"def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)",
"def __init__(self, units, activation=\"relu\"):\n super(Dense, self).__init__(units=units, activation=activation)\n # momentum parameters:\n self.VW = 0\n self.Vb = 0\n # RMSprop parameters:\n self.SW = 0\n self.Sb = 0",
"def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()",
"def __init__(self, num_hermgauss=20):\r\n super().__init__()\r\n\r\n gh_x, gh_w = np.polynomial.hermite.hermgauss(num_hermgauss)\r\n self.gh_x = torch.nn.Parameter(\r\n torch.from_numpy(gh_x[:, None, None].astype(NUMPY_DTYPE)),\r\n requires_grad=False)\r\n self.gh_w = torch.nn.Parameter(\r\n torch.from_numpy(gh_w[:, None, None].astype(NUMPY_DTYPE)),\r\n requires_grad=False)",
"def __init__(self, run):\n self.run = run\n\n fr = np.s_[:, 0, :]\n self.tf = run.Tf[fr]\n self.zf = run.Zf[fr]\n\n self.tf_ = run.Tf_[fr]\n self.zf_ = run.Zf_[fr]\n\n self.uf = run.Uf\n self.uf_ = run.Uf_\n\n self.levels_u = 100\n self.levels_u_ = np.linspace(-0.5, 0.5, 100)\n\n # derived properties\n # absolute velocity\n self.uf_abs = np.hypot(run.Uf, run.Wf)\n self.uf__abs = np.hypot(run.Uf_, run.Wf_)",
"def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55"
] | [
"0.63615376",
"0.6329917",
"0.6315436",
"0.6304923",
"0.6211358",
"0.6152573",
"0.61234856",
"0.6065217",
"0.6056414",
"0.6029072",
"0.5988933",
"0.5905428",
"0.5902939",
"0.58744293",
"0.585578",
"0.58418816",
"0.5836042",
"0.5823521",
"0.5822146",
"0.5813255",
"0.580647",
"0.5798709",
"0.5790158",
"0.5786622",
"0.5775858",
"0.5772316",
"0.5771245",
"0.5766504",
"0.576157",
"0.57400906"
] | 0.7273586 | 0 |
Take the MD5 digest of a name, convert it to hex and take the first 6 characters as an RGB value. | def dopplr(name):
return "#" + hashlib.sha224(name).hexdigest()[:6] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_md5(text):\n return hashlib.md5(text).hexdigest()",
"def color_name_to_hex(name, default='#000000'):\n try:\n name = str(name)\n if name.startswith('#'):\n return name\n return webcolors.name_to_hex(name)\n except ValueError:\n return default",
"def get_md5_from_hexdigest(self, md5_hexdigest):\r\n import binascii\r\n digest = binascii.unhexlify(md5_hexdigest)\r\n base64md5 = base64.encodestring(digest)\r\n if base64md5[-1] == '\\n':\r\n base64md5 = base64md5[0:-1]\r\n return (md5_hexdigest, base64md5)",
"def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]",
"def colorname(line):\n strline = line.split('\\t')\n\n # get color name and hex\n clname = unidecode.unidecode(strline[0])\n clname = re.sub(BAD_CHARS, '', clname, 0, re.MULTILINE | re.IGNORECASE)\n clname = clname.lower()\n\n hexcol = strline[1].replace('#', '')\n return (clname, hexcol.upper(), strline[0])",
"def get_md5(s):\n m = hashlib.md5()\n m.update(s.encode('utf8'))\n return m.hexdigest()",
"def hex_to_rgb_hash(value):\n if not value:\n value = \"#EFEFEF\"\n\n value = value.lstrip('#')\n\n if len(value) != 6:\n raise Exception(\"hex_to_rgb_hash expects hex color of length, eg #FF0000, but got %s\" % value)\n\n round(int('EF', 16) / 255.0, 2)\n\n return {\n 'red': round(int(value[:2], 16) / 255.0, 1),\n 'green': round(int(value[2:4], 16) / 255.0, 1),\n 'blue': round(int(value[4:], 16) / 255.0, 1),\n }",
"def hex_md5_of_bytes(data: bytes) -> str:\n return hashlib.md5(data).hexdigest()",
"def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()",
"def rgb_color( color ):\n color = color.strip()\n if color[0] == '#':\n color=color[1:]\n if len(color) != 6:\n raise ValueError, \"#%s incorrect format use #rrggbb\" % color\n r, g, b = color[:2], color[2:4], color[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)",
"def _md5sum(data):\n hash = hashlib.md5()\n hash.update(six.b(data))\n hash_hex = hash.hexdigest()\n return hash_hex",
"def _hash_value(value):\n return hashlib.md5(value.encode('utf-8')).hexdigest()[:9]",
"def hash_cli_name(name):\n from hashlib import blake2b\n return blake2b(name.encode(), digest_size=32).hexdigest()",
"def get_md5_from_str(src: str) -> str:\n res: str = \"\"\n if not isinstance(src, str) or str == \"\":\n return res\n m: hashlib._hashlib.HASH = hashlib.md5()\n m.update(src.encode('utf-8'))\n res = m.hexdigest()\n return res",
"def name(r, g, b, a=1):\n r = 0 if r < 0 else 1 if r > 1 else r\n g = 0 if g < 0 else 1 if g > 1 else g\n b = 0 if b < 0 else 1 if b > 1 else b\n a = 0 if a < 0 else 1 if a > 1 else a\n if a >= 1:\n s = '#%02x%02x%02x' % (255*r, 255*g, 255*b)\n return re.sub(r'#(\\w)\\1(\\w)\\2(\\w)\\3', r'#\\1\\2\\3', s)\n else:\n return 'rgba(%d,%d,%d,%0.2f)' % (255*r, 255*g, 255*b, a)",
"def get_md5(self, line):\n m = hashlib.md5()\n m.update(str(line).encode('utf-8'))\n return m.hexdigest()",
"def _hash_name(self, name, length=None):\n if not length:\n length = self.header_size\n hashed = name[:min(length, len(name))]\n for x in range(length, len(name), length):\n rem = min(x+length,len(name))-x\n for i in range(rem):\n hashed = hashed[:i] + chr(ord(name[x + i]) ^ ord(hashed[i])) + hashed[i+1:]\n if len(hashed) < length:\n hashed += '\\x00' * (length-len(hashed))\n return hashed",
"def _md5(input):\n m = hashlib.md5()\n m.update(input)\n return m.hexdigest()",
"def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()",
"def hashname(self):\n return hashlib.md5(self.name.encode('utf-8')).hexdigest()",
"def getmd5(image: Image):\n return hashlib.md5(image.tobytes()).hexdigest()",
"def md5_value(strg):\n\tmd5 = hashlib.md5()\n\tmd5.update(strg.encode('UTF-8'))\n\treturn md5.hexdigest()",
"def get_colour_name(rgb_triplet):\n min_colours = {}\n for key, name in webcolors.css21_hex_to_names.items():\n r_c, g_c, b_c = webcolors.hex_to_rgb(key)\n rd = (r_c - rgb_triplet[0]) ** 2\n gd = (g_c - rgb_triplet[1]) ** 2\n bd = (b_c - rgb_triplet[2]) ** 2\n min_colours[(rd + gd + bd)] = name\n return min_colours[min(min_colours.keys())]",
"def calc_md5(string):\n\treturn md5(string).hexdigest()",
"def md5hash(string):\n return hashlib.md5(string).hexdigest()",
"def reformatColor(self, colorStr):\n if type(colorStr) is str:\n if colorStr.startswith('#'):\n colorStr = colorStr.replace('#', '')\n else:\n raise Exception('color is not hex format')\n r = int(colorStr[:2], 16)\n g = int(colorStr[2:4], 16)\n b = int(colorStr[4:6], 16)\n return r, g, b",
"def calc_md5(s: Union[bytes, str]) -> str:\n h = hashlib.new(\"md5\")\n\n b = s.encode(\"utf-8\") if isinstance(s, str) else s\n\n h.update(b)\n return h.hexdigest()",
"def get_md5(string):\r\n byte_string = string.encode(\"utf-8\")\r\n md5 = hashlib.md5()\r\n md5.update(byte_string)\r\n result = md5.hexdigest()\r\n return 'M'+result",
"def digest(string):\n return md5(string.encode(\"utf-8\")).hexdigest()",
"def MD5(self) -> _n_0_t_3[_n_0_t_9]:"
] | [
"0.61176944",
"0.6115744",
"0.61068666",
"0.60837525",
"0.6063438",
"0.60526246",
"0.6049227",
"0.6043318",
"0.6015899",
"0.595205",
"0.59485966",
"0.5934476",
"0.5924637",
"0.59155124",
"0.5902312",
"0.5875135",
"0.5866297",
"0.5865237",
"0.5853065",
"0.58442265",
"0.5834953",
"0.58330804",
"0.58141726",
"0.5810125",
"0.5785845",
"0.57750183",
"0.57547337",
"0.5754443",
"0.5748153",
"0.5747936"
] | 0.65998656 | 0 |
For a given background colour, return black or white for the text | def foreground_colour(background_colour):
# Get RGB values
background_colour = background_colour.lstrip("#")
background_colour = struct.unpack('BBB', background_colour.decode('hex'))
r = background_colour[0]
g = background_colour[1]
b = background_colour[2]
print(r, g, b)
# The perceived brightness of the individual primaries red, green, and blue
# are not identical. The quickest advice I can give is to use the
# traditional formula to convert RGB to gray - R*0.299 + G*0.587 + B*0.114.
# There are lots of other formulas.
grey = r*0.299 + g*0.587 + b*0.114
# The gamma curve applied to displays makes the middle gray value higher
# than you'd expect. This is easily solved by using 186 as the middle value
# rather than 128. Anything less than 186 should use white text, anything
# greater than 186 should use black text.
print(grey)
if grey < 186:
return "white"
else:
return "black" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def black_or_white(bgcolor):\n ary_bgcolors = re.findall(r\"[\\w']+\", bgcolor)\n R = int(ary_bgcolors[1])\n G = int(ary_bgcolors[2])\n B = int(ary_bgcolors[3])\n Lumi = (sum([R,G,B])/3)\n\n if Lumi > 125:\n colorfont = 'rgb(0,0,0)'\n else:\n colorfont = 'rgb(255,255,255)'\n\n return colorfont",
"def get_coloured_text_string(text, colour):\n if colour==\"red\":\n return (\"\\033[31m\" + text + \"\\033[0m\")\n if colour==\"green\":\n return (\"\\033[32m\" + text + \"\\033[0m\")\n if colour==\"yellow\":\n return (\"\\033[33m\" + text + \"\\033[0m\")\n if colour==\"blue\":\n return (\"\\033[34m\" + text + \"\\033[0m\")\n if colour==\"purple\":\n return (\"\\033[35m\" + text + \"\\033[0m\")\n if colour==\"cyan\":\n return (\"\\033[36m\" + text + \"\\033[0m\")\n if colour==\"white\":\n return (\"\\033[37m\" + text + \"\\033[0m\")\n return text",
"def color_text(txt, foreground=PALETTE['white'], background=PALETTE['black']):\n if isinstance(foreground, str) and foreground.startswith('#'):\n foreground = hex_to_rgb(foreground)\n if isinstance(background, str) and background.startswith('#'):\n background = hex_to_rgb(background)\n return '{}{}{}{}'.format(_f(*foreground), _b(*background), txt, _r())",
"def get_matching_text_color(im, pos, size):\n part_area = (pos[0], pos[1], pos[0] + size[0], pos[1] + size[1])\n part_image = im.crop(part_area).convert('L')\n stat = ImageStat.Stat(part_image)\n mean_brightness = stat.mean[0]\n if mean_brightness < 128:\n return textColorBright\n else:\n return textColorDark",
"def fore_text(txt, foreground=PALETTE['white']):\n if isinstance(foreground, str) and foreground.startswith('#'):\n foreground = hex_to_rgb(foreground)\n return '{}{}{}'.format(_f(*foreground), txt, _r())",
"def GetTextBackground(*args, **kwargs):\n return _gdi_.DC_GetTextBackground(*args, **kwargs)",
"def _remove_background_colors(text) -> StyledStr:\n return _remove_regex(BACKGROUND_COLORS_REGEX, text)",
"def darkText(img):\n kernel = np.ones((30, 30), np.uint8) \n img_orig = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n \n TH = 150\n img_orig[(img_orig[:,:,0] < TH) | (img_orig[:,:,1] < TH) | (img_orig[:,:,2] < TH)] = (0,0,0)\n \n img_orig = closing(img_orig, size=(1, int(img.shape[1] / 8)))\n \n return (cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY) != 0).astype(np.uint8)",
"def get_foreground(background_color, output='hex'):\n hex_white = '#FFFFFF'\n hex_black = '#000000'\n rgb_white = (255, 255, 255)\n rgb_black = (0, 0, 0)\n\n # convert to rgb if got hex\n if isinstance(background_color, str):\n background_color = hex_to_rgb(background_color)\n\n luminance = calculate_color_luminance(background_color)\n if (luminance + 0.05) / (0.0 + 0.05) > (1.0 + 0.05) / (luminance + 0.05):\n return rgb_black if output.lower() == 'rgb' else hex_black\n else:\n return rgb_white if output.lower() == 'rgb' else hex_white",
"def get_text_color ( self, object ):\n return self.text_color_",
"def contrast_from_bg(cls, col=\"#000000\", dark_default=\"000000\", light_default=\"FFFFFF\", hashed=\"#\"):\n trigger = float(0.45) #Values greater than this result in black text\n if not col:\n return \"#000000\" #Default to black\n if col in (\"Transparent\",\"transparent\"):\n return \"#000000\" #Default to black\n if not hashed:\n hashed = \"\"\n elif hashed is True:\n hashed = \"#\"\n try:\n col_out = cls.colour_to_rgb_tuple(col)\n r,g,b = col_out\n div = 255.0 #Produces a value between 0-1 as a float\n lum = float(0.2126*pow(r/div, 2.2)) + float(0.7152*pow(g/div, 2.2)) + float(0.0722*pow(b/div, 2.2))\n except (TypeError, ValueError):\n return dark_default\n #logging.info (\"Luminosity: %s\" % lum)\n #Decision gate:\n if lum >= trigger: #Light background, need dark text\n return \"%s%s\" % (hashed, dark_default)\n else: #Dark background, need light text\n return \"%s%s\" % (hashed, light_default)",
"def GetTextColour(self):\r\n \r\n return self._colText",
"def colourise(value, background=False):\n if background:\n return mark_safe(\"\".join((r\"\\cellcolor{\", COLOURUPS.get(value, \"white\"),\n \"}{\", value, \"}\")))\n else:\n return mark_safe(\"\".join((r\"\\textcolor{\", COLOURUPS.get(value, \"purple\"),\n \"}{\", value, \"}\")))",
"def _remove_text_colors(text) -> StyledStr:\n return _remove_regex(FOREGROUND_COLORS_REGEX, text)",
"def colorful_text(text, color=Fore.RESET):\n return color + text + Fore.RESET",
"def _change_background_color(text, color_code) -> StyledStr:\n uncolored_bg = _remove_background_colors(text)\n return _apply_ansi_code(color_code, uncolored_bg)",
"def test_assembleBackgroundColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.bg.blue[\"hello\"]), \"\\x0f\\x03,02hello\"\n )",
"def prBlueBG(text):\n print(\"\\033[44m{}\\033[0m\".format(text), sep=\"\")",
"def colorize(text, color):\n return COLOR_DICT[color] + str(text) + COLOR_DICT['end']",
"def textBackgroundBrush(self):\n return self._text_bg_brush",
"def printRed(text):\n print(Fore.RED + text + Fore.WHITE)",
"def sub_brightbg(self, ansimatch):\n return self.ansi_xterm256_bright_bg_map_dict.get(ansimatch.group(), \"\")",
"def GetBackgroundColour(self):\r\n\r\n return self._colBack",
"def brightText(img):\n kernel = np.ones((30, 30), np.uint8) \n img_orig = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)\n \n TH = 150\n img_orig[(img_orig[:,:,0] < TH) | (img_orig[:,:,1] < TH) | (img_orig[:,:,2] < TH)] = (0,0,0)\n \n img_orig = closing(img_orig, size=(1, int(img.shape[1] / 8)))\n \n return (cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY) != 0).astype(np.uint8)",
"def colorize(text, color):\n\n if not supports_color():\n return text\n\n return color + text + Colors.ENDC",
"def get_text_color ( self, object ):\n if self._is_selected( object ):\n return self.selected_text_color_\n return self.text_color_",
"def check_color(style):\n for kw in list(cc.keys()):\n m = re.search(kw, style)\n if m:\n return m.group()\n\n # Return 'b' if nothing has found\n return 'b'",
"def _get_output_letter(rgb):\n\n\t\tif rgb == Rgb.pastel_purple():\n\t\t\treturn \"p\"\n\t\telif rgb == Rgb.pastel_yellow():\n\t\t\treturn \"y\"\n\t\telif rgb == Rgb.pastel_green():\n\t\t\treturn \"g\"\n\t\telif rgb == Rgb.pastel_blue():\n\t\t\treturn \"b\"\n\t\telif rgb == Rgb.strong_red():\n\t\t\treturn \" \"\n\n\t\treturn \"?\"",
"def get_foreground(self):\n\n h = ((self._bytes[12] & 0x0F) << 8) | self._bytes[13]\n s = self._bytes[14]\n l = self._bytes[15]\n\n h = utils.map(h, 0, 4095, 0, 360)\n s = 65 - utils.map(s, 0, 255, 0, 20)\n l = 75 - utils.map(l, 0, 255, 0, 20)\n\n return utils.hsl_to_rgb(h, s, l)",
"def get_text_color(cls, quad):\n\n\t\tr = cls.get_address_value(quad.left_operand)\n\t\tg = cls.get_address_value(quad.right_operand)\n\t\tb = cls.get_address_value(quad.result)\n\n\t\tif r >= 0 and g >= 0 and b >= 0:\n\t\t\treturn [r,g,b]\n\t\telse:\n\t\t\tcls.throwColorError(\"text\", r,g,b)"
] | [
"0.79908603",
"0.70390666",
"0.70168227",
"0.6928707",
"0.67906517",
"0.6725715",
"0.6606326",
"0.6565484",
"0.6551614",
"0.651119",
"0.6485232",
"0.6482854",
"0.6421965",
"0.6326192",
"0.62925994",
"0.6289619",
"0.6224909",
"0.6212772",
"0.6126866",
"0.6106249",
"0.6093084",
"0.6079434",
"0.60593385",
"0.60563815",
"0.60511804",
"0.60263956",
"0.5999359",
"0.59709877",
"0.5966262",
"0.59359604"
] | 0.70396507 | 1 |
Find the largest-sized font that'll fit this text on this cover | def largest_font_that_fits(draw, font_file, text, cover_width):
text_w = cover_width + 1
font_size = 110
padding = 20
while(text_w + padding > cover_width):
font_size -= 10
font = ImageFont.truetype(font_file, font_size)
text_w, text_h = draw.textsize(text, font)
return font | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_optimal_font_size(fontfile,text,maxwidth,maxheight):\n fontsize = 1\n font = ImageFont.truetype(fontfile, fontsize)\n while font.getsize(text)[0] < maxwidth and font.getsize(text)[1] < maxheight:\n fontsize += 1\n font = ImageFont.truetype(fontfile, fontsize-1)\n return font",
"def get_font_at_size(fonts_path, font_name, initial_font_size, text_to_print, target_width):\n font_size = initial_font_size\n while True:\n font = ImageFont.truetype(path.join(fonts_path, font_name), font_size)\n text_width = font.getsize(text_to_print)[0]\n if text_width <= target_width:\n break\n if font_size < 9:\n break\n font_size = font_size - 1\n return font",
"def get_optimal_font_scale(text, width):\r\n for scale in reversed(range(0, 60, 1)):\r\n textSize = cv2.getTextSize(text, fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=scale/10, thickness=1)\r\n new_width = textSize[0][0]\r\n #print(new_width)\r\n if (new_width <= width):\r\n return scale/10\r\n return 1",
"def calc_text_size(self, text, font):\n w = 0\n for c in text:\n o = ord(c)\n if o > 0xff: # Translate Cyrillic Unicode to ASCII\n o -= 848\n if o > 255:\n o = 32\n w += font.char_size(o)[1]\n return(w, font.height())",
"def Get6PixelsHigh(text, color, fit_width=False):\n font = wx.Font(6, wx.SWISS, style=wx.NORMAL, weight=wx.BOLD)\n white = ' '\n width = -1\n if fit_width:\n width = 6\n lines = FitSize(width, 6, GetFontPixels(font, text, color, white), white)\n return lines",
"def textwide(s, tf):\r\n width = 350 ## default ok for Arial or Helvetica\r\n if gv[\"font\"] == \"Times-roman\":\r\n width = 330\r\n if gv[\"font\"] == \"Courier\":\r\n width = 390\r\n if gv[\"fontfixed\"] is False:\r\n localfontsize = int(gv[\"fontsize\"]*gv[\"globalscale\"])\r\n else:\r\n localfontsize = int(gv[\"fontsize\"])\r\n return tf*localfontsize * len(s)*width/(1000*(gv[\"fixedUR\"][0] - gv[\"fixedLL\"][0]))",
"def get_height_text(self, text, font_path, f_size=constant.FONT_SIZE_DEFAULT):\n # ログ\n log.debug(self)\n try:\n # Set font of text\n font = ImageFont.truetype(font_path, f_size)\n # Get size of text\n size = font.getsize(str(text))\n log.info(self)\n # Width of text\n return size[1]\n except:\n # 例外処理\n log.error(traceback.format_exc())",
"def get_text_size(self):\n return self.__text_surface.get_width(), self.__text_surface.get_height()",
"def get_textSize(self, text):\n #-- get size of text\n ((tw, th),tpad) = cv2.getTextSize( text,\n fontFace=self.fontFace,\n fontScale=self.fontScale,\n thickness=self.fontThickness)\n return tw, th, tpad",
"def get_width_text(self, text, font_path, f_size=constant.FONT_SIZE_DEFAULT):\n # ログ\n log.debug(self)\n try:\n # Set font of text\n font = ImageFont.truetype(font_path, f_size)\n # Get size of text\n size = font.getsize(str(text))\n log.info(self)\n # Width of text\n return size[0]\n except:\n # 例外処理\n log.error(traceback.format_exc())",
"def adjusting_fonts(self):\n fix_x = int(0 * settings.scale)\n fix_y = int(0 * settings.scale)\n font_object = self.fontA\n box = self.box\n text_box = self.box.get_size()\n text_list = self.text.split()\n number_of_words = len(text_list)\n count = 0\n height = fix_y\n first = True\n line = \"\"\n line_break = False\n while count < number_of_words:\n line += text_list[count]\n line_size = font_object.size(line)\n line_pos = int((text_box[0] + fix_x - line_size[0]) / 2)\n if line_size[0] < text_box[0]:\n if count + 1 < number_of_words:\n temporary_line = line + \" \" + text_list[count + 1]\n if font_object.size(temporary_line)[0] >= text_box[0]:\n line_image = font_object.render(line, 1, self.color)\n height += int((line_size[1] * 0.8))\n box.blit(line_image, (line_pos, height))\n line = \"\"\n else:\n line += \" \"\n elif count + 1 == number_of_words:\n height += int((line_size[1] * 0.8))\n box.blit(\n font_object.render(line, 1, self.color), (line_pos, height)\n )\n else:\n line = text_list[count]\n height += int(\n line_size[1] * 0.8\n ) # If line height is perfect it does not seem that it is the same text\n count += 1",
"def get_text_height(self, fnt, rescale=1):\n return round(fnt.getsize('HQfgjyp')[1]*rescale)",
"def fontSize(size):\n\t\treturn int(-size * (InterfaceTools.getCanvasSize()[0] / height))",
"def get_std_size(size_text):\r\n std_size = None\r\n # Find the color in the full dictionary\r\n size_code = SIZE_FULL.get(size_text)\r\n if size_code is not None:\r\n std_size = SIZE_STANDARD[size_code]\r\n return std_size",
"def GetBestSize(self, grid, attr, dc, row, col):\n text = grid.GetCellValue(row, col)\n\n _font = attr.GetFont()\n dc.SetFont(_font)\n\n col_width = grid.GetColSize(col)\n # margin = 2 # get border width into account when submitting optimal col size\n margin = 0\n w, h = _font.GetPixelSize()\n if len(text) > 0:\n w_sz = w * len(text) + 2 * w\n else:\n return wx.Size(2 * w, h) # self.default_width\n\n if self.auto_fit:\n col_width = min(w_sz, col_width)\n if col_width > self.max_width:\n col_width = self.max_width\n else:\n col_width = min(w_sz, self.default_width)\n\n if self.word_wrap:\n text = wordwrap.wordwrap(text, col_width, dc, breakLongWords=False,\n margin=margin)\n w, h = dc.GetMultiLineTextExtent(text)\n else:\n w = col_width\n if self.auto_fit:\n if w_sz > self.max_width:\n w_sz = self.max_width\n w = max(w, w_sz)\n else:\n return wx.Size(self.default_width, h)\n return wx.Size(w, h)",
"def bb_size(hit):\n \n size = hit.group(1)\n text = hit.group(2)\n \n minimum = getattr(settings, 'BBCODE_MINIMUM', 6)\n maximum = getattr(settings, 'BBCODE_MAXIMUM', 50)\n \n # Size needs to be controlled, and may vary depending on the CSS on the site. We don't want\n # Users requesting a size too small to be visible, or too large and take up the whole screen!\n # This could eventually go into the settings.py file, hehe.\n if(int(size) < minimum):\n return '<font style=\"font-size: %dpx\">%s [Too Small, Upscaled To 6]</font>' % (minimum, text)\n\n if(int(size) > maximum):\n return '<font style=\"font-size: %dpx\">%s [Too Large, Reduced To 50]</font>' % (maximum, text)\n\n # Return the normal text size\n return '<font style=\"font-size: %dpx\">%s</font>' % (int(size), text)",
"def get_fontsize(numrows):\r\n thresholds = [25, 50, 75, 100, 125]\r\n sizes = [5, 4, 3, 2, 1.5, 1]\r\n i = 0\r\n while numrows > thresholds[i]:\r\n i += 1\r\n if i == len(thresholds):\r\n break\r\n return sizes[i]",
"def _label_width(text):\n width = 0\n for lineno, line in enumerate(text.split(u'\\n')):\n size = [_BIG_FONT, _SMALL_FONT][lineno > 0] # Cool idiom, huh?\n width = max(width, size * len(line))\n return width",
"def get_text_font ( self, object ):\n return self.text_font",
"def measure_text_obj(t, r, ax):\n bb = t.get_window_extent(renderer=r)\n inv = ax.transData.inverted()\n bb = transforms.Bbox(inv.transform(bb))\n return (bb.width, bb.height)",
"def text_width(text):\n # Really crude guess would be: return len(text)/2\n return sum(GLYPH_WIDTHS.get(c, .5) for c in text)",
"def designer_pdf_viewer(h, word):\n a = 97\n alphabet = dict()\n for i in range(len(h)):\n alphabet.update({chr(a): h[i]})\n a += 1\n\n max_height = 0\n for i in word:\n if i in alphabet.keys():\n height = alphabet[i]\n if height > max_height:\n max_height = height\n\n return len(word) * 1 * max_height",
"def create_font(font_name, fit = True):\n font = {}\n try:\n numbers = Image.open(fonts_path + font_name + \".jpg\")\n if fit:\n numbers = images.fit_to_display(numbers, True)\n width, height = numbers.size\n font[\"d\"] = Image.open(fonts_path + \"degree.jpg\")\n font[\"d\"] = images.fit_to_display(font[\"d\"])\n font[\"p\"] = Image.open(fonts_path + \"percent.jpg\")\n font[\"p\"] = images.fit_to_display(font[\"p\"])\n font[\"m\"] = Image.open(fonts_path + \"am.jpg\")\n font[\"m\"] = images.fit_to_display(font[\"m\"], True)\n font[\"a\"] = Image.open(fonts_path + \"pm.jpg\")\n font[\"a\"] = images.fit_to_display(font[\"a\"], True)\n d_w, d_h = font[\"d\"].size\n font[\"d\"] = font[\"d\"].crop((10,0,d_w-10,d_w))\n box_width = float(width)/10 \n #Crop out each character in the provided image and save that to a dictionary\n for i in range(0, 10):\n box = [int(round(i*(box_width))), 0, int(round((i + 1)*(box_width))), height]\n #Checks if a subrectangle passes the width of the image, and shortens it if necessary\n if box[3] > width:\n box[3] = width\n \n box = tuple(box)\n font[str(i)] = numbers.crop(box) \n return font\n except IOError:\n print(\"Specified font file: %s.jpg cannot be found at: %s\" % (font_name,fonts_path))",
"def _label_height(text):\n return _BIG_FONT + _SMALL_LINE * text.count(u'\\n')",
"def _badge_chars_per_line(self, text, font, font_size):\r\n xmin, y = settings.CAMPAIGN_BADGE_TITLE_POSITION\r\n xmax, y = settings.CAMPAIGN_BADGE_CALLOUT_POSITION\r\n width_available = xmax - xmin - settings.CAMPAIGN_BADGE_TITLE_CLEARANCE # clearance\r\n font = ImageFont.truetype(font, font_size, encoding='unic')\r\n w, h = font.getsize(text)\r\n pixels_per_char = float(w)/float(len(text))\r\n chars_per_line = int(width_available/pixels_per_char)\r\n # If chars_per_line is slightly off, adjust it\r\n one_line = text[:chars_per_line]\r\n w, h = font.getsize(one_line)\r\n dw = w - width_available\r\n if dw > 0:\r\n dchar = dw/pixels_per_char + 2\r\n chars_per_line -= dchar\r\n return chars_per_line",
"def locate_front_facing_text(\n pipeline: Pipeline,\n text: str,\n img: ImageBGR,\n) -> Optional[ndarray]:\n\n boxes = locate_all_text(pipeline, text, img)\n return None if not boxes else max(boxes, key=width_of_box)",
"def _get_font_button(field_size):\r\n font_size = int(field_size * 2) # calculates font's size\r\n return pygame.font.SysFont(None, font_size) # returns font\r",
"def get_text_width(self, text: str) -> float:\n pass",
"def get_font_dict(f):\n return tk_font.Font(font=f).actual()",
"def GetMeasuringFont(self):\r\n\r\n return self._measuring_font"
] | [
"0.7606606",
"0.7251992",
"0.7203537",
"0.6699396",
"0.65662354",
"0.6466567",
"0.643199",
"0.63370657",
"0.6304743",
"0.6279057",
"0.62559944",
"0.6191247",
"0.61713487",
"0.6060088",
"0.59158844",
"0.58457994",
"0.58386546",
"0.5816909",
"0.58112776",
"0.58027065",
"0.57712907",
"0.57549596",
"0.57499486",
"0.573278",
"0.57312816",
"0.56746393",
"0.5620005",
"0.5610928",
"0.56064653",
"0.556217"
] | 0.8428862 | 0 |
Get a public-domain image for the given text | def get_an_image(text):
# Get the second or fourth word
index = random.choice([1, 3])
text = text.split()[index]
print(text)
sort = random.choice(["relevance", "interestingness-desc"])
print(sort)
from flickr_search_downloadr import flickr_search_downloadr
filename = flickr_search_downloadr(text,
tags=None,
user_id="internetarchivebookimages",
sort=sort,
quantity=1,
number=None,
size="m",
title=None,
noclobber=True,
outdir="E:/stufftodelete")
img = Image.open(filename[0])
return img | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_image_url():",
"def wiki_image(pagetext):\n images = [i for i in pagetext.images if i not in EXCLUDED_IMAGES]\n if len(images) > 0:\n return images[0]\n else:\n return ''",
"def process_images(text):\n # if text != None:\n if text is not None:\n soup = BeautifulSoup(str(text), 'html.parser')\n img = soup.img\n try:\n image = img['title']\n return image\n except (TypeError, KeyError):\n # print(img)\n pass",
"def l10n_img(ctx, url):\n return static(l10n_img_file_name(ctx, url))",
"def detect_text_uri(uri): #mostly taken from https://cloud.google.com/vision/docs/detecting-text#vision-text-detection-python\r\n client = vision.ImageAnnotatorClient()\r\n image = types.Image()\r\n image.source.image_uri = uri\r\n response = client.text_detection(image=image)\r\n texts = response.text_annotations\r\n return(texts[0].description) #returns text of image\r",
"def text_to_images(text):\n top_keywords = text_to_keywords(text)\n images = keyword_to_images(top_keywords)\n return images",
"def return_image(val, model_id, message_name, field_name, mime, sind):\n column_data_source = curdoc().get_model_by_name(sind)\n index = column_data_source.tags[0]\n url = \"http://{0}/image/\".format(_host) + \"---\".join([model_id, message_name, field_name, mime, sind, str(index)])\n return url",
"def get_image(result):\n article_id = result['id']\n id_ = article_id[14:]\n href = article_id[:14]\n\n #FIXME: not working\n image_url = \"http://www.jpress.nli.org.il/Olive/APA/NLI_heb/get/GetImage.ashx?kind=block&href=%s&id=%s&ext=.png\" %(href, id_)\n \n return image_url",
"def getNewsIconURL(newsBrain):",
"def replfunc(self, match):\n url = match.group(1)\n imgformat = url.split('.')[-1]\n if url.startswith('http'):\n data = urlopen(url).read()\n elif url.startswith('data'):\n img = '<img src=\"' + url + '\" ' + match.group(2) + ' />'\n return img\n else:\n with open(url, 'rb') as f:\n data = f.read()\n\n self.log.info(\"embedding url: %s, format: %s\" % (url, imgformat))\n b64_data = base64.b64encode(data).decode(\"utf-8\")\n if imgformat == \"svg\":\n img = '<img src=\"data:image/svg+xml;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n elif imgformat == \"pdf\":\n img = '<img src=\"data:application/pdf;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n else:\n img = '<img src=\"data:image/' + imgformat + \\\n ';base64,' + b64_data + '\" ' + match.group(2) + ' />'\n return img",
"def image(self, text):\n pattern = re.compile(r\"\"\"\n (?:[\\[{])? # pre\n \\! # opening !\n (\\<|\\=|\\>)? # optional alignment atts\n (%s) # optional style,class atts\n (?:\\. )? # optional dot-space\n ([^\\s(!]+) # presume this is the src\n \\s? # optional space\n (?:\\(([^\\)]+)\\))? # optional title\n \\! # closing\n (?::(\\S+))? # optional href\n (?:[\\]}]|(?=\\s|$)) # lookahead: space or end of string\n \"\"\" % self.c, re.U | re.X)\n return pattern.sub(self.fImage, text)",
"def html_to_text_img(url):\n if url is '' or url is None:\n return None\n try:\n html = urlopen(url)\n except HTTPError as e:\n print(e)\n return None\n except URLError as e:\n print(e)\n return None\n\n try:\n soup = BeautifulSoup(html.read(), \"html.parser\")\n except AttributeError as e:\n print(e)\n return None\n\n title = soup.find(\"h1\").get_text()\n text = ''\n ps = soup.find(\"div\", {\"class\": \"article gtm-click\"}).find_all('p')\n for p in ps:\n text += ''.join(p.get_text())\n imgs = soup.select(\"div.article__image img\")\n img_url = imgs[0]['data-src']\n\n return title, text, img_url",
"def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link",
"def get_content(url):\n img=requests.get(url).content\n return img",
"def _get_image(self, name):\n document = self._control.document()\n image = document.resource(QtGui.QTextDocument.ImageResource,\n QtCore.QUrl(name))\n return image",
"def image(self):\n image = self._image\n for remove in ('oci:', 'http:', 'https:'):\n if image.startswith(remove):\n image = image.replace(remove, '')\n return image",
"def get_image_info(client, my_massage):\n # Get Javascript updated HTML page\n response = client.commands.getPageText()\n assert response['status']\n assert response['result']\n\n # Create soup from HTML page and get desired information\n soup = BeautifulSoup(response['result'], markupMassage=my_massage)\n image_info = {'name': soup.find(id='caption_region').h3.string,\n 'link': urlparse.urljoin('http://www.nasa.gov',\n soup.find(attrs='Full_Size')['href'])}\n return image_info",
"def get_captcha_image(self, page_html) -> str:\n try:\n items = page_html.select('div[class=\"ddText\"]')\n result_items = re.findall(r'\\\"data:image.*\\\"', str(items[0]))\n result_items = str(result_items).replace(\"\\\"\", \"\")\n except Exception as e:\n raise e\n else:\n return result_items",
"def plant_uml_create_png_and_return_image_url(plant_uml_txt):\n # import hashlib\n # dbg(hashlib.md5(plant_uml_txt.encode('utf-8')).hexdigest())\n\n # plant_uml_server = calc_plantuml_server_url()\n PLANTUML_URL_ON_INTERNET = \"http://www.plantuml.com/plantuml/uml\"\n plant_uml_server = PLANTUML_URL_ON_INTERNET\n log.info(\"plant_uml_server calculated to be %s\" % (plant_uml_server,))\n\n try:\n # response = requests.post(plant_uml_server, data={'text': plant_uml_txt})\n\n url = os.path.join(plant_uml_server, deflate_and_encode(plant_uml_txt))\n response = requests.get(url)\n\n except (ConnectionError, requests.exceptions.RequestException) as e:\n # log.exception(\"Trying to render using plantuml server %s str(e)\" % plant_uml_server)\n log.error(f\"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}\")\n return None, None\n\n if response.status_code == 200:\n # print(\"plant_uml_server responded with 200 ok\")\n log.info(\"plant_uml_server responded with 200 ok\")\n\n \"\"\"\n Need to find the fragment:\n <p id=\"diagram\">\n <img src=\"http://www.plantuml.com:80/plantuml/png/SyfFKj2rKt3CoKnELR1Io4ZDoSa70000\" alt=\"PlantUML diagram\" onload=\"doneLoading()\">\n </p>\n in the response.\n \"\"\"\n regex = r'.*<p id=\"diagram\".*\\s*<.*img src=\\\"(.*?)\\\"'\n image_url = re.findall(regex, response.text, re.MULTILINE)\n if image_url:\n image_url = image_url[\n 0\n ] # this is likely referencing localhost due to calc_plantuml_server_url() giving us a localhost\n # if PLANT_UML_LOCAL:\n # image_url = normalise_plantuml_url(\n # image_url\n # ) # substitute the real host we are on. doesn't really matter, cos always will dynamically repair in all list and update views - but need this repair for ztree1 non persistent debug view, so that can ipad in to e.g. http://192.168.0.3:8000/ztree1 whilst images being returned are refering to localhost which the ipad cannot reach (cos its a different machine)\n else:\n image_url = None\n return image_url, response\n else:\n log.error(\"plant_uml_server responded with %d ok\" % (response.status_code,))\n return None, response",
"def image_to_text(filename):\n\n client = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n def get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\n image = get_file_content(filename)\n #res=client.basicGeneralUrl(url);\n res = client.general(image)\n\n text = ''\n\n for item in res[\"words_result\"]:\n text += \"%s\\n\" % item[\"words\"]\n\n return text",
"def plot_png(textVal):\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n wordCloud = WordCloud(width=500, height=300, max_font_size=100).generate(textVal)\n \n axis.imshow(wordCloud, interpolation='bilinear')\n axis.axis(\"off\")\n output = io.BytesIO()\n FigureCanvasAgg(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")",
"def getimage(self):",
"async def ponyr(self, *text):\n if len(text) > 0:\n if len(text[0]) > 1 and len(text[0]) < 20:\n try:\n msg = \"+\".join(text)\n search = \"https://derpiboo.ru/search.json?q=\" + msg + \"&random_image=y\" \n async with aiohttp.get(search) as r:\n result = await r.json()\n if \"id\" in result:\n imgid = str(result[\"id\"])\n async with aiohttp.get(\"https://derpiboo.ru/images/\" + imgid + \".json\") as r:\n result = await r.json()\n url = \"http:\" + result[\"image\"]\n await self.bot.say(url)\n else:\n await self.bot.say(\"Your search terms gave no results.\")\n except:\n await self.bot.say(\"Error.\")\n else:\n await self.bot.say(\"Invalid search.\")\n else:\n async with aiohttp.get(\"https://derpiboo.ru/search.json?q=*&random_image=y\") as r:\n result = await r.json()\n imgid = str(result[\"id\"])\n async with aiohttp.get(\"https://derpiboo.ru/images/\" + imgid + \".json\") as r:\n result = await r.json()\n url = result[\"image\"]\n await self.bot.say(\"http:\" + url )",
"def getimg(topic):\r\n topic = str(topic)\r\n embed = discord.Embed(title=f\"Image on {topic}!\", colour = discord.Colour.random(), timestamp = datetime.datetime.now())\r\n url = f\"https://source.unsplash.com/900x/900/?{topic}\"\r\n embed.set_image(url=url)\r\n return embed",
"def get_text(self):\n txt = self.lang.tool.image_to_string(\n self.image,\n lang=self.lang,\n builder=pyocr.builders.TextBuilder()\n )\n return txt",
"def _get_image_url_in_content(self, content):\n begin_token = 'src=\"'\n begin = content.find(begin_token)\n if begin == -1:\n return None\n\n # Acrescentamos o tamanho do 'begin_token' no 'begin'\n begin += len(begin_token)\n end = content.find('\"', begin)\n url = content[begin:end]\n return url.split('?')[0]",
"def get_kegg_image(self):\n return 'http://rest.kegg.jp/get/%s/img' % self.kegg_id",
"async def _misc_IMGplumbob(self, ctx):\r\n await self.bot.say('{}, http://i.imgur.com/q8xJsJQ.gif'.format(ctx.message.author.mention))",
"def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None",
"def get_image_comic_url(session, response):\n soup = bs(response.text, 'lxml')\n for div in soup.find_all('div', class_=\"img-comic-container\"):\n for a in div.find_all('a', class_=\"img-comic-link\"):\n for img in a.find_all('img', src=True):\n return \"https:\" + img['src']"
] | [
"0.6494177",
"0.626865",
"0.62658316",
"0.62218577",
"0.61541396",
"0.6036385",
"0.6014176",
"0.5989792",
"0.5951417",
"0.59493285",
"0.58120805",
"0.5806087",
"0.57846344",
"0.5778579",
"0.577659",
"0.5753518",
"0.5752582",
"0.5750098",
"0.5733324",
"0.572244",
"0.5719195",
"0.56934446",
"0.56846195",
"0.56691647",
"0.5665551",
"0.565708",
"0.5656446",
"0.565598",
"0.56220376",
"0.5602752"
] | 0.65308005 | 0 |
Generate the train and validation errors needed to plot a validation curve that we can use to select lambda. | def validation_curve(x, y, x_val, y_val):
lambda_vec = np.array([0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])
error_train = np.zeros(len(lambda_vec))
error_val = np.zeros(len(lambda_vec))
m = x.shape[0]
m_val = x_val.shape[0]
for i in range(len(lambda_vec)):
l = lambda_vec[i]
theta = train_linear_reg(x, y, l)
error_train[i] = 1.0 / (2 * m) * np.sum(np.square(x.dot(theta) - y))
error_val[i] = 1.0 / (2 * m_val) * np.sum(np.square(x_val.dot(theta) - y_val))
return lambda_vec, error_train, error_val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_train_test_errors(train_errors, test_errors, lambda_str , K , path, rng):\n plt.plot(range(rng), train_errors, marker='o', label='Training Data');\n plt.plot(range(rng), test_errors, marker='v', label='Test Data');\n plt.title('ALS-WR Learning Curve, lambda = %s, K = %d'%(lambda_str, K))\n plt.xlabel('Number of Epochs');\n plt.ylabel('RMSE');\n plt.legend()\n plt.grid()\n plt.savefig(\"../results/test_train_rmse_\"+path)\n plt.show()",
"def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")",
"def visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")",
"def train_test_error(e_train, e_test, model_params):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.plot(model_params, e_train, label='Training Set')\n plt.plot(model_params, e_train, label='Test Set')\n plt.xlabel('Model Parameter')\n plt.ylabel('MSE of model')\n plt.legend()\n\n return fig",
"def create(self, train: List[float], validation: List[float]) -> None:\n self.ax.plot(train)\n self.ax.plot(validation)\n self.ax.set_xlabel('epochs')\n if self.loss:\n self.ax.set_ylabel('loss')\n else:\n self.ax.set_ylabel('accuracy')\n self.ax.legend(['train', 'validation'])",
"def _plot_errors(self):\n for task_id, loss_type in self.task_ids.iteritems():\n x = np.arange(len(self.training_errors[task_id]))\n fig, ax = plt.subplots(1, 1)\n ax.set_xlabel('Number of epochs of training')\n if loss_type is LossTypes.mse:\n ax.set_ylabel('RMSE Error')\n elif loss_type is LossTypes.cross_entropy:\n ax.set_xlabel('(1 - accuracy)')\n plt.plot(x, self.training_errors[task_id], 'r', label='training')\n plt.plot(x, self.validation_errors[task_id], 'b', label='validation')\n plt.legend(loc=\"best\", framealpha=0.3)\n fig.savefig(\"error-curve-task-{}.png\".format(task_id))\n plt.close('all')",
"def plot_training_curve(path):\n import matplotlib.pyplot as plt\n train_err = np.loadtxt(\"{}_train_err.csv\".format(path))\n val_err = np.loadtxt(\"{}_val_err.csv\".format(path))\n train_loss = np.loadtxt(\"{}_train_loss.csv\".format(path))\n val_loss = np.loadtxt(\"{}_val_loss.csv\".format(path))\n plt.title(\"Train vs Validation Error\")\n n = len(train_err) # number of epochs\n plt.plot(range(1,n+1), train_err, label=\"Train\")\n plt.plot(range(1,n+1), val_err, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Error\")\n plt.legend(loc='best')\n plt.show()\n plt.title(\"Train vs Validation Loss\")\n plt.plot(range(1,n+1), train_loss, label=\"Train\")\n plt.plot(range(1,n+1), val_loss, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend(loc='best')\n plt.show()",
"def plot_learning_curve(X_train_all, X_val_all, y_train_all, y_val_all, train_sizes, title):\n\n errors_df = pd.DataFrame(columns = ['train_size', 'train_acc', 'val_acc'])\n\n # Loop through example sizes and get the training and validation error\n for train_size in train_sizes:\n # Select Subset of Data\n X_train = X_train_all[:train_size]\n X_val = X_val_all[:train_size]\n y_train = y_train_all[:train_size]\n y_val = y_val_all[:train_size]\n\n # Initialize Model\n model = svm.SVC(kernel='linear')\n\n # Fit model\n print(f\"Training {title} using {train_size} examples\")\n model.fit(X_train, y_train)\n\n # Get Predictions \n train_pred = model.predict(X_train)\n val_pred = model.predict(X_val)\n\n # Get Accuracy Score for X_Train and X_Val\n errors = pd.DataFrame({\n 'train_size': [train_size],\n 'train_acc': [accuracy_score(y_train, train_pred)],\n 'val_acc': [accuracy_score(y_val, val_pred)]\n })\n \n # Concatenate Dataframes\n errors_df = pd.concat([errors_df, errors])\n\n # Plot Learning Curve\n fig, ax = plt.subplots()\n\n errors_df.plot(x='train_size', y='train_acc',kind='line', ax=ax)\n errors_df.plot(x='train_size', y='val_acc',kind='line', color='red', ax=ax)\n\n ax.set_xlabel(\"Training Size\")\n ax.set_ylabel(\"Accuracy\")\n ax.set_title(title)\n\n # Save Figure\n plt.savefig('figs/' + title + '_learning_curve.png')",
"def test_validation_curve():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.validation_curve()",
"def plot_cv_errors(errors, lambdas , K , path): \n colors = cycle([\"aqua\", \"black\", \"blue\", \"fuchsia\", \"gray\", \"green\", \"lime\", \"maroon\", \"navy\", \"olive\", \"purple\", \"red\", \"silver\", \"teal\", \"yellow\"])\n \n markers = cycle([ \".\", \",\", \"o\", \"v\" , \"^\" , \">\", \"1\", \"2\", \"3\", \"4\", \"8\", \"s\", \"p\", \"*\", \"h\"])\n \n \n for i, data in enumerate(errors):\n \n lambda_str = ('%f' % lambdas[i]).rstrip('0').rstrip('.')\n plt.plot(range(len(data)), data, marker=next(markers), label='$\\lambda$ = %s'%lambda_str);\n \n plt.ylim(0.975 , 0.99)\n #plt.xlim(0 , 50)\n plt.title('ALS-WR Learning Curve, K = %d'% K)\n plt.xlabel('Number of Epochs');\n plt.ylabel('RMSE');\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.grid()\n plt.savefig(\"../results/\"+path)\n plt.show()",
"def plot_loss(training_errors, validation_errors):\n plt.xscale('Log')\n plt.xlabel('Epochs')\n plt.ylabel('Mean Actual Error')\n plt.plot(training_errors, label = \"Training Error\", \\\n color = 'blue')\n plt.plot(validation_errors, label = \"Validation Error\", \\\n color = 'red')\n plt.legend()\n # Saves plot automatically, adjust filename as needed.\n plt.savefig('reservoir_05whdens_100h_7spec_test_3.png')\n plt.show()",
"def error():\n\n # Make data set using errors\n dataset_a = DataSet(oscillating,error_y=oscillating_error,plot='error_bar',label='Data and error')\n dataset_a.set_error(interval=5,width=1,cap=2)\n dataset_b = DataSet(oscillating,plot='error_shade',error_y=oscillating_error,order=0,colour='lightgrey',label='Error')\n dataset_c = DataSet(oscillating,plot='line',order=1,colour='firebrick',label='Data')\n\n # Make line graph with error bars\n plot_bar = Plot()\n plot_bar.set_legend(legend=True)\n plot_bar.add_dataset(dataset_a)\n plot_bar.plot()\n plot_bar.save(name='./figures/2d_error_bar',fmt='png')\n plot_bar.display()\n\n # Make line graph with shaded errors\n plot_shade = Plot()\n plot_shade.set_legend(legend=True,location='upper left')\n plot_shade.add_dataset(dataset_b)\n plot_shade.add_dataset(dataset_c)\n plot_shade.plot()\n plot_shade.save(name='./figures/2d_error_shade',fmt='png')\n plot_shade.display()",
"def plot_errors(loss_train, loss_val, jet):\n plt.plot(list(range(len(loss_train))), loss_train, 'g', label='Training loss')\n plt.plot(list(range(len(loss_val))), loss_val, 'b', label='Validation loss')\n plt.title('Training and Validation loss for jet: {jet}'.format(jet=jet))\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()",
"def display_convergence_error(train_losses, valid_losses):\n if len(valid_losses) > 0:\n plt.plot(len(train_losses), train_losses, color=\"red\")\n plt.plot(len(valid_losses), valid_losses, color=\"blue\")\n plt.legend([\"Train\", \"Valid\"])\n else:\n plt.plot(len(train_losses), train_losses, color=\"red\")\n plt.legend([\"Train\"])\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()",
"def plot_error(self, maxstep=20):\n plt.ion()\n plt.xlabel(\"step\")\n plt.ylabel(\"Ave Logloss (bits)\")\n train_errors = []\n if self.dataset.test:\n test_errors = []\n for i in range(maxstep):\n self.learn(1)\n train_errors.append( sum(self.logloss(tple) for tple in self.dataset.train)\n /len(self.dataset.train))\n if self.dataset.test:\n test_errors.append( sum(self.logloss(tple) for tple in self.dataset.test)\n /len(self.dataset.test))\n plt.plot(range(1,maxstep+1),train_errors,\n label=str(self.num_classes)+\" classes. Training set\")\n if self.dataset.test:\n plt.plot(range(1,maxstep+1),test_errors,\n label=str(self.num_classes)+\" classes. Test set\")\n plt.legend()\n plt.draw()",
"def plot_cv_train_test(test_avg, train_avg, lambdas, path):\n\n plt.plot(lambdas, test_avg, marker = \"o\", color=\"green\", label=\"validating cv error\")\n plt.plot(lambdas, train_avg, marker = \"v\", color=\"blue\", label=\"training cv error\" )\n \n print(train_avg[0])\n print(test_avg[0])\n \n plt.title(\"Cross Validation Error for Different Regularization Parameters\")\n plt.ylabel(\"10f cv RMSE\")\n plt.ylim(0.86 , 0.99)\n plt.xlabel(\"$\\lambda$\")\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.grid()\n plt.savefig(\"../results/\"+path)\n plt.show()",
"def error_values(X_train,X_test,Y_train,Y_test):\n #setting up parameters and variables for plotting \n n_train = X_train.shape[0]\n n_test = X_test.shape[0]\n d = X_train.shape[1]\n hdnode = 100\n w1 = np.random.normal(0,0.001,d*hdnode).reshape((d,hdnode))\n d1 = np.zeros((d,hdnode))\n w2 = np.random.normal(0,0.001,hdnode).reshape((hdnode,1))\n d2 = np.zeros(hdnode)\n h = np.zeros(hdnode)\n mb = 100 #minibatch size\n m = int(n_train/mb)\n batch = np.arange(m) \n lr = 0.00020\n EP = 20000 #needed for initializing \n ep = 0\n yh = np.zeros((n_train,1))\n yh2 = np.zeros((n_test,1))\n L_train= np.zeros(EP+1)\n L_test = np.zeros(EP+1)\n Y_train = Y_train.reshape(len(Y_train),1)\n #activation function for the hidden layer is tanh\n \n def g(A):\n return (np.tanh(A))\n\n def gd(A):\n return (1-np.square(np.tanh(A)))\n \n #setting up how long the epoch will run\n EP = 200\n ep = 0\n while ep < EP:\n ep += 1\n yh = g(X_train.dot(w1)).dot(w2)\n yh2 = g(X_test.dot(w1)).dot(w2)\n L_train[ep] = LA.norm(yh-Y_train.reshape(len(Y_train),1))/n_train\n L_test[ep] = LA.norm(yh2-Y_test.reshape(len(Y_test),1))/n_test\n \n np.random.shuffle(batch)\n for i in range(m):\n st = batch[i]*mb\n ed = (batch[i]+1)*mb\n h = g(X_train[st:ed].dot(w1))\n y = h.dot(w2)\n d2 = h.T.dot(Y_train[st:ed]-y)\n d1 = X_train[st:ed].T.dot(np.multiply((Y_train[st:ed]-y).dot(w2.T),gd(X_train[st:ed].dot(w1))))\n w2 += lr*d2\n w1 += lr*d1\n return yh, yh2",
"def _validation_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_validate\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_validate[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(predictions, targets)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. - accuracy\n return errors",
"def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")",
"def plot_train_and_valid_curves(ax, train_points, valid_points, learning_rate_updates_epoch, best_per_lr, mode=\"loss\"):\n if mode==\"loss\":\n name = \"Loss\"\n names = \"losses\"\n factor = [1.2, 1.22]\n loc_legend = 1\n elif mode ==\"acc\":\n name = \"Accuracy\"\n names = \"acc\"\n factor = [0.9, 0.88]\n loc_legend = 4\n else:\n print \"Mode not understood. Available modes : 'loss' and 'acc'\"\n return\n\n #ax = plt.subplot(1,1,1)#\n # Plot training and valid loss curves\n ax.plot(np.arange(len(train_points)),train_points, c=\"k\", zorder=1)\n ax.plot(np.arange(len(valid_points)),valid_points, c=\"k\", zorder=1)\n ax.scatter(np.arange(len(train_points)),train_points, c=\"b\", label=\"Train %s\"%names, zorder=2)\n ax.scatter(np.arange(len(valid_points)),valid_points, c=\"r\", label=\"Valid %s\"%names, zorder=2)\n # Plot vertical line when the learning rate was updated\n first = True\n for elem in learning_rate_updates_epoch:\n if first:\n plt.plot([elem-.5,elem-.5], [1.4*valid_points[elem],train_points[elem]*0.6], c=\"k\", label=\"LR updates\", linestyle=\"--\")\n first = False\n else:\n plt.plot([elem-.5,elem-.5], [1.4*valid_points[elem],train_points[elem]*0.6], c=\"k\", linestyle=\"--\")\n # Plot best model in each region\n first = True\n for i,elem in enumerate(best_per_lr):\n if first:\n x = elem[0]\n y = elem[1]\n plt.scatter(x,y, c=\"g\", label=\"Best models\", marker=\"*\", zorder=3, s=100)\n plt.plot([x,x],[y,factor[0]*y], c=\"g\")\n plt.text(x,factor[1]*y, \"Epoch %d\"%(x), fontsize=8)\n first = False\n else:\n x = elem[0]+learning_rate_updates_epoch[i-1]\n y = elem[1]\n plt.scatter(x,y, c=\"g\", marker=\"*\", zorder=3, s=100)\n plt.plot()\n plt.plot([x,x],[y,factor[0]*y], c=\"g\")\n plt.text(x,factor[1]*y, \"Epoch %d\"%(x), fontsize=8)\n # Xlim, Ylim, labels, legend...\n ax.set_ylim([0,1])\n ax.set_xlim([0,len(train_points)+5])\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(name)\n handles,labels = ax.get_legend_handles_labels()\n sorted_zip = sorted(zip([2,0,1,3],handles, labels))\n index, handles, labels = zip(*sorted_zip)\n ax.legend(handles,labels, loc=loc_legend, prop={'size':10})",
"def cross_validation(self):\r\n kfold = KFold(10, shuffle=True, random_state=1)\r\n data = self.read_data()\r\n # error from each kth iteration\r\n errors = []\r\n for train, test in kfold.split(data):\r\n\r\n #Splitting into test and training data\r\n X_test, Y_test = data[test][:, 1], data[test][:, 2]\r\n X_train, Y_train = data[train][:, 1], data[train][:, 2]\r\n\r\n #Training on the split data\r\n weights, design_matrix = self.train(X_train, Y_train)\r\n\r\n y_pred = self.make_prediction(X_test, weights)\r\n self.plot(y_true=Y_test, y_pred=y_pred, x=X_test)\r\n\r\n #error matrix\r\n errors.append(np.mean(y_pred - Y_test) ** 2)\r\n\r\n #cross-validation parameter taken as mean of errors obtained from each iteration\r\n print(\"%0.10f mean with a standard deviation of %0.10f across the k-folds\" % (np.mean(errors), np.std(errors)))",
"def plot_errors(self):\n\n plt.title(\"Prediction Error\")\n plt.plot(self.errors)\n plt.ylabel(\"MSE (Mean Squared Error)\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def error_plot(training_costs, test_costs, learning_rate, accuracy, test_accuracy, val_accuracy, layers, data_size,\n n_neighbours, dropout_rate):\n\n plt.plot(training_costs, label=\"Training loss\")\n plt.plot(test_costs, label=\"Test loss\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Cost function (%)\", size='medium')\n plt.suptitle(\"Cost function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n plt.figtext(0.77, 0.35, \"Training accuracy\\n{0:.2f}%\".format(accuracy), size='medium')\n plt.figtext(0.77, 0.25, \"Test accuracy\\n{0:.2f}%\".format(test_accuracy), size='medium')\n plt.figtext(0.77, 0.15, \"Validation accuracy\\n{0:.2f}%\".format(val_accuracy), size='medium')\n if n_neighbours == 0:\n plt.figtext(0.77, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.77, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.77, 0.70, \"{}\\nsamples\".format(data_size))\n plt.legend(loc='right', bbox_to_anchor=(1.39, 0.5))\n plt.subplots_adjust(right=0.75)\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/error_plots/{}_error_{}\".format(n_neighbours, data_size))",
"def validation_summaries(self, step):\n dnn_summary_writer = self.dnn_summary_writer\n gan_summary_writer = self.gan_summary_writer\n DNN = self.DNN\n D = self.D\n train_dataset = self.train_dataset\n validation_dataset = self.validation_dataset\n\n self.evaluation_epoch(DNN, train_dataset, dnn_summary_writer, '2 Train Error')\n dnn_validation_mae = self.evaluation_epoch(DNN, validation_dataset, dnn_summary_writer, '1 Validation Error')\n self.evaluation_epoch(D, train_dataset, gan_summary_writer, '2 Train Error')\n self.evaluation_epoch(D, validation_dataset, gan_summary_writer, '1 Validation Error',\n comparison_value=dnn_validation_mae)",
"def regularized_batch_gradient_descent_plotter(X_train,y_train,X_valid,y_valid,\n lambdas=(0.,1e-6,1e-4,1e-2,1e-1,1.,10.,100.),alpha=.01,plot_results=False):\n\n train_losses = []\n validation_losses = []\n lambdas = list(lambdas)\n lambdas.sort()\n\n for lamb in lambdas:\n print(\"running regularized l2 gradient descent with lambda = {}...\".format(lamb))\n thetas, losses = regularized_grad_descent(X_train,y_train,alpha,lamb)\n train_losses.append(losses[-1])\n validation_losses.append(compute_square_loss(X_valid,y_valid,thetas[-1]))\n\n if plot_results:\n if 0. in lambdas:\n print(\"warning: value of 0 found in lambdas, cannot plot on log scale!\")\n plt.plot(np.log(lambdas),train_losses,'b--')\n plt.plot(np.log(lambdas),validation_losses,'r--')\n plt.show()\n plt.close()\n\n return zip(lambdas,validation_losses)",
"def plot(self, ylog=False, category=\"Accuracy\", figsize=(12, 5)):\n if self.CV == False: # no Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'r-', label='Training Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'r-', label='Training Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n if self.CV == True: # has Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].plot(range(1, len(self.cvError) + 1), self.cvError, 'r-', label='CV Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'g-', label='Training Accuracy')\n ax[1].plot(range(1, len(self.cvAcc) + 1), self.cvAcc, 'r-', label='CV Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'g-', label='Training Error Rate')\n ax[1].plot(range(1, len(self.cvAcc) + 1), 1 - np.array(self.cvAcc), 'r-', label='CV Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n\n return fig, ax",
"def learningCurve(X, y, Xval, yval, Lambda):\n\n # Number of training examples\n m, _ = X.shape\n\n # You need to return these values correctly\n error_train = np.zeros(m)\n error_val = np.zeros(m)\n\n for i in range(m):\n theta = trainLinearReg(X[:i + 1], y[:i + 1], Lambda)\n error_train[i], _ = linearRegCostFunction(X[:i + 1], y[:i + 1], theta, 0)\n error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)\n \n return error_train, error_val",
"def training(self, dataset, \n epochs, r_loss, beta, \n test= None ,Plotter=None):\n\n losses = []\n val_losses = []\n fidelities = []\n val_fidelities = []\n epochs = range(epochs)\n\n for i in tqdm(epochs, desc='Epochs'):\n losses_epochs = []\n fidelity_epochs =[]\n for step, x in enumerate(dataset):\n\n loss, fidelity = self.training_step(x, r_loss, beta)\n \n # Logging.\n losses_epochs.append(float(loss))\n fidelity_epochs.append(float(fidelity))\n \n losses.append(np.mean(losses_epochs))\n fidelities.append(np.mean(fidelity_epochs))\n \n if test:\n val_losses_epochs = []\n val_fidelity_epochs = []\n\n for step, x in enumerate(test):\n\n val_loss, val_fidelity = self.validating_step(x, r_loss, beta)\n \n # Logging.\n val_losses_epochs.append(float(val_loss))\n val_fidelity_epochs.append(float(val_fidelity))\n \n val_losses.append(np.mean(val_losses_epochs))\n val_fidelities.append(np.mean(val_fidelity_epochs))\n\n if Plotter != None:\n if test:\n Plotter.plot([losses,val_losses]) \n else:\n Plotter.plot(losses)\n \n\n return losses, val_losses, fidelities, val_fidelities",
"def plot_curve(self):\n x1 = np.arange(self.init_epoch, self.params.num_epoch+1, dtype=np.int).tolist()\n x2 = np.linspace(self.init_epoch, self.epoch,\n num=(self.epoch-self.init_epoch)//self.params.val_every+1, dtype=np.int64)\n plt.plot(x1, self.train_loss, label='train_loss')\n plt.plot(x2, self.val_loss, label='val_loss')\n plt.legend(loc='best')\n plt.title('Train/Val loss')\n plt.grid()\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()",
"def part_one(test_data, train_data):\n print \"Part 1.1\"\n range_k = range(1,71, 2)\n train_acc = []\n test_acc = []\n cv_acc = []\n train_neighbors_dists = knn_algo(train_data, train_data)\n test_neighbors_dists = knn_algo(test_data, train_data)\n cv_dists = knn_algo_cross_validate(train_data)\n\n for k in range_k:\n train_neighbors = find_k(train_neighbors_dists, k)\n train_err = (calculate_error(train_neighbors))\n\n test_neighbors = find_k(test_neighbors_dists, k)\n test_err = (calculate_error(test_neighbors))\n\n cv_neighbors = find_k(cv_dists, k)\n cv_err = (calculate_error(cv_neighbors))\n\n ### TODO: cross validation here\n train_acc.append(train_err)\n test_acc.append(test_err)\n cv_acc.append(cv_err)\n\n # part 1.1\n print \"K range: \"\n print range_k\n print \"Train acc: \"\n print train_acc\n print \"Test acc: \"\n print test_acc\n print \"CV acc: \"\n print cv_acc\n\n print \"Part 1.2: \"\n # part 1.2\n plt.plot(range_k, train_acc, label = \"train\")\n plt.plot(range_k, test_acc, label = \"test\")\n plt.plot(range_k, cv_acc, label = \"CV\")\n plt.ylabel(\"percent error\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.show()\n return (range_k, train_acc, test_acc, cv_acc)"
] | [
"0.7372803",
"0.7234287",
"0.7018966",
"0.6955053",
"0.68787926",
"0.6867536",
"0.68586004",
"0.6833294",
"0.669417",
"0.663864",
"0.6621206",
"0.66208285",
"0.6596412",
"0.65892947",
"0.6574992",
"0.65665126",
"0.6500066",
"0.64827365",
"0.6461519",
"0.6461419",
"0.6399849",
"0.6341921",
"0.62978643",
"0.6285056",
"0.62829417",
"0.6254284",
"0.62419194",
"0.6171167",
"0.6143938",
"0.6140702"
] | 0.76718855 | 0 |
Check that we reject a WRITE that names the iounit argument but still has a positional format argument (containing an '='). TODO 267. This test needs expanding and probably moving to a file dedicated to R913 and its (many) constraints. | def test_named_unit_before_fmt_error():
tcls = Write_Stmt
# Cannot have an un-named (positional) argument after a named argument
with pytest.raises(NoMatchError):
tcls('''WRITE (UNIT=6, '("write some=""'//'text'//'""")')''') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_write_stmt():\n tcls = Write_Stmt\n obj = tcls('write (123)\"hey\"')\n assert isinstance(obj, tcls), repr(obj)\n assert str(obj) == 'WRITE(123) \"hey\"'\n assert repr(obj).replace(\"u'\", \"'\") == (\n \"Write_Stmt(Io_Control_Spec_List(',', (Io_Control_Spec(None, \"\n \"Int_Literal_Constant('123', None)),)), Output_Item_List(',', \"\n \"(Char_Literal_Constant('\\\"hey\\\"', None),)))\"\n )\n\n obj = tcls('WRITE (*,\"(I3)\") my_int')\n assert isinstance(obj, tcls), repr(obj)\n assert str(obj) == 'WRITE(*, \"(I3)\") my_int'\n assert repr(obj).replace(\"u'\", \"'\") == (\n \"Write_Stmt(Io_Control_Spec_List(',', (Io_Control_Spec(None, \"\n \"Io_Unit('*')), Io_Control_Spec(None, \"\n \"Char_Literal_Constant('\\\"(I3)\\\"', None)))), Output_Item_List(',', \"\n \"(Name('my_int'),)))\"\n )\n\n obj = tcls(\"WRITE (*,namtest)\")\n assert isinstance(obj, tcls), repr(obj)\n assert str(obj) == \"WRITE(*, namtest)\"\n assert repr(obj).replace(\"u'\", \"'\") == (\n \"Write_Stmt(Io_Control_Spec_List(',', \"\n \"(Io_Control_Spec(None, Io_Unit('*')), Io_Control_Spec(None, \"\n \"Name('namtest')))), None)\"\n )\n\n # Test when format specifier contains an '=' character\n iolist = Io_Control_Spec_List(\"*,'(5X,\\\"q_mesh =\\\",4F12.8)'\")\n assert isinstance(iolist, Io_Control_Spec_List)\n obj = tcls(\"WRITE(*,'(5X,\\\"q_mesh =\\\",1F12.8)') 1.d0\")\n assert isinstance(obj, tcls)\n assert repr(obj).replace(\"u'\", \"'\") == (\n \"Write_Stmt(Io_Control_Spec_List(',', (Io_Control_Spec(None, \"\n \"Io_Unit('*')), Io_Control_Spec(None, \"\n \"Char_Literal_Constant('\\\\'(5X,\\\"q_mesh =\\\",1F12.8)\\\\'', None)))), \"\n \"Output_Item_List(',', (Real_Literal_Constant('1.D0', None),)))\"\n )\n\n obj = tcls(\"WRITE(*,FMT='(5X,\\\"q_mesh =\\\",1F12.8)') 1.d0\")\n assert isinstance(obj, tcls)\n assert repr(obj).replace(\"u'\", \"'\") == (\n \"Write_Stmt(Io_Control_Spec_List(',', (Io_Control_Spec(None, \"\n \"Io_Unit('*')), Io_Control_Spec('FMT', \"\n \"Char_Literal_Constant('\\\\'(5X,\\\"q_mesh =\\\",1F12.8)\\\\'', None)))), \"\n \"Output_Item_List(',', (Real_Literal_Constant('1.D0', None),)))\"\n )\n\n # Format specifier contains an '=' and is built using concatenation\n obj = tcls('''WRITE (6, '(\"write = some=\"\"'//'text'//'\"\"\")')''')\n assert isinstance(obj, tcls)\n assert str(obj) == '''WRITE(6, '(\"write = some=\"\"' // 'text' // '\"\"\")')'''\n obj_repr = repr(obj)\n obj_repr = obj_repr.replace('u\"', '\"')\n assert obj_repr.replace(\"u'\", \"'\") == (\n \"Write_Stmt(Io_Control_Spec_List(',', \"\n \"(Io_Control_Spec(None, Int_Literal_Constant('6', None)), \"\n \"Io_Control_Spec(None, Level_3_Expr(Level_3_Expr(\"\n \"Char_Literal_Constant('\\\\'(\\\"write = some=\\\"\\\"\\\\'', None), '//', \"\n \"Char_Literal_Constant(\\\"'text'\\\", None)), '//', \"\n \"Char_Literal_Constant('\\\\'\\\"\\\"\\\")\\\\'', None))))), None)\"\n )",
"def test_incorrect_input():\n content = 'hi'\n filename = {}\n\n with pytest.raises(TypeError):\n write_file(content, filename)\n\n content = {}\n filename = 'hi'\n\n with pytest.raises(TypeError):\n write_file(content, filename)",
"def check_write_command(self, line):\n self.E_str = \"check_write_command\"\n err_msg = \"The write command takes the syntax:\\n\\n\\twrite <data_name> <filepath>\"\n err_msg += \"\\n\\nor you could specify the type of file to write via:\\n\\n\\t\"\n err_msg += \"write <data_name> <filepath> as <file_type>\"\n\n words = line.split()\n if len(words) != 3 and len(words) != 5:\n self.print_error(err_msg)\n words[1] = words[1].lstrip('$')\n line = ' '.join(words)\n\n line, any_vars = self.find_vars_in_str(line)\n words = line.split()\n words = self.fix_words(words)\n\n # Check the variable to be written actually exists\n if words[1] not in self.variables:\n self.print_error(f\"I can't find the data named: '{words[1]}'\")\n\n # Check we know how to write the requested filetype\n if len(words) == 5:\n if words[4] not in f_dicts.write_fncs:\n err_msg = \"I don't know how to write that type of file.\\n\\n\"\n err_msg += \"Please use one of:\\n\\t*\"\n err_msg += \"\\n\\t*\".join(list(f_dicts.write_fncs.keys()))\n self.print_error(err_msg)\n\n self.files_written.append(gen_parse.rm_quotation_marks(words[2]))\n\n # Need to check requested filetype and if that isn't in write_fncs then raise Error",
"def test_duplicate_manual_position_arguments():\n '{0} {1} {0}'.format(1, 2)\n '{0} {1} {0}'.format(1)",
"def test_kwargs_not_false_positive(*args, **kwargs):\n 'Hello John Doe {0[0]}'.format(args)\n 'Hello {0[name]}'.format(kwargs)",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def test_name_validation(self, attr):\n kwargs = {'kind': POSITIONAL_ONLY, attr: 3}\n with pytest.raises(TypeError) as excinfo:\n FParameter(**kwargs)\n assert excinfo.value.args[0] == \\\n '{} must be a str, not a {}'.format(attr, 3)",
"def test_supply_file(self):\n f = open(self.junk_file, 'w')\n f.close()\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, self.junk_file)",
"def _ValidateArgs(self, args):\n if not (args.IsSpecified('description') or\n args.IsSpecified('security_policy')):\n parameter_names = ['--description', '--security_policy']\n raise exceptions.MinimumArgumentException(\n parameter_names, 'Please specify at least one property to update')",
"def test_ParameterVariable_write_basic(self, mock_f):\n\n par = provide_parameter(\"double\", \"test\")\n with mock_f('test.txt', 'w') as m_fo:\n write_parameter(m_fo, parameter=par, stop_character=\"\")\n\n expected_writes = [unittest.mock.call(\"double test\"),\n unittest.mock.call(\"\"),\n unittest.mock.call(\"\"),\n unittest.mock.call(\"\\n\")]\n\n mock_f.assert_called_with('test.txt', 'w')\n handle = mock_f()\n handle.write.assert_has_calls(expected_writes, any_order=False)",
"def io_params_for_tadpole(io,key='in'):\n\n if type(io)==str:\n io=[io]\n\n N= len(io)\n if N==1:\n flag = f\"{key}1={io[0]}\"\n elif N==2:\n flag= f\"{key}1={io[0]} {key}2={io[1]}\"\n elif N==3:\n flag= f\"{key}1={io[0]},{io[2]} {key}2={io[1]}\"\n else:\n logger.error((\"File input/output expectation is one of: \"\n \"1 file = single-end/ interleaved paired-end \"\n \"2 files = R1,R2, or\"\n \"3 files = R1,R2,se\"\n \"got: {n} files:\\n{}\").format('\\n'.join(io),\n n=len(io)))\n sys.exit(1)\n return flag",
"def test_wrong_argument_for_encoding(self):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.encode(4.5, 10)",
"def test_arguments_same_name() -> None:\n\n @argcomb(a=\"b\")\n def f(a: Any = None, /, b: Any = None, **kwargs: Any) -> None:\n ...\n\n with pytest.warns(UserWarning):\n f(1, 2, a=3) # pylint: disable=E1124",
"def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected",
"def _processOutfmtArg(outfmt, stderr, gb_record_fmtdict, gb_cds_fmtdict) :\n outfmt_keys = outfmt.split(\",\")\n records_keys = set(gb_record_fmtdict.keys())\n cds_keys = set(gb_cds_fmtdict.keys())\n assert records_keys & cds_keys == set()\n if not all([x in records_keys | cds_keys for x in outfmt_keys]) :\n wrong_keys = [x for x in outfmt_keys if x not in records_keys | cds_keys]\n stderr.write(\"Bad outfmt specifier. You provided:\\n\")\n stderr.write(str(sorted(outfmt_keys)) + \"\\n\")\n stderr.write(\"Wrong specifier(s):\\n\")\n stderr.write(str(sorted(wrong_keys)) + \"\\n\")\n stderr.write(\"Allowed values are:\\n\")\n stderr.write(str(sorted(list(records_keys | cds_keys))) + \"\\n\")\n return outfmt_keys",
"def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'",
"def test_buoy_format2():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_2)\n assert str(err_info.value) == 'Input length incorrect, see instructions'",
"def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)",
"def test_writer_represents_missing_data_correctly(self, tmpdir, standard_gwas_parser_basic):\n reader = readers.IterableReader([\"1\\t100\\tA\\tC\\tNone\", \"2\\t200\\tA\\tC\\t.\"],\n parser=standard_gwas_parser_basic)\n expected_fn = tmpdir / 'test.txt'\n out_fn = reader.write(expected_fn, columns=['neg_log_pvalue'], make_tabix=False)\n\n assert expected_fn == out_fn\n assert os.path.isfile(out_fn), \"Output filename exists\"\n with open(out_fn, 'r') as f:\n assert f.readlines() == [\"#neg_log_pvalue\\n\", \".\\n\", \".\\n\"]",
"def raise_not_enough_arguments(self, string):\n\n\t\trequested = errors.number(self.counter + 1)\n\n\t\tnumber = len(self.positional)\n\n\t\tverb = \"was\" if number == 1 else \"were\"\n\n\t\twhat = \"Requested {} formatting argument for \"\\\n\t\t\t \"'{}' but only {} {} supplied!\"\n\n\t\twhat = what.format(requested, string, number, verb)\n\n\t\traise errors.ArgumentError(what)",
"def test_invalid_arguments(self):\n # More than two arguments should report an error.\n exit_code, output = run_cli('a', 'b', 'c')\n assert exit_code != 0\n assert \"Error\" in output\n # Invalid `ionice' values should report an error.\n exit_code, output = run_cli('--ionice=foo')\n assert exit_code != 0\n assert \"Error\" in output",
"def test_ParameterVariable_write_complex_string(self, mock_f):\n\n par = provide_parameter(\"double\", \"test\", value=\"\\\"Al\\\"\",\n comment=\"test comment\")\n\n with mock_f('test.txt', 'w') as m_fo:\n write_parameter(m_fo, parameter=par, stop_character=\",\")\n\n expected_writes = [unittest.mock.call(\"double test\"),\n unittest.mock.call(\" = \\\"Al\\\"\"),\n unittest.mock.call(\",\"),\n unittest.mock.call(\"// test comment\"),\n unittest.mock.call(\"\\n\")]\n\n mock_f.assert_called_with('test.txt', 'w')\n handle = mock_f()\n handle.write.assert_has_calls(expected_writes, any_order=False)",
"def _validate_can_write(self):\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")",
"def test_writeMisc(self):\n output = StringIO()\n self.builder._writeMisc(\n output, \"Other\",\n [(x, \"\") for x in range(2, 50, 3)])\n self.assertEquals(\n output.getvalue(),\n \"Other\\n\"\n \"-----\\n\"\n \" - #2, #5, #8, #11, #14, #17, #20, #23, #26, #29, #32, #35, #38, #41,\\n\"\n \" #44, #47\\n\"\n \"\\n\")",
"def test_duplicate_flags():\n parser = CmdParser([noArgs, onearg])\n with pytest.raises(CmdParseError):\n out = parser.parse(\"onearg -a -a\")",
"def test_arg_astringUnmatchedQuotes(self):\n self.assertRaises(imap4.IllegalClientResponse,\n self.server.arg_astring, b'\"open')",
"def test_invalid_parameters(key, val):\n with pytest.raises(ParameterError):\n FastBasic(**{key: val}).read(\"1 2 3\\n4 5 6\")\n with pytest.raises(ParameterError):\n ascii.read(\"1 2 3\\n4 5 6\", format=\"fast_basic\", guess=False, **{key: val})",
"def test_no_args(self):\r\n errstring = \"export requires two arguments\"\r\n with self.assertRaisesRegexp(CommandError, errstring):\r\n self.command.handle()",
"def test_missing_arg(self):\n with self.assertRaises(TypeError):\n self.r1.save_to_file()",
"def test_invalidValues(self):\n argV = \"--fooint egg\".split()\n self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)"
] | [
"0.6082462",
"0.5905375",
"0.56293344",
"0.5549656",
"0.5493521",
"0.54801637",
"0.5453339",
"0.54085284",
"0.53969",
"0.53846806",
"0.53668135",
"0.53544927",
"0.52869296",
"0.5286741",
"0.5283133",
"0.52826405",
"0.52780616",
"0.5258492",
"0.521193",
"0.5171243",
"0.51701754",
"0.516795",
"0.516511",
"0.5147128",
"0.51286554",
"0.5127404",
"0.5124427",
"0.51102877",
"0.51094663",
"0.51031464"
] | 0.60555166 | 1 |
Returns colour scheme for CSI (critical success index). | def _get_csi_colour_scheme():
this_colour_map_object = pyplot.cm.Blues
this_colour_norm_object = matplotlib.colors.BoundaryNorm(
LEVELS_FOR_CSI_CONTOURS, this_colour_map_object.N)
rgba_matrix = this_colour_map_object(this_colour_norm_object(
LEVELS_FOR_CSI_CONTOURS
))
colour_list = [
rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0])
]
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1.))
colour_norm_object = matplotlib.colors.BoundaryNorm(
LEVELS_FOR_CSI_CONTOURS, colour_map_object.N)
return colour_map_object, colour_norm_object | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conseguir_color(self):\n return self.pluma.conseguir_color()",
"def verify_color(cci):\n\n if cci < -6.0:\n return OrangeColor.GREEN\n elif -6.0 <= cci < -1.0:\n return OrangeColor.YELLOWISH_GREEN\n elif -1.0 <= cci < 2.7:\n return OrangeColor.YELLOW\n elif 2.7 <= cci < 6.0:\n return OrangeColor.LIGHT_ORANGE\n else: # cci >= 6\n return OrangeColor.ORANGE",
"def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3",
"def getColor(self,number):\n if number >= 0:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n hexcolor = '#%02x%02x%02x' % (ret[0]*255,ret[1]*255,ret[2]*255)\n return hexcolor",
"def _ansiCSI(self):\n # get my renderer\n renderer = self.renderer\n # build the 3 bit color generator\n yield from renderer.setq(name=\"csi3\", value=f'\"$(esc)[$(1)m\"')\n yield from renderer.setq(name=\"csi8\", value=f'\"$(esc)[$(1);5;$(2)m\"')\n yield from renderer.setq(name=\"csi24\", value=f'\"$(esc)[$(1);2;$(2);$(3);$(4)m\"')\n\n # all done\n return",
"def color_negative_red_positive_green(val):\n if val < 0:\n color = 'red'\n elif val > 0:\n color = 'green'\n else:\n color = 'black'\n\n return 'color: %s' % color",
"def get_colour(progress, colours):\n if progress >= 0 and progress <= 1:\n start_colour, end_colour = colours[0], colours[1]\n\n r = start_colour[0] + (end_colour[0] - start_colour[0]) * progress\n b = start_colour[1] + (end_colour[1] - start_colour[1]) * progress\n g = start_colour[2] + (end_colour[2] - start_colour[2]) * progress\n\n return '#%02x%02x%02x' % (round(r), round(b), round(g))\n \n else: return '#000000'",
"def highlight_color(self):\n return curses.color_pair(4) if self.cycling else curses.color_pair(2)",
"def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c",
"def nthColor(i):\n if i < len(colors):\n return colors[i]\n\n c1 = colors[i % len(colors)]\n c2 = nthColor(i // len(colors))\n\n return \"#\" + hex((int(c1[1:],16) + int(c2[1:],16)) // 2)[2:]",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)",
"def get_colors(n):\n color = cm.rainbow(np.linspace(0, 1, n))\n return color",
"def getColor(self,number):\n if number >= 0:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,0,abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,1-abs(number/self.maxn),1)\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n return [ret[0]*255.0,ret[1]*255.0,ret[2]*255.0]",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def get_color(self, record):\n if record.level == CRITICAL:\n return Fore.RED + Style.DIM\n elif record.level == ERROR:\n return Fore.RED + Style.BRIGHT\n elif record.level == WARNING:\n return Fore.YELLOW + Style.DIM\n elif record.level == NOTICE:\n return Fore.CYAN + Style.BRIGHT\n elif record.level == DEBUG:\n return Fore.GREEN + Style.BRIGHT\n return Fore.WHITE",
"def color_positive_green(val):\n\tif val > 0: \n\t\tcolor = 'green'\n\telse: \n\t\tcolor = 'red'\n\treturn 'background-color: %s' % color",
"def color_negative_red(value):\n\n if value == 1:\n color = 'red'\n else:\n color = 'black'\n\n return 'color: %s' % color",
"def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'",
"def getColor(self):\n return self._l[2]",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def get_good_colors(N):\n HSV_tuples = [(x*1.0/N, 0.5, 1) for x in range(N)]\n return(255 * np.array(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)))",
"def get_colors(n_curves):\n # Use offsets to avoid shitty crappy min/max colors (ugly yellow or white for example)\n first_step = 1. / (n_curves + 1.)\n last_step = 1.\n nb_steps = n_curves + 1\n color_range = np.linspace(first_step, last_step, nb_steps)\n color = iter(plt.cm.hot(color_range))\n return color",
"def IntToColor(number):\n color = COLORS_INDEX.get(number)\n return color if color else 'default'",
"def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors",
"def get_color(tense, seq=0):\n if tense in ['Perfekt', 'present perfect', 'pretérito perfecto compuesto', 'passé composé', 'vtt',\n 'passato prossimo', 'PresPerf']:\n return '#1f77b4'\n elif tense in ['Präsens', 'simple present', 'presente', 'présent', 'ott', 'Present', 'present imperfective', 'present']:\n return '#ff7f0e'\n elif tense in ['Präteritum', 'simple past', 'pretérito perfecto simple', 'indefinido', 'passé simple', 'ovt', 'Past', 'past perfective', 'past']:\n return '#2ca02c'\n elif tense in ['Plusquamperfekt', 'past perfect', 'pretérito pluscuamperfecto', 'plus-que-parfait', 'vvt',\n 'trapassato prossimo', 'PastPerf', 'past+infinitive']:\n return '#d62728'\n elif tense in ['Futur I', 'simple future', 'futur', 'futuro', 'ottt', 'future']:\n return '#9467bd'\n elif tense in ['Futur II', 'future perfect', 'futur antérieur', 'futuro perfecto', 'ovtt', 'future past']:\n return '#8c564b'\n elif tense in ['present perfect continuous', 'Cont', 'present/adjective']:\n return '#e377c2'\n elif tense in ['pasado reciente', 'passé récent', 'RecentPast', 'copular']:\n return '#7f7f7f'\n elif tense in ['pretérito imperfecto', 'imparfait', 'Imperfecto', 'past imperfective', 'past+present']:\n return '#bcbd22'\n elif tense in ['present participle', 'participio', 'Gerund', 'gerund', 'gerund perfective']:\n return '#17becf'\n elif tense in ['Infinitiv', 'infinitief', 'infinitif', 'infinitivo', 'infinitive']:\n return '#aec7e8'\n elif tense in ['present continuous', 'PresGer', 'existential']:\n return '#ffbb78'\n elif tense in ['condicional', 'conditionnel', 'Rep']:\n return '#98df8a'\n elif tense in ['past continuous']:\n return '#ff9896'\n elif tense in ['past perfect continuous']:\n return '#c5b0d5'\n elif tense in ['future continuous']:\n return '#c49c94'\n elif tense in ['future in the past', 'futuro perfecto']:\n return '#f7b6d2'\n elif tense in ['future in the past continuous']:\n return '#c7c7c7'\n elif tense in ['infinitivo perfecto']:\n return '#dbdb8d'\n elif tense in ['futur proche', 'futuro próximo']:\n return '#9edae5'\n elif tense in ['futur proche du passé', 'futuro próximo en imperfecto']:\n return '#393b79'\n elif tense in ['conditionnel passé']:\n return '#5254a3'\n elif tense in ['subjuntivo presente']:\n return '#e7cb94'\n elif tense in ['subjuntivo pretérito imperfecto']:\n return '#8c6d31'\n elif tense in ['participle past perfective active']:\n return '#843c39'\n elif tense in ['gerund imperfective']:\n return '#393b79'\n\n # Mandarin\n elif tense in ['unmarked']:\n return '#1f77b4'\n elif tense in ['rvc']:\n return '#ff7f0e'\n elif tense in ['le1', 'le']:\n return '#2ca02c'\n elif tense in ['le12']:\n return '#d62728'\n elif tense in ['guo']:\n return '#9467bd'\n elif tense in ['zhe']:\n return '#8c564b'\n elif tense in ['zai']:\n return '#e377c2'\n elif tense in ['unmarked duplication']:\n return '#7f7f7f'\n elif tense in ['adv']:\n return '#bcbd22'\n elif tense in ['adj']:\n return '#17becf'\n elif tense in ['conj']:\n return '#aec7e8'\n elif tense in ['mood']:\n return '#ffbb78'\n elif tense in ['noun']:\n return '#98df8a'\n elif tense in ['non-verb', 'other']:\n return '#ff9896'\n\n # ViB\n elif tense in ['adjectif']:\n return '#e6194b'\n elif tense in ['adverbe']:\n return '#3cb44b'\n elif tense in ['article défini']:\n return '#ff0000'\n elif tense in ['article défini pluriel']:\n return '#bf0000'\n elif tense in ['article défini singulier']:\n return '#ff0051'\n elif tense in ['article indéfini']:\n return '#ff8400'\n elif tense in ['article indéfini pluriel']:\n 
return '#8c4800'\n elif tense in ['article indéfini singulier']:\n return '#4c2800'\n elif tense in ['déterminant défini pluriel']:\n return '#adb300'\n elif tense in ['déterminant démonstratif']:\n return '#56bf00'\n elif tense in ['déterminant indéfini']:\n return '#285900'\n elif tense in ['déterminant possessif']:\n return '#00e686'\n elif tense in ['expression']:\n return '#e377c2'\n elif tense in ['nom commun']:\n return '#7f7f7f'\n elif tense in ['nom propre']:\n return '#bcbd22'\n elif tense in ['nom propre gén']:\n return '#dbdb8d'\n elif tense in ['numéral']:\n return '#17becf'\n elif tense in ['pronom démonstratif']:\n return '#5b008c'\n elif tense in ['pronom indéfini']:\n return '#2200ff'\n elif tense in ['pronom interrogatif']:\n return '#0058e6'\n elif tense in ['pronom personnel']:\n return '#006773'\n elif tense in ['pronom personnel adverbial']:\n return '#00331e'\n elif tense in ['pronom relatif']:\n return '#285900'\n elif tense in ['pronom réfléchi']:\n return '#00e686'\n\n # Contraction\n elif tense in ['contracted', 'bare noun']:\n return '#2f5597'\n elif tense in ['uncontracted', 'demonstrative']:\n return '#fd8f8e'\n\n else:\n return COLOR_LIST[seq % len(COLOR_LIST)]",
"def _sc_print_ ( sc ) :\n from Bender.Logger import colored_string \n if sc.isSuccess () : return colored_string( 'SUCCESS' , WHITE , GREEN , True ) \n elif sc.isRecoverable () : return colored_string( 'RECOVERABLE' , RED , YELLOW , True ) \n elif 0 != sc.getCode () :\n return colored_string('FAILURE[%d]' % sc.getCode() , YELLOW , RED , True ) \n return colored_string('FAILURE' , YELLOW , RED , True )"
] | [
"0.6432659",
"0.6361762",
"0.62378114",
"0.6107295",
"0.60489845",
"0.6039875",
"0.6022437",
"0.60140353",
"0.59839135",
"0.5968801",
"0.5968204",
"0.5968204",
"0.5968204",
"0.5938249",
"0.5938249",
"0.5932919",
"0.5913306",
"0.5908596",
"0.59029335",
"0.58735913",
"0.5872064",
"0.58716625",
"0.58517474",
"0.582142",
"0.5800823",
"0.57795036",
"0.57679677",
"0.5734277",
"0.57077324",
"0.57031333"
] | 0.7236149 | 0 |
Returns colour scheme for Peirce score. | def _get_peirce_colour_scheme():
this_colour_map_object = pyplot.cm.Blues
this_colour_norm_object = matplotlib.colors.BoundaryNorm(
LEVELS_FOR_PEIRCE_CONTOURS, this_colour_map_object.N)
rgba_matrix = this_colour_map_object(this_colour_norm_object(
LEVELS_FOR_PEIRCE_CONTOURS
))
colour_list = [
rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0])
]
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1.))
colour_norm_object = matplotlib.colors.BoundaryNorm(
LEVELS_FOR_PEIRCE_CONTOURS, colour_map_object.N)
return colour_map_object, colour_norm_object | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conseguir_color(self):\n return self.pluma.conseguir_color()",
"def get_colour(self):\n return self.colour",
"def get_colour(self) -> str:\n return self.colour",
"def getColor(self):\n return self._l[2]",
"def get_palace_board_red(self):\n\n return self._palace_board_red",
"def getColor(self,number):\n if number >= 0:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,0,abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,1-abs(number/self.maxn),1)\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n return [ret[0]*255.0,ret[1]*255.0,ret[2]*255.0]",
"def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'",
"def determine_color(score=be.df[\"score\"].mean()):\n avg_score = int(round(be.df[\"score\"].mean(), 0))\n if score < avg_score * 0.95:\n return \"red\"\n elif score > avg_score * 1.05:\n return \"green\"\n else:\n return \"yellow\"",
"def color(self):\n return 0x2f3136",
"def create_colour_scheme(alphabet=\"cinema\", colourscheme=\"hls\"):\n \n # List of all available alphabets from pepdata library + the all / dna alphabet\n alphabets_available = [\"cinema\", \"all\", \"dna\", \"gbmr4\", \"sdm12\", \"hsdm17\", \n \"hp2\", \"murphy10\", \"alex6\", \"aromatic2\", \"hp_vs_aromatic\"]\n # Default value, colouring scheme as used in the cinema alignment tool\n if alphabet == \"cinema\":\n return {\"A\": \"#c1ffc1\", \"B\": \"#ffffff\", \"C\": \"#50d433\", \"D\": \"#088446\",\n \"E\": \"#088446\", \"F\": \"#de94e3\", \"G\": \"#c1ffc1\", \"H\": \"#191996\",\n \"I\": \"#91b4ff\", \"J\": \"#ffffff\", \"K\": \"#ffa500\", \"L\": \"#91b4ff\",\n \"M\": \"#91b4ff\", \"N\": \"#088446\", \"O\": \"#ffffff\", \"P\": \"#ffb6c1\",\n \"Q\": \"#088446\", \"R\": \"#ffa500\", \"S\": \"#ce0000\", \"T\": \"#ce0000\",\n \"U\": \"#ffffff\", \"V\": \"#91b4ff\", \"W\": \"#de94e3\", \"X\": \"#ffffff\",\n \"Y\": \"#de94e3\", \"Z\": \"#ffffff\"}\n # If the alphabet is found in the reduced alphabets file:\n if alphabet in alphabets_available:\n alphabet = eval(alphabet)\n alphabet_values = set(alphabet.values())\n colours = sns.color_palette(colourscheme, len(alphabet_values)).as_hex()\n for keys in alphabet.keys():\n alphabet[keys] = colours[alphabet[keys]]\n else:\n print(\"Colour input\", alphabet, \"not available.\")\n sys.exit(1)\n return alphabet",
"def getColor(self,number):\n if number >= 0:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n hexcolor = '#%02x%02x%02x' % (ret[0]*255,ret[1]*255,ret[2]*255)\n return hexcolor",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def color(piece):\n return Color.BLACK if piece in {Piece.BP, Piece.BN, Piece.BB, Piece.BR, Piece.BQ, Piece.BK} else Color.WHITE",
"def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'",
"def get_color(rank):\n if rank == 1:\n color = int(0xffd700)\n elif rank == 2:\n color = int(0xc0c0c0)\n elif rank == 3:\n color = int(0xcd7f32)\n else:\n color = random.randint(1, 16777215)\n\n return discord.Color(color)",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def get_color(self):\n return \"yellow\"",
"def get_palette(self, as_float=True):\n if as_float:\n return self._colors\n return (self._colors * 255).round().clip(0, 255).astype(np.uint8)",
"def change( p ):\n red = p[0]\n green = p[1]\n blue = p[2]\n return [ 255-red, 255-green, 255-blue ]",
"def get_color(edge, nR):\n R_color, E_color = 'C0', 'C1'\n edge = sorted(edge)\n if edge[0] < nR:\n if edge[1] > nR:\n comp_color = 'gray'\n zorder = 10\n else:\n comp_color = R_color\n zorder = 5\n else:\n comp_color = E_color\n zorder = 5\n return comp_color, zorder",
"def potential_color(self):\n\n return (1., 1., 0.)",
"def verify_color(cci):\n\n if cci < -6.0:\n return OrangeColor.GREEN\n elif -6.0 <= cci < -1.0:\n return OrangeColor.YELLOWISH_GREEN\n elif -1.0 <= cci < 2.7:\n return OrangeColor.YELLOW\n elif 2.7 <= cci < 6.0:\n return OrangeColor.LIGHT_ORANGE\n else: # cci >= 6\n return OrangeColor.ORANGE",
"def getSquareColor(file: int, rank: int) -> str:\r\n if (rank % 2 == file % 2):\r\n return 'light'\r\n else:\r\n return 'dark'",
"def _build_color_table() -> list[tuple[int, int, int, int, int]]:\n FG = FOREGROUND_COLOR\n BG = BACKGROUND_COLOR\n\n return [\n (0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),\n (0x00, 0x00, 0xAA, FG.BLUE, BG.BLUE),\n (0x00, 0xAA, 0x00, FG.GREEN, BG.GREEN),\n (0x00, 0xAA, 0xAA, FG.CYAN, BG.CYAN),\n (0xAA, 0x00, 0x00, FG.RED, BG.RED),\n (0xAA, 0x00, 0xAA, FG.MAGENTA, BG.MAGENTA),\n (0xAA, 0xAA, 0x00, FG.YELLOW, BG.YELLOW),\n (0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),\n (0x44, 0x44, 0xFF, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),\n (0x44, 0xFF, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),\n (0x44, 0xFF, 0xFF, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),\n (0xFF, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),\n (0xFF, 0x44, 0xFF, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),\n (0xFF, 0xFF, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),\n (0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),\n (0xFF, 0xFF, 0xFF, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),\n ]",
"def getColor(self):\r\n return self.color",
"def color_negative_red(value):\n\n if value == 1:\n color = 'red'\n else:\n color = 'black'\n\n return 'color: %s' % color",
"def get_color(activePerMillion):\n activePer100k = activePerMillion / 10.0\n if activePer100k < 100:\n return \"#aaf0d1\"\n elif activePer100k < 500:\n return \"#a3f7bf\"\n elif activePer100k < 1000:\n return \"#90EE90\"\n elif activePer100k < 1500:\n return \"#00ff7f\"\n elif activePer100k < 2000:\n return \"#77dd77\"\n elif activePer100k < 2500:\n return \"#32cd32\"\n elif activePer100k < 3000:\n return \"#4cbb17\"\n elif activePer100k < 3500:\n return \"#228b22\"\n elif activePer100k < 4000:\n return \"#355e3b \"\n else:\n return \"#006400\"",
"def _style_colours(self):\n\n pass",
"def rgbcolor(h, f):\n # q = 1 - f\n # t = f\n if h == 0:\n return v, f, p\n elif h == 1:\n return 1 - f, v, p\n elif h == 2:\n return p, v, f\n elif h == 3:\n return p, 1 - f, v\n elif h == 4:\n return f, p, v\n elif h == 5:\n return v, p, 1 - f",
"def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3"
] | [
"0.678865",
"0.6306019",
"0.6232524",
"0.622833",
"0.6212288",
"0.6193526",
"0.61802113",
"0.6138026",
"0.6122331",
"0.61007756",
"0.60846984",
"0.60838187",
"0.60765666",
"0.60642964",
"0.60596305",
"0.6055237",
"0.604426",
"0.597812",
"0.5978035",
"0.59603816",
"0.595773",
"0.59447736",
"0.5925505",
"0.59190154",
"0.59156805",
"0.59135234",
"0.5911019",
"0.5905958",
"0.58933365",
"0.5887424"
] | 0.75720006 | 0 |
Generates polygon for confidence interval. P = number of points in bottom curve = number of points in top curve | def _confidence_interval_to_polygon(
x_coords_bottom, y_coords_bottom, x_coords_top, y_coords_top,
for_performance_diagram=False):
nan_flags_top = numpy.logical_or(
numpy.isnan(x_coords_top), numpy.isnan(y_coords_top))
real_indices_top = numpy.where(numpy.invert(nan_flags_top))[0]
nan_flags_bottom = numpy.logical_or(
numpy.isnan(x_coords_bottom), numpy.isnan(y_coords_bottom))
real_indices_bottom = numpy.where(numpy.invert(nan_flags_bottom))[0]
if for_performance_diagram:
y_coords_top = y_coords_top[real_indices_top]
sort_indices_top = numpy.argsort(y_coords_top)
y_coords_top = y_coords_top[sort_indices_top]
x_coords_top = x_coords_top[real_indices_top][sort_indices_top]
y_coords_bottom = y_coords_bottom[real_indices_bottom]
sort_indices_bottom = numpy.argsort(-y_coords_bottom)
y_coords_bottom = y_coords_bottom[sort_indices_bottom]
x_coords_bottom = x_coords_bottom[real_indices_bottom][
sort_indices_bottom]
else:
x_coords_top = x_coords_top[real_indices_top]
sort_indices_top = numpy.argsort(-x_coords_top)
x_coords_top = x_coords_top[sort_indices_top]
y_coords_top = y_coords_top[real_indices_top][sort_indices_top]
x_coords_bottom = x_coords_bottom[real_indices_bottom]
sort_indices_bottom = numpy.argsort(x_coords_bottom)
x_coords_bottom = x_coords_bottom[sort_indices_bottom]
y_coords_bottom = y_coords_bottom[real_indices_bottom][
sort_indices_bottom]
polygon_x_coords = numpy.concatenate((
x_coords_top, x_coords_bottom, numpy.array([x_coords_top[0]])))
polygon_y_coords = numpy.concatenate((
y_coords_top, y_coords_bottom, numpy.array([y_coords_top[0]])))
return polygons.vertex_arrays_to_polygon_object(
polygon_x_coords, polygon_y_coords) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc",
"def generatePolygons():",
"def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")",
"def generate_polygon(x,y,N):\r\n # Add the first point to the end of the list and convert to array if needed\r\n if type(x) == list:\r\n x = np.array(x + [x[0]])\r\n y = np.array(y + [y[0]])\r\n else:\r\n x = np.append(x,x[0])\r\n y = np.append(y,y[0])\r\n \r\n # Parameterize the arrays and interpolate\r\n d = [get_distance((x[i],y[i]),(x[i+1],y[i+1])) for i in range(len(x)-1)]\r\n d = np.cumsum([0]+d)\r\n t = np.linspace(0,d[-1],N)\r\n fx = interp1d(d,x)\r\n fy = interp1d(d,y)\r\n x = fx(t)\r\n y = fy(t)\r\n \r\n return x,y",
"def create_proportions_interval(confidence, n_samples, data_point, method=\"AC\"):\n if data_point > 1 or data_point < 0:\n raise Exception(\"create_proportions_interval cannot be used for value outside of range [0,1].\")\n\n if method.lower() == \"clt\" or method.lower == \"wald\":\n clt_margin = st.norm.ppf(1 - (1 - confidence) / 2) * math.sqrt(data_point * (1 - data_point) / n_samples)\n clt = float(max(data_point - clt_margin, 0)), float(min(data_point + clt_margin, 1))\n return Interval(*clt)\n elif \"3\" in method or \"three\" in method:\n rule_of_three_margin = 3/n_samples\n rule_of_three = Interval(float(max(data_point - rule_of_three_margin, 0)), float(min(data_point + rule_of_three_margin, 1)))\n return rule_of_three\n elif method.lower() == \"ac\" or \"agresti\" in method.lower():\n AC = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"agresti_coull\")\n return Interval(*AC)\n elif method.lower() == \"wilson\":\n wilson = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"wilson\")\n return Interval(*wilson)\n elif \"clop\" in method.lower() or \"pear\" in method.lower():\n clopper_pearson = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"beta\")\n return Interval(*clopper_pearson)\n elif \"jef\" in method.lower():\n jeffreys = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"jeffreys\")\n return Interval(*jeffreys)\n elif \"hsb\" in method.lower():\n return create_interval_hsb(confidence, n_samples, data_point)\n else:\n raise Exception(\"Method mot found.\")",
"def create_poly(self, bounds):\n\n left, bottom, right, top = bounds\n\n return Polygon(\n [\n (left, bottom),\n (left, top),\n (right, top),\n (right, bottom),\n (left, bottom),\n ]\n )",
"def optimal_polygon(y, w=0.5, debug=False):\n # Make sure that we use numpy array\n y = np.array(y)\n x = np.arange(len(y))\n\n # Initialization\n y = np.round(y, 6)\n p_plus = (x[0], y[0] + w)\n l_plus = (x[0], y[0] + w)\n r_plus = (x[1], y[1] + w)\n s_plus = {(x[0], y[0] + w): (x[1], y[1] + w)}\n t_plus = {(x[1], y[1] + w): (x[0], y[0] + w)}\n p_minus = (x[0], y[0] - w)\n l_minus = (x[0], y[0] - w)\n r_minus = (x[1], y[1] - w)\n s_minus = {(x[0], y[0] - w): (x[1], y[1] - w)}\n t_minus = {(x[1], y[1] - w): (x[0], y[0] - w)}\n q = []\n i = 2\n\n while i < len(y):\n # Updating CH_plus (convex hull) and CH_minus\n p = (x[i - 1], y[i - 1] + w)\n p_i_plus = (x[i], y[i] + w)\n while (p != p_plus) and _angle(p_i_plus, p, t_plus[p], '+') > np.pi:\n p = t_plus[p]\n s_plus[p] = p_i_plus\n t_plus[p_i_plus] = p\n\n p = (x[i - 1], y[i - 1] - w)\n p_i_minus = (x[i], y[i] - w)\n while (p != p_minus) and _angle(p_i_minus, p, t_minus[p], '-') > np.pi:\n p = t_minus[p]\n s_minus[p] = p_i_minus\n t_minus[p_i_minus] = p\n\n # Check if CH_plus and CH_minus intersect\n if _angle(p_i_plus, l_plus, r_minus, '+') < np.pi:\n q.append((_intersect(l_plus, r_minus, p_plus, p_minus), l_plus, r_minus, p_plus, p_minus))\n p_minus = r_minus\n p_plus = _intersect(l_plus, r_minus, (x[i - 1], y[i - 1] + w), p_i_plus)\n s_plus[p_plus] = p_i_plus\n t_plus[p_i_plus] = p_plus\n r_plus = p_i_plus\n r_minus = p_i_minus\n l_plus = p_plus\n l_minus = p_minus\n while _angle(l_minus, r_plus, s_minus[l_minus], '-') < np.pi:\n l_minus = s_minus[l_minus]\n elif _angle(p_i_minus, l_minus, r_plus, '-') < np.pi:\n q.append((_intersect(l_minus, r_plus, p_minus, p_plus), l_minus, r_plus, p_minus, p_plus))\n p_plus = r_plus\n p_minus = _intersect(l_minus, r_plus, (x[i - 1], y[i - 1] - w), p_i_minus)\n s_minus[p_minus] = p_i_minus\n t_minus[p_i_minus] = p_minus\n r_minus = p_i_minus\n r_plus = p_i_plus\n l_minus = p_minus\n l_plus = p_plus\n while _angle(l_plus, r_minus, s_plus[l_plus], '+') < np.pi:\n l_plus = s_plus[l_plus]\n else:\n # Updating the two seperating and supporting lines\n if _angle(p_i_plus, l_minus, r_plus, '+') < np.pi:\n r_plus = p_i_plus\n while _angle(p_i_plus, l_minus, s_minus[l_minus], '+') < np.pi:\n l_minus = s_minus[l_minus]\n\n if _angle(p_i_minus, l_plus, r_minus, '-') < np.pi:\n r_minus = p_i_minus\n while _angle(p_i_minus, l_plus, s_plus[l_plus], '-') < np.pi:\n l_plus = s_plus[l_plus]\n i += 1\n\n # Add last change point\n a = _intersect(l_plus, r_minus, p_plus, p_minus)\n b = _intersect(l_minus, r_plus, p_minus, p_plus)\n p = ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)\n q.append((p, r_minus, r_plus, p_minus, p_plus))\n\n end_a = _intersect(p, r_plus, p_i_minus, p_i_plus)\n end_b = _intersect(p, r_minus, p_i_minus, p_i_plus)\n end = ((end_a[0] + end_b[0]) / 2, (end_a[1] + end_b[1]) / 2)\n q.append((end, (None, None), (None, None), p_i_minus, p_i_plus))\n\n if debug:\n return np.array(q)\n else:\n return np.array([o[0] for o in q])",
"def _bounding_box_to_polytope(lower, upper):\n intervals = [(a[0], b[0]) for a, b in zip(lower, upper)]\n return box2poly(intervals)",
"def getPointInPolygonStatement(self, approxTable, columns, columnsPIP, condition):\r\n return 'SELECT ' + ora.getHintStatement(ora.getParallelStringQuery(self.numProcesses)) + ora.getSelectColumns('*') + \"\"\" \r\nFROM TABLE(mdsys.sdo_PointInPolygon(CURSOR(\r\n\"\"\" + ora.getSelectStatement(approxTable, ora.getSelectColumns(columnsPIP)) + \"\"\"), \r\nMDSYS.SDO_GEOMETRY('\"\"\" + self.wkt + \"\"\"', \"\"\" + str(self.srid) + \"\"\"), \"\"\" + str(self.tolerance) +\"\"\"))\r\n\"\"\" + condition",
"def draw_polygon(self, *points, color=DEFAULT.color):",
"def polygon_weights(polygon, xrange=None, yrange=None,\n center=True): # pragma: no cover\n poly = np.array(polygon)\n if poly.ndim != 2 or poly.shape[-1] != 2 or poly.shape[0] < 3:\n log.warning(\"invalid polygon shape\")\n return []\n\n xlims = [poly[:, 1].min(), poly[:, 1].max()]\n ylims = [poly[:, 0].min(), poly[:, 0].max()]\n\n if xrange is not None:\n xlims[0] = np.nanmax((xlims[0], np.nanmin(xrange)))\n xlims[1] = np.nanmin((xlims[1], np.nanmax(xrange)))\n if yrange is not None:\n ylims[0] = np.nanmax((ylims[0], np.nanmin(yrange)))\n ylims[1] = np.nanmin((ylims[1], np.nanmax(yrange)))\n\n if xlims[0] >= xlims[1] or ylims[0] >= ylims[1]:\n log.debug(\"out of bounds\")\n return []\n\n xlims = [int(np.floor(xlims[0])), int(np.ceil(xlims[1]))]\n ylims = [int(np.floor(ylims[0])), int(np.ceil(ylims[1]))]\n\n if center:\n dx = -0.5, 0.5\n dy = -0.5, 0.5\n else:\n dx = 0, 1\n dy = 0, 1\n\n gy, gx = np.mgrid[ylims[0]:ylims[1] + 1, xlims[0]:xlims[1] + 1]\n p = path.Path(poly)\n result = []\n for ycen, xcen in zip(gy.ravel(), gx.ravel()):\n bbox = Bbox([[ycen + dy[0], xcen + dx[0]],\n [ycen + dy[1], xcen + dy[1]]])\n area = polygon_area(p.clip_to_bbox(bbox))\n if area != 0:\n result.append(((ycen, xcen), area))\n\n return result",
"def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self",
"def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self",
"def test_simple_polygonisation(n_points=20):\n # generate random sample points.\n sample_points = np.random.random_sample((n_points,2))*10\n # generate simple polygon\n seq = simple_polygonisation(sample_points)\n # plot polygon\n plt.figure()\n plt.plot(seq[:,0], seq[:,1], color=\"blue\", marker=\"s\", alpha=0.5)",
"def bezierPoly(ctrlP):\n n = len(ctrlP) - 1 #degree of the polynomial\n first = True\n for t in np.linspace(0.0, 1.0, 5 * n):\n point = bezierFunc(ctrlP, t)\n if first: # Initialize list of points in the polynomial\n bezierPointsList = np.copy(point)\n first = False\n else:\n bezierPointsList = np.append(bezierPointsList, point, axis=0)\n return bezierPointsList",
"def ci_prop(p, n, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # standard error\n std_error = np.sqrt(p * (1 - p) / n)\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n # calculate lower and upper confidence bounds\n lcb = np.round(p - margin_of_error, 2)\n ucb = np.round(p + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Proportion: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )",
"def getClassBalance(pshapes, bounds, proj):\n\n xmin, ymin, xmax, ymax = bounds\n bpoly = Polygon([(xmin, ymax),\n (xmax, ymax),\n (xmax, ymin),\n (xmin, ymin)])\n project = partial(\n pyproj.transform,\n pyproj.Proj(proj='latlong', datum='WGS84'),\n proj)\n bpolyproj = transform(project, bpoly)\n totalarea = bpolyproj.area\n polyarea = 0\n for pshape in pshapes:\n polyarea += pshape.area\n\n return polyarea/totalarea",
"def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])",
"def trapezoid_decomposition_pl(polygons, bounds):\n polygons = Polygons(polygons)\n # print(bounds)\n point_locator = PointLocator(bounds)\n for edge in polygons.random_edge_sampler():\n point_locator.add_line(edge)\n return point_locator",
"def initPoly(deg, st, end):\n if (deg == 0):\n print(\"ERROR: The bezier curve degree has to be greater than 0\")\n return\n controlP = np.zeros((deg + 1, 2))\n controlP[0] = np.asarray(st)\n for i in range(deg - 1):\n point = controlP[i] + 1 / deg * (np.asarray(end) - np.asarray(st))\n controlP[i + 1] = point\n controlP[-1] = np.asarray(end)\n return controlP",
"def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)",
"def draw_polygon(left_x, right_x, left_y, right_y, img_):\n pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])\n pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])\n pts = np.hstack((pts_left, pts_right))\n img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, color=(60, 200, 60), thickness=10, lineType=cv2.LINE_AA)\n img_ = cv2.fillPoly(img_, np.int_(pts), (50, 90, 50))\n return img_",
"def polygon_vol(P):\n area=0\n #first and last points must be the same\n if P==[]:\n return 0\n \n if P[0]!=P[len(P)-1]:\n P.append(P[0])\n\n for i in range(0,len(P)-1):\n area = area + P[i][0]*P[i+1][1]- P[i+1][0]*P[i][1]\n\n P.pop()\n return 0.5*area",
"def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return",
"def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = [(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y",
"def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline",
"def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly",
"def polygonpts(nSides, radius=1.0):\n\treturn [[cos(theta)*radius, sin(theta)*radius] for theta in frange(0, twopi, nSides+1)[:-1] ]",
"def create_polygon(self, vertices, style=None, parent=None):\n d = 'M %f %f L' % (vertices[0].x, vertices[0].y)\n for p in vertices[1:]:\n d = d + ' %f,%f' % (p.x, p.y)\n if vertices[0] != vertices[-1]:\n d = d + ' %f,%f' % (vertices[0].x, vertices[0].y)\n attrs = {'d': d}\n return self.create_path(attrs, style, parent)",
"def Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen):\n p0 = (ix, iy)\n p1 = (ix - t_sen, iy - w_sen + n_w_sen / 2)\n p2 = (ix, iy - w_sen + n_w_sen / 2)\n p3 = (ix, iy - n_w_sen / 2)\n p4 = (ix + t_sen, iy - n_w_sen / 2)\n p5 = (ix + t_sen, iy + w_sen - n_w_sen / 2)\n p6 = (ix, iy + w_sen - n_w_sen / 2)\n p7 = (ix, iy + n_w_sen / 2)\n p8 = (ix - t_sen, iy + n_w_sen / 2)\n\n return p0, p1, p2, p3, p4, p5, p6, p7, p8"
] | [
"0.6202383",
"0.6190135",
"0.6152543",
"0.6056553",
"0.6024834",
"0.58629483",
"0.58027285",
"0.5787738",
"0.56690466",
"0.5664175",
"0.56515396",
"0.56223565",
"0.56223565",
"0.5581558",
"0.5581497",
"0.55799574",
"0.55596733",
"0.55531454",
"0.5548768",
"0.5543551",
"0.55401826",
"0.5520864",
"0.5510418",
"0.55087346",
"0.55037737",
"0.5494874",
"0.5490363",
"0.5485471",
"0.5453371",
"0.54501295"
] | 0.6431237 | 0 |
Plots background (reference lines and polygons) of attributes diagram. For more on the attributes diagram, see Hsu and Murphy (1986). BSS = Brier skill score. For more on the BSS, see `model_evaluation.get_brier_skill_score`. | def _plot_background_of_attributes_diagram(
axes_object, climatology,
no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,
no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,
other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,
other_line_width=DEFAULT_CLIMATOLOGY_WIDTH):
error_checking.assert_is_geq(climatology, 0.)
error_checking.assert_is_leq(climatology, 1.)
(x_vertices_for_left_skill_area,
y_vertices_for_left_skill_area,
x_vertices_for_right_skill_area,
y_vertices_for_right_skill_area
) = model_eval.get_skill_areas_in_reliability_curve(climatology)
skill_area_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(no_skill_line_colour),
TRANSPARENCY_FOR_POSITIVE_BSS_AREA
)
left_polygon_object = polygons.vertex_arrays_to_polygon_object(
x_vertices_for_left_skill_area, y_vertices_for_left_skill_area
)
left_polygon_patch = PolygonPatch(
left_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour
)
axes_object.add_patch(left_polygon_patch)
right_polygon_object = polygons.vertex_arrays_to_polygon_object(
x_vertices_for_right_skill_area, y_vertices_for_right_skill_area
)
right_polygon_patch = PolygonPatch(
right_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour
)
axes_object.add_patch(right_polygon_patch)
no_skill_x_coords, no_skill_y_coords = (
model_eval.get_no_skill_reliability_curve(climatology)
)
axes_object.plot(
no_skill_x_coords, no_skill_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(no_skill_line_colour),
linestyle='solid', linewidth=no_skill_line_width
)
climo_x_coords, climo_y_coords = (
model_eval.get_climatology_line_for_reliability_curve(
climatology)
)
axes_object.plot(
climo_x_coords, climo_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(other_line_colour),
linestyle='dashed', linewidth=other_line_width
)
no_resolution_x_coords, no_resolution_y_coords = (
model_eval.get_no_resolution_line_for_reliability_curve(
climatology)
)
axes_object.plot(
no_resolution_x_coords, no_resolution_y_coords,
color=plotting_utils.colour_from_numpy_to_tuple(other_line_colour),
linestyle='dashed', linewidth=other_line_width
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_bootstrapped_attributes_diagram(\n figure_object, axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,\n num_examples_by_bin,\n reliability_line_colour=DEFAULT_RELIABILITY_COLOUR,\n reliability_line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_relia_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_relia_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH,\n no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,\n no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,\n other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,\n other_line_width=DEFAULT_CLIMATOLOGY_WIDTH,\n histogram_bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,\n histogram_bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,\n histogram_bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):\n\n plot_attributes_diagram(\n figure_object=figure_object, axes_object=axes_object,\n mean_forecast_by_bin=ci_mean_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],\n num_examples_by_bin=num_examples_by_bin,\n reliability_line_colour=reliability_line_colour,\n reliability_line_width=reliability_line_width,\n perfect_relia_line_colour=perfect_relia_line_colour,\n perfect_relia_line_width=perfect_relia_line_width,\n no_skill_line_colour=no_skill_line_colour,\n no_skill_line_width=no_skill_line_width,\n other_line_colour=other_line_colour, other_line_width=other_line_width,\n histogram_bar_face_colour=histogram_bar_face_colour,\n histogram_bar_edge_colour=histogram_bar_edge_colour,\n histogram_bar_edge_width=histogram_bar_edge_width)\n\n polygon_object = _confidence_interval_to_polygon(\n x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],\n x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]\n )\n\n polygon_colour = matplotlib.colors.to_rgba(\n plotting_utils.colour_from_numpy_to_tuple(reliability_line_colour),\n TRANSPARENCY_FOR_CONFIDENCE_INTERVAL\n )\n\n polygon_patch = PolygonPatch(\n polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)\n\n axes_object.add_patch(polygon_patch)",
"def plot_scenario(self, ax):\n ax.set_xlim((0,10))\n ax.set_ylim((0,10))\n\n # Unpack region's sizes and positions\n obs_x = self.obstacle_vert[0]\n obs_y = self.obstacle_vert[2]\n obs_w = self.obstacle_vert[1]-obs_x\n obs_h = self.obstacle_vert[3]-obs_y\n\n goal_x = self.goal_vert[0]\n goal_y = self.goal_vert[2]\n goal_w = self.goal_vert[1]-goal_x\n goal_h = self.goal_vert[3]-goal_y\n\n target1_x = self.target1_vert[0]\n target1_y = self.target1_vert[2]\n target1_w = self.target1_vert[1]-target1_x\n target1_h = self.target1_vert[3]-target1_y\n\n target2_x = self.target2_vert[0]\n target2_y = self.target2_vert[2]\n target2_w = self.target2_vert[1]-target2_x\n target2_h = self.target2_vert[3]-target2_y\n\n obstacle = Rectangle((obs_x,obs_y),obs_w,obs_h,color='red',alpha=0.5)\n goal = Rectangle((goal_x,goal_y),goal_w,goal_h, color='green',alpha=0.5)\n\n target1 = Rectangle((target1_x,target1_y),target1_w,target1_h, color='blue',alpha=0.5)\n target2 = Rectangle((target2_x,target2_y),target2_w,target2_h, color='blue',alpha=0.5)\n\n ax.add_patch(obstacle)\n ax.add_patch(goal)\n ax.add_patch(target1)\n ax.add_patch(target2)",
"def plot_attributes_diagram(\n figure_object, axes_object, mean_forecast_by_bin,\n event_frequency_by_bin, num_examples_by_bin,\n reliability_line_colour=DEFAULT_RELIABILITY_COLOUR,\n reliability_line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_relia_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_relia_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH,\n no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,\n no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,\n other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,\n other_line_width=DEFAULT_CLIMATOLOGY_WIDTH,\n histogram_bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,\n histogram_bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,\n histogram_bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):\n\n error_checking.assert_is_numpy_array(\n event_frequency_by_bin, num_dimensions=1)\n error_checking.assert_is_geq_numpy_array(\n event_frequency_by_bin, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n event_frequency_by_bin, 1., allow_nan=True)\n num_bins = len(event_frequency_by_bin)\n\n error_checking.assert_is_integer_numpy_array(num_examples_by_bin)\n error_checking.assert_is_numpy_array(\n num_examples_by_bin, exact_dimensions=numpy.array([num_bins]))\n error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0)\n\n non_empty_bin_indices = numpy.where(num_examples_by_bin > 0)[0]\n error_checking.assert_is_numpy_array_without_nan(\n event_frequency_by_bin[non_empty_bin_indices])\n\n climatology = numpy.average(\n event_frequency_by_bin[non_empty_bin_indices],\n weights=num_examples_by_bin[non_empty_bin_indices]\n )\n\n _plot_background_of_attributes_diagram(\n axes_object=axes_object, climatology=climatology,\n no_skill_line_colour=no_skill_line_colour,\n no_skill_line_width=no_skill_line_width,\n other_line_colour=other_line_colour, other_line_width=other_line_width)\n\n _plot_inset_histogram_for_attributes_diagram(\n figure_object=figure_object, num_examples_by_bin=num_examples_by_bin,\n bar_face_colour=histogram_bar_face_colour,\n bar_edge_colour=histogram_bar_edge_colour,\n bar_edge_width=histogram_bar_edge_width)\n\n plot_reliability_curve(\n axes_object=axes_object,\n mean_forecast_by_bin=mean_forecast_by_bin,\n event_frequency_by_bin=event_frequency_by_bin,\n line_colour=reliability_line_colour, line_width=reliability_line_width,\n perfect_line_colour=perfect_relia_line_colour,\n perfect_line_width=perfect_relia_line_width)",
"def plot_forest(self):\n ax, = az.plot_forest(self.ifd_, var_names=[\"avg\", \"a_coef\", \"b_vals_coef\", \"b_mask_coef\", \"c_vals_coef\", \"c_mask_coef\"])\n ax.axvline(0, linestyle=':', color='black')\n # return ax",
"def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])",
"def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()",
"def plot_bootstrapped_performance_diagram(\n axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,\n line_colour=DEFAULT_PERFORMANCE_COLOUR,\n line_width=DEFAULT_PERFORMANCE_WIDTH,\n bias_line_colour=DEFAULT_FREQ_BIAS_COLOUR,\n bias_line_width=DEFAULT_FREQ_BIAS_WIDTH):\n\n plot_performance_diagram(\n axes_object=axes_object,\n pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],\n success_ratio_by_threshold=ci_mean_dict[model_eval.SR_BY_THRESHOLD_KEY],\n line_colour=line_colour, line_width=line_width,\n bias_line_colour=bias_line_colour, bias_line_width=bias_line_width)\n\n polygon_object = _confidence_interval_to_polygon(\n x_coords_bottom=ci_bottom_dict[model_eval.SR_BY_THRESHOLD_KEY],\n y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],\n x_coords_top=ci_top_dict[model_eval.SR_BY_THRESHOLD_KEY],\n y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY],\n for_performance_diagram=True)\n\n polygon_colour = matplotlib.colors.to_rgba(\n plotting_utils.colour_from_numpy_to_tuple(line_colour),\n TRANSPARENCY_FOR_CONFIDENCE_INTERVAL\n )\n\n polygon_patch = PolygonPatch(\n polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)\n\n axes_object.add_patch(polygon_patch)",
"def draw_bg (self):\n self.health = max(0.0, min(1.0, (self.healthsteps + self.mud.value) / self.healthsteps))\n healthycolor = (0x11, 0x22, 0x44)\n pollutedcolor = (0x66, 0x66, 0)\n self.watercolor = [int((a - b) * self.health + b)\n for a,b in zip(healthycolor, pollutedcolor)]\n colorname = \"rgb({},{},{})\".format(*self.watercolor)\n w, h = self.width, self.height\n self.draw.rectangle((0,0,w-1,self.level_px-1), \"#000000\")\n self.draw.rectangle((0,self.level_px,w-1,h-1), colorname)",
"def draw_attributes(img_path, df):\n img = cv2.imread(img_path)\n # img = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)\n for row in df.iterrows():\n top, right, bottom, left = row[1][4:].astype(int)\n if row[1]['Male'] >= 0.5:\n gender = 'Male'\n else:\n gender = 'Female'\n\n race = np.argmax(row[1][1:4])\n text_showed = \"{} {}\".format(race, gender)\n\n cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)\n font = cv2.FONT_HERSHEY_DUPLEX\n img_width = img.shape[1]\n cv2.putText(img, text_showed, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)\n return img",
"def make_background_graphics(self):\n # Title.\n centered = self.graphics.center_just(1, \"CHOOSE DIFFICULTY\")\n title = TextBox(*centered)\n self.uielements.append(title)\n\n # Bars.\n bar_a = TextBox(Point(0, 2), \"_\"*self.graphics.LENGTH)\n self.uielements.append(bar_a)\n\n bar_b = TextBox(Point(0, 8), \"_\"*self.graphics.LENGTH)\n self.uielements.append(bar_b)\n\n # Controls.\n text = [\n \"_\"*self.graphics.LENGTH,\n \" wasd: Move | m: Select | q: Quit\"\n ]\n controls = LongTextBox(Point(0, self.graphics.HEIGHT-3), text)\n self.uielements.append(controls)",
"def draw_roc(signal, background, output_dir=\".\", output_name=\"roc\", form=\".pdf\"):\n\n x, y = get_roc(signal, background)\n file_path = output_dir + \"/\"+ output_name + \"_X.cvs\"\n numpy.savetxt(file_path, x, delimiter=\",\")\n file_path = output_dir + \"/\"+ output_name + \"_Y.cvs\"\n numpy.savetxt(file_path, y, delimiter=\",\")\n output_name = output_name + form\n\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(7, 7), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])",
"def get_background(formatted_symbol, label_dict):\n\n # implement when possible in qgis\n # balloon_callout_background = change_interface(\n # formatted_symbol.Background,\n # ArcGisModules.module_display.IBalloonCallout\n # )\n\n line_callout_background = change_interface(\n formatted_symbol.Background,\n ArcGisModules.module_display.ILineCallout\n )\n\n marker_text_background = change_interface(\n formatted_symbol.Background,\n ArcGisModules.module_display.IMarkerTextBackground\n )\n\n if marker_text_background:\n label_dict['labelValues']['background']['subsymbol'] = {}\n SymbolPropertiesProvider.get_point_properties(\n label_dict['labelValues']['background']['subsymbol'],\n marker_text_background.Symbol\n )\n label_dict['labelValues']['background']['shapeType'] = \"5\"\n label_dict['labelValues']['background']['shapeDraw'] = \"1\"\n elif line_callout_background:\n try:\n formatted_symbol_callout_margin = change_interface(\n formatted_symbol.Background,\n ArcGisModules.module_display.ITextMargins\n )\n label_dict['labelValues']['background']['shapeFillColor'] = convert_int_to_rgb_string(\n line_callout_background.Border.Color.RGB\n )\n label_dict['labelValues']['background']['shapeBorderColor'] = convert_int_to_rgb_string(\n line_callout_background.Border.Outline.Color.RGB\n )\n label_dict['labelValues']['background']['shapeBorderWidth'] = str(\n line_callout_background.Border.Outline.Width\n )\n label_dict['labelValues']['background']['shapeSizeX'] = str(\n int(formatted_symbol_callout_margin.LeftMargin) * 2\n )\n label_dict['labelValues']['background']['shapeSizeY'] = str(\n int(formatted_symbol_callout_margin.TopMargin) * 2)\n label_dict['labelValues']['background']['shapeDraw'] = \"1\"\n except ValueError:\n label_dict['labelValues']['background']['shapeDraw'] = \"0\"\n\n else:\n pass",
"def plot(profile, filename=\"area_plot.png\", colors=None):\n \n if profile.abundance_data.shape[1] > MAX_DATA_POINTS:\n print(\"Too many data points to plot area plot.\")\n return\n\n col_label = sort_for_area_plot(profile.abundance_data)\n\n profile.abundance_data.sort(columns=col_label, axis=0, inplace=True)\n\n if colors == None:\n colors = generate_colors(len(profile.abundance_data.columns))\n\n w = 0 # x coordinate to plot the new bar on\n \n prev = dict()\n plt.clf()\n plt.title(\"Area Plot\")\n lgd_labels = dict() # stores information for the plot legend \n \n for cls in profile.references.keys():\n df = profile.abundance_data.loc[profile.references[cls]]\n sort_by_most_abundant(df)\n \n # change order of columns so most abundant attribute is plotted first\n l = list(df.columns)[::-1]\n df = df[l]\n \n for sample in df.index:\n for i in range(len(df.columns)):\n attr = df.columns[i] \n if i == 0:\n prev[sample] = 0\n plt.bar(w, df.loc[sample, attr], linewidth=0, bottom=prev[sample], color=colors[i])\n prev[sample] += df.loc[sample, attr]\n if attr not in lgd_labels.keys():\n lgd_labels[attr] = mpatches.Patch(color=colors[i], label=attr)\n w += 0.8\n\n ticks = list()\n ticks.append(0)\n running = 0\n for cls in profile.references.keys():\n running = running + len(profile.references[cls]) * 0.8\n ticks.append(running)\n plt.axvline(x=running, color='black')\n \n plt.xticks(ticks, list(profile.references.keys()))\n plt.xlim(0, len(profile.abundance_data.index) * 0.8)\n plt.ylim(0,1)\n plt.xlabel(\"Samples\")\n plt.ylabel(\"Abundance\")\n lgd = plt.legend(title=\"Attributes\", handles=list(lgd_labels.values()), \n loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=2, fontsize=8)\n plt.savefig(filename, bbox_extra_artists=(lgd,), bbox_inches='tight', \n dpi=(400), figsize=(24, 24))",
"def artAttrSkinPaintCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp:\n Union[AnyStr, bool]=\"none\", alphaclamplower: Union[float, bool]=0.0,\n alphaclampupper: Union[float, bool]=1.0, attrSelected: Union[AnyStr,\n bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment:\n bool=True, brushfeedback: bool=True, clamp: Union[AnyStr, bool]=\"none\",\n clamplower: Union[float, bool]=0.0, clampupper: Union[float, bool]=1.0,\n clear: bool=True, colorAlphaValue: Union[float, bool]=0.0,\n colorRGBAValue: Union[List[float, float, float, float], bool]=None,\n colorRGBValue: Union[List[float, float, float], bool]=None, colorRamp:\n Union[AnyStr, bool]=\"\", colorfeedback: bool=False,\n colorfeedbackOverride: bool=False, colorrangelower: Union[float,\n bool]=0.0, colorrangeupper: Union[float, bool]=1.0, dataTypeIndex:\n Union[int, bool]=0, disablelighting: bool=False, dragSlider: AnyStr=\"\",\n duringStrokeCmd: Union[AnyStr, bool]=\"\", dynclonemode: bool=True,\n exists: bool=True, expandfilename: bool=True, exportaspectratio:\n Union[float, bool]=0.0, exportfilemode: Union[AnyStr,\n bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\", exportfilesizex:\n Union[int, bool]=0, exportfilesizey: Union[int, bool]=0,\n exportfiletype: Union[AnyStr, bool]=\"\", filterNodes: bool=True,\n history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", importfileload:\n AnyStr=\"\", importfilemode: Union[AnyStr, bool]=\"alpha\", importreassign:\n bool=False, influence: Union[AnyStr, bool]=\"\", interactiveUpdate:\n bool=True, lastRecorderCmd: Union[AnyStr, bool]=\"\", lastStampName:\n Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0,\n makeStroke: Union[int, List[int], bool]=0, mappressure: Union[AnyStr,\n bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue: Union[float,\n bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\",\n opacity: Union[float, bool]=1.0, outline: bool=True, outwhilepaint:\n bool=False, paintNodeArray: Union[AnyStr, bool]=\"\", paintSelectMode:\n Union[int, bool]=0, paintattrselected: AnyStr=\"\", paintmode:\n Union[AnyStr, bool]=\"screen\", paintoperationtype: Union[AnyStr,\n bool]=\"Paint\", pickColor: bool=True, pickValue: bool=True,\n playbackCursor: Union[List[float, float], List[List[float, float]],\n bool]=None, playbackPressure: Union[float, List[float], bool]=0.0,\n preserveclonesource: bool=True, profileShapeFile: Union[AnyStr,\n bool]=\"\", projective: bool=False, radius: Union[float, bool]=1.0,\n rampMaxColor: Union[List[float, float, float], bool]=None,\n rampMinColor: Union[List[float, float, float], bool]=None, record:\n bool=True, reflection: bool=False, reflectionaboutorigin: bool=True,\n reflectionaxis: Union[AnyStr, bool]=\"x\", screenRadius: Union[float,\n bool]=0.0, selectclonesource: bool=True, selectedattroper: Union[AnyStr,\n bool]=\"absolute\", showactive: bool=True, skinPaintMode: Union[int,\n bool]=0, stampDepth: Union[float, bool]=0.0, stampProfile: Union[AnyStr,\n bool]=\"\", stampSpacing: Union[float, bool]=1.0, strokesmooth:\n Union[AnyStr, bool]=\"\", surfaceConformedBrushVertices: bool=True,\n tablet: bool=True, tangentOutline: bool=True, toolOffProc: Union[AnyStr,\n bool]=\"\", toolOnProc: Union[AnyStr, bool]=\"\", useColorRamp: bool=True,\n useMaxMinColor: bool=True, usepressure: bool=False, value: Union[float,\n bool]=0.0, whichTool: Union[AnyStr, 
bool]=\"\", worldRadius: Union[float,\n bool]=0.0, xrayJoints: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def plot_scenario(self, ax):\n ax.set_xlim((0,12))\n ax.set_ylim((0,12))\n\n # Unpack region's sizes and positions\n obs_x = self.obstacle_vert[0]\n obs_y = self.obstacle_vert[2]\n obs_w = self.obstacle_vert[1]-obs_x\n obs_h = self.obstacle_vert[3]-obs_y\n\n tar_x = self.goal_vert[0]\n tar_y = self.goal_vert[2]\n tar_w = self.goal_vert[1]-tar_x\n tar_h = self.goal_vert[3]-tar_y\n\n obstacle = Rectangle((obs_x,obs_y),obs_w,obs_h,color='red',alpha=0.5)\n target = Rectangle((tar_x,tar_y),tar_w,tar_h, color='green',alpha=0.5)\n\n ax.add_patch(obstacle)\n ax.add_patch(target)",
"def plot_regime_diagram_background_L19(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # background following Fig. 3 of Belcher et al., 2012\n nx = 500\n ny = 500\n xx = np.logspace(xpr[0], xpr[1], nx)\n yy = np.logspace(ypr[0], ypr[1], ny)\n zz1 = np.zeros([nx, ny])\n zz2 = np.zeros([nx, ny])\n zz3 = np.zeros([nx, ny])\n for i in np.arange(nx):\n for j in np.arange(ny):\n zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))\n zz2[i,j] = 0.22*xx[i]**(-2)\n zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]\n zz = zz1 + zz2 + zz3\n\n rz_ST = zz1/zz\n rz_LT = zz2/zz\n rz_CT = zz3/zz\n fr = np.ones(zz.shape) * 7\n cfrac = 0.25\n fr[(rz_LT<cfrac) & (rz_CT<cfrac)] = 1\n fr[(rz_ST<cfrac) & (rz_CT<cfrac)] = 2\n fr[(rz_ST<cfrac) & (rz_LT<cfrac)] = 3\n fr[(rz_ST>=cfrac) & (rz_LT>=cfrac) & (rz_CT<cfrac)] = 4\n fr[(rz_ST>=cfrac) & (rz_CT>=cfrac) & (rz_LT<cfrac)] = 5\n fr[(rz_LT>=cfrac) & (rz_CT>=cfrac) & (rz_ST<cfrac)] = 6\n color_list = ['firebrick','forestgreen','royalblue','gold','orchid','turquoise','w']\n cb_ticks = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]\n cmap, norm = from_levels_and_colors(cb_ticks, color_list)\n ax.contourf(xx, yy, np.transpose(fr), cmap=cmap, norm=norm)\n ax.contour(xx, yy, np.transpose(fr), colors='darkgray')\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('La$_t$')\n ax.set_ylabel('$h/L_L$')\n ax.set_aspect(aspect=1/3)\n ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))",
"def _set_border_and_background_transparent(figure):\n figure.setAttribute('COLOR', pyHAttributeColor(0, 0, 0, 0))\n figure.setAttribute('FILL', pyHAttributeFillColor(0, 0, 0, 0))",
"def plottrace_paper(moviedict, figw, figh, figdpi, fontsz, border, xlabel, ylabel, yaxisticks, \n xaxisticks, labels, lw, fs):\n \n for movie, val in moviedict.iteritems():\n os.chdir(movie)\n condition, xlim, color, inum = val\n \n fontv = matplotlib.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')\n fontv.set_size(fontsz)\n \n print(movie)\n td = dil.load_params()\n x, roi_cols = dil.load_results(RESULTS_FILE)\n start = int(td['startshort'])\n end = int(td['endshort'])\n \n \n fig1 = plt.figure(figsize=(figw*xlim/0.6, figh), dpi=figdpi, facecolor='w', edgecolor='k')\n \n xlen = len(x[roi_cols['Mean1']][start:end])\n #print(xlen)\n xvals = np.arange(0, float(xlen)/fs, 1/float(fs))\n #print(xvals)\n \n \n ycib = x[roi_cols['Mean1']][start:end]\n ycib = [v - np.mean(ycib) for v in ycib]\n #print(ycib)\n \n ylab = x[roi_cols['Mean2']][start:end]\n ylab = [v - np.mean(ylab) for v in ylab]\n ylab = [v + 70 for v in ylab]\n \n # Plots the traces\n \n plt.plot(xvals, ylab, label='proboscis tip', linewidth=lw, color='k')\n plt.plot(xvals, ycib, label='cibarium', linewidth=lw, color='b')\n \n \n \n \n \n \n \n if labels == 'yes':\n plt.title(td['condition'], fontproperties=fontv, horizontalalignment='left')\n \n #Plots legend and removes the border around it.\n legend=plt.legend()\n #legend = plt.legend(bbox_to_anchor = (1.5, 1.6))\n legend.draw_frame(False)\n ltext = legend.get_texts() \n plt.setp(ltext, fontproperties=fontv) \n \n ax = plt.gca()\n \n #Uncomment lines below to display without top and right borders.\n \n if border == 'no':\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n pass\n elif loc in ['right','top']:\n spine.set_color('none') # don't draw spine\n else:\n raise ValueError('unknown spine location: %s'%loc)\n \n \n #Uncomment lines below to display ticks only where there are borders.\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n \n # Specifies the number of tickmarks/labels on the yaxis.\n #ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(yaxisticks)) \n ## Removes tick labels and ticks from xaxis.\n ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n \n if labels == 'yes':\n plt.ylabel(ylabel, fontsize=fontsz, labelpad=12)\n fig1.figsize = (6, 3)\n \n # Adjusts the space between the plot and the edges of the figure; (0,0) is the lower \n #lefthand corner of the figure.\n fig1.subplots_adjust(bottom=0.3)\n fig1.subplots_adjust(left=0.05)\n fig1.subplots_adjust(right=0.95)\n fig1.subplots_adjust(top=0.95)\n \n #ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(XAXISTICKS)) \n \n #Specifies axis labels and axis tick label sizes.\n plt.xlabel(xlabel, fontproperties=fontv)\n plt.ylabel(ylabel, fontproperties=fontv)\n plt.xticks([0, 0.2, 0.4, 0.6], fontproperties=fontv)\n plt.xlim( (0, xlim+0.05) )\n #plt.yticks(fontproperties=fontv)\n \n \n \n # Saves the figures in plots/plots.\n if labels == 'no':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace_nolab')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')\n\n if labels == 'yes':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace')\n plt.savefig(figname+'.svg', dpi=FIGDPI, 
format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')",
"def backgroundAndSkills(self):\n backgrounds = {}\n def createBackgrounds(fileName):\n \"\"\" (str) -> None\n Opens a file with background information and populates the backgrounds dictionary with\n that information\n \"\"\"\n backgroundFile = open(fileName,'r')\n current_bg = ''\n for line in backgroundFile:\n #If there is no text, go to next line\n if line == \"\\n\":\n pass\n #Else if the line starts with \"~~\", create new key in top level\n #dictionary with the remainder of that line and set its value\n #to an empty dictionary\n elif line[:2] == \"~~\":\n current_bg = line[2:-1]\n backgrounds[line[2:-1]] = {}\n #Go through the next few lines and set them to keys and values\n #in the nestled dictionary\n elif \":\" in line:\n line_heading = line[:line.index(\":\")]\n after_heading = line[line.index(\":\")+2:-1]\n #create a key/value pair for the background regarding its profession\n if line_heading == \"hasProfession\":\n \n #Change the string to a bool\n if after_heading == \"True\":\n backgrounds[current_bg][line_heading] = True\n else:\n backgrounds[current_bg][line_heading] = False\n #Create professions list if current BG has professions\n if line_heading == \"professions\" and backgrounds[current_bg]['hasProfession']:\n backgrounds[current_bg]['professions'] = after_heading.split(', ')\n #Create a two item list to store the trait name and its description\n if line_heading == \"trait\":\n backgrounds[current_bg]['trait'] = [line[line.index(\":\")+2: line.index(\"-\")-1],\\\n line[line.index(\"-\")+2:-1]]\n #Create an entry for the story of a character's background\n if line_heading == \"story\":\n backgrounds[current_bg]['story'] = after_heading\n #Create a list for the recommended skills\n if line_heading == \"recommended\":\n backgrounds[current_bg]['recommended'] = after_heading.split(', ')\n backgroundFile.close()\n\n #Creat background now\n createBackgrounds('Backgrounds.txt')\n\n \n \n \n #Make a list of backgrounds\n background_list = []\n for i in backgrounds:\n background_list.append(i)\n background_list.sort()\n #Ask user to choose a background and set that to self.background\n background_choice = raw_input('Enter a background from this list: '+str(background_list)+': ').title()\n print\n while background_choice not in background_list:\n background_choice = raw_input('Enter a background from this list: '+str(background_list)+': ').title()\n print\n self.background = background_choice\n self.backgroundStory = backgrounds[self.background]['story']\n #Add the background's trait to self.traits\n self.traits[backgrounds[self.background]['trait'][0]] = backgrounds[self.background]['trait'][1]\n #If the background has a profession, add that now\n if backgrounds[self.background]['hasProfession'] == True:\n #Create a temp list to account for Commoner's multiple profession listing\n profession_holder = []\n for i in backgrounds[self.background]['professions']:\n if i in profession_holder:\n pass\n else:\n profession_holder.append(i)\n #Ask user which profession they'd like or if they press Enter one is chosen randomly\n temp_choice = raw_input(\"Which profession would you like? \"+str(profession_holder)+\"\\n\"\\\n \"Enter one from the list above or press Enter for random. \").title()\n print\n while temp_choice != '' and temp_choice not in backgrounds[self.background]['professions']:\n temp_choice = raw_input(\"Which profession would you like? \"+str(profession_holder)+\"\\n\"\\\n \"Enter one from the list above or press Enter for random. 
\").title()\n print\n if temp_choice == '':\n temp_int = r.randint(0,len(backgrounds[self.background]['professions'])-1)\n self.backgroundProfession = backgrounds[self.background]['professions'][temp_int]\n else:\n self.backgroundProfession = temp_choice\n else:\n pass\n\n #Ask about skills.\n skill_choice = []\n print \"You'll now choose 4 skills from this list:\"\n print\n for i in skills:\n print i.title()\n print\n print \"Recommended skills for your \"+self.background+\" are: \"+str(backgrounds[self.background]['recommended'])\n for i in range(4):\n skill_choice.append(raw_input(\"Which Skill would you like for skill \"+str(i+1)+\"? \").title())\n print\n self.skills = skill_choice",
"def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]",
"def add_boundary(self, fig, w, legend, min_x=-1, max_x=1, **kwargs):\n color = kwargs.pop('color', None)\n bold = kwargs.pop('bold', False)\n fade = kwargs.pop('fade', False)\n line_width = kwargs.pop('line_width', 1)\n x, y = self.decision_boundary(w, min_x=min_x, max_x=max_x)\n # color = self._get_color(color, bold, fade)\n self.add_curve(fig, x=x, y=y, legend=legend,\n line_width=line_width, color=color,\n bold=bold, fade=fade)\n return fig",
"def draw_roc(signal, background, output_dir=\".\", output_name=\"roc.pdf\"):\n\n x, y = get_roc(signal, background)\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(6, 6), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.plot([0, 1], [0, 1], ':', color='black', lw=2, label=\"Random cut\")\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])",
"def plot_reward(reward, walls, ax_title, fig, ax, alpha=1):\n\n # Clean up the arrays (imshow only takes values in [0, 1])\n pos_label, neg_label = visualizeReward(reward)\n\n # set up plot\n def make_pic(pos_label, walls, neg_label):\n \"\"\"Combine colors to make the walls + rewards achieve desired color\"\"\"\n alphas = np.ones(pos_label.shape)\n alphas[pos_label > 0] = alpha\n alphas[neg_label > 0] = alpha\n\n # Coloring the walls brown\n # BROWN = np.array((133, 87, 35, 0)) / 255.0\n # wall_color = np.einsum(\"ij,k->ijk\", walls, BROWN)\n\n # to get our true reward (blue) values on the right scale, we'll create our own color scale\n # Another possibility: 123, 176, 32\n small_positive = np.array((150, 189, 3, 0)) / 255.0\n # Another possibility: 26,147,111\n big_positive = np.array((85, 135, 80, 0)) / 255.0\n diff = big_positive - small_positive\n blue = np.stack(\n [\n np.zeros(neg_label.shape),\n np.zeros(neg_label.shape),\n pos_label.copy(),\n np.zeros(neg_label.shape),\n ],\n axis=-1,\n )\n blue[pos_label > 0, :] = (\n np.einsum(\"i,j->ij\", pos_label[pos_label > 0], diff) + small_positive\n )\n\n # Negative reward\n # Another possibility: 223, 161, 177\n small_negative = np.array((227, 126, 126, 0)) / 255.0\n # Another possibility: 195, 75, 123\n big_negative = np.array((180, 27, 27, 0)) / 255.0\n diff = big_negative - small_negative\n neg_color = np.stack(\n [\n neg_label.copy(),\n np.zeros_like(neg_label),\n np.zeros_like(neg_label),\n np.zeros_like(neg_label),\n ],\n axis=-1,\n )\n neg_color[neg_label > 0, :] = (\n np.einsum(\"i,j->ij\", neg_label[neg_label > 0], diff) + small_negative\n )\n\n label = np.stack(\n [\n np.zeros_like(neg_label),\n np.zeros(pos_label.shape),\n np.zeros(pos_label.shape),\n alphas,\n ],\n axis=-1,\n )\n # label = label + blue + wall_color\n label = label + blue + neg_color\n\n # Set all the black (0,0,0,1) RGBA tuples to be white\n label[np.sum(label, 2) == 1] = np.array([0.9, 0.9, 0.9, 1])\n return label.reshape(list(walls.shape) + [4])\n\n # truth plot\n true = ax.imshow(make_pic(pos_label, walls, neg_label))\n hatch_walls(walls, ax)\n\n ax.set_title(ax_title)\n\n # Remove xticks, yticks\n ax.set_yticks([])\n ax.set_xticks([])\n\n return fig, ax",
"def addBarrelBlue(self, event):\n # let user draw second ROI\n ROI = RoiPoly(color='b') #let user draw ROI\n plt.show(block=False)\n mask = ROI.get_mask(self.greyimg)\n self.ROI += mask",
"def build_player_atrribute_corelation(player_attributes):\n player_attributes_wo_na = player_attributes.dropna()\n player_attributes_corr = player_attributes_wo_na.corr()\n fig, ax = plt.subplots(nrows=1, ncols=1)\n fig.set_size_inches(w=24, h=24)\n sns.heatmap(player_attributes_corr, annot=True, linewidths=0.5, ax=ax, cmap=\"Blues\")\n plt.show()",
"def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()",
"def plot_model_weights(dataframe, **kwargs):\n\n fig = pyplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n # Plot all the datapoints\n ax = dataframe.plot(kind='scatter', x='ref_affinity', y='dg_calc', c='weights', color='Blue', ax=ax)\n ax.set_aspect('equal')\n\n # Force X and Y axis to have the same data range\n axis_min = 10 * round((min([dataframe['ref_affinity'].min(), dataframe['dg_calc'].min()]) - 5) / 10)\n axis_max = 10 * round((max([dataframe['ref_affinity'].max(), dataframe['dg_calc'].max()]) + 5) / 10)\n ax.set_xlim(axis_min, axis_max)\n ax.set_ylim(axis_min, axis_max)\n\n # Add diagonal\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n\n ax.plot(xlim, ylim, 'k-', linewidth=0.5)\n ax.plot((xlim[0], xlim[1] - 5), (ylim[0] + 5, ylim[1]), 'k--')\n ax.plot((xlim[0] + 5, xlim[1]), (ylim[0], ylim[1] - 5), 'k--')\n\n # Plot the training set if any\n trainset = dataframe.trainset\n if not trainset.empty:\n ax = trainset.plot(kind='scatter', x='ref_affinity', y='dg_calc', c='weights', marker='s', ax=ax)\n\n # Plot the regression line\n ref = dataframe['ref_affinity'].values\n fitx = polyfit(ref, dataframe['dg_calc'].values, 1)\n fit_fnx = poly1d(fitx)\n ax.plot(ref, fit_fnx(ref), 'r-', label=\"fit\", linewidth=0.5)\n\n # Plot datalabels if needed\n if kwargs.get('plot_labels', False):\n cutoff = kwargs.get('cutoff', 0.85)\n for i, point in dataframe.iterrows():\n if point['weights'] < cutoff:\n ax.text(point['ref_affinity'], point['dg_calc'], \"{0:.0f}\".format(point['case']), fontsize=8)\n\n ax.set_xlabel(r'$\\Delta$$G_{Ref}$ (kJ/mol)', fontsize=10)\n ax.set_ylabel(r'$\\Delta$$G_{Calc}$ (kJ/mol)', fontsize=10)\n ax.legend(loc=\"best\", frameon=False)\n\n return fig",
"def setDisplayProperties(self, x, y, textcolor, bgcolor):\n poolt = [\"ZombieBufPool\", \"BufPool\", \"ZombiePool\", \"Pool\"]\n if self.gobj.mobj.parent.className in poolt:\n self.setGeometry(0, 30,\n self.gobj.boundingRect().width(),\n self.gobj.boundingRect().height())\n else:\n self.setGeometry(x, y,\n self.gobj.boundingRect().width(),\n self.gobj.boundingRect().height())\n self.bg.setBrush(QtGui.QBrush(bgcolor))\n self.setFlag(QGraphicsItem.ItemIsMovable, False)",
"def artAttrPaintVertexCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp:\n Union[AnyStr, bool]=\"none\", alphaclamplower: Union[float, bool]=0.0,\n alphaclampupper: Union[float, bool]=1.0, attrSelected: Union[AnyStr,\n bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment:\n bool=True, brushfeedback: bool=True, clamp: Union[AnyStr,\n bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper:\n Union[float, bool]=1.0, clear: bool=True, colorAlphaValue:\n Union[float, bool]=0.0, colorRGBAValue: Union[List[float, float,\n float, float], bool]=None, colorRGBValue: Union[List[float, float,\n float], bool]=None, colorRamp: Union[AnyStr, bool]=\"\", colorfeedback:\n bool=False, colorfeedbackOverride: bool=False, colorrangelower:\n Union[float, bool]=0.0, colorrangeupper: Union[float, bool]=1.0,\n dataTypeIndex: Union[int, bool]=0, disablelighting: bool=False,\n dragSlider: AnyStr=\"\", duringStrokeCmd: Union[AnyStr, bool]=\"\",\n dynclonemode: bool=True, exists: bool=True, expandfilename: bool=True,\n exportaspectratio: Union[float, bool]=0.0, exportfilemode:\n Union[AnyStr, bool]=\"luminance/rgb\", exportfilesave: AnyStr=\"\",\n exportfilesizex: Union[int, bool]=0, exportfilesizey: Union[int,\n bool]=0, exportfiletype: Union[AnyStr, bool]=\"\", filterNodes:\n bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n importfileload: AnyStr=\"\", importfilemode: Union[AnyStr,\n bool]=\"alpha\", importreassign: bool=False, interactiveUpdate:\n bool=True, lastRecorderCmd: Union[AnyStr, bool]=\"\", lastStampName:\n Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0,\n makeStroke: Union[int, List[int], bool]=0, mappressure: Union[AnyStr,\n bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue: Union[float,\n bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\",\n opacity: Union[float, bool]=1.0, outline: bool=True, outwhilepaint:\n bool=False, paintComponent: Union[int, bool]=0, paintNodeArray:\n Union[AnyStr, bool]=\"\", paintNumChannels: Union[int, bool]=0,\n paintRGBA: bool=False, paintVertexFace: bool=False,\n paintattrselected: AnyStr=\"\", paintmode: Union[AnyStr, bool]=\"screen\",\n paintoperationtype: Union[AnyStr, bool]=\"Paint\", pickColor: bool=True,\n pickValue: bool=True, playbackCursor: Union[List[float, float],\n List[List[float, float]], bool]=None, playbackPressure: Union[float,\n List[float], bool]=0.0, preserveclonesource: bool=True,\n profileShapeFile: Union[AnyStr, bool]=\"\", projective: bool=False,\n radius: Union[float, bool]=1.0, rampMaxColor: Union[List[float, float,\n float], bool]=None, rampMinColor: Union[List[float, float, float],\n bool]=None, record: bool=True, reflection: bool=False,\n reflectionaboutorigin: bool=True, reflectionaxis: Union[AnyStr,\n bool]=\"x\", screenRadius: Union[float, bool]=0.0, selectclonesource:\n bool=True, selectedattroper: Union[AnyStr, bool]=\"absolute\",\n showactive: bool=True, stampDepth: Union[float, bool]=0.0,\n stampProfile: Union[AnyStr, bool]=\"\", stampSpacing: Union[float,\n bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True,\n tangentOutline: bool=True, toolOffProc: Union[AnyStr, bool]=\"\",\n toolOnProc: Union[AnyStr, bool]=\"\", useColorRamp: bool=True,\n useMaxMinColor: bool=True, usepressure: bool=False, value:\n Union[float, bool]=0.0, 
vertexColorRange: bool=False,\n vertexColorRangeLower: Union[float, bool]=0.0, vertexColorRangeUpper:\n Union[float, bool]=1.0, whichTool: Union[AnyStr, bool]=\"\",\n worldRadius: Union[float, bool]=0.0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass",
"def show_ground_feature():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n mvaddstr(16, 3, GROUND_DESCRIPTIONS.get(ground_description_int), color_pair(GROUND_FEATURES_COLOUR) | A_BOLD)"
] | [
"0.6314721",
"0.5160302",
"0.5150252",
"0.5146807",
"0.511476",
"0.5082323",
"0.5072824",
"0.506235",
"0.50508076",
"0.4978316",
"0.49699628",
"0.49588132",
"0.49191087",
"0.49132687",
"0.4868202",
"0.48487267",
"0.48426506",
"0.4834363",
"0.48201233",
"0.48166025",
"0.4805289",
"0.47878814",
"0.47867528",
"0.47766677",
"0.47549203",
"0.4744793",
"0.4730238",
"0.4722846",
"0.46770748",
"0.46656463"
] | 0.679852 | 0 |
Plots forecast histogram inset in attributes diagram. For more on the attributes diagram, see Hsu and Murphy (1986). B = number of forecast bins | def _plot_inset_histogram_for_attributes_diagram(
figure_object, num_examples_by_bin,
bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,
bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,
bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):
error_checking.assert_is_integer_numpy_array(num_examples_by_bin)
error_checking.assert_is_numpy_array(num_examples_by_bin, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0)
num_forecast_bins = len(num_examples_by_bin)
error_checking.assert_is_geq(num_forecast_bins, 2)
example_frequency_by_bin = (
num_examples_by_bin.astype(float) / numpy.sum(num_examples_by_bin)
)
forecast_bin_edges = numpy.linspace(0., 1., num=num_forecast_bins + 1)
forecast_bin_width = forecast_bin_edges[1] - forecast_bin_edges[0]
forecast_bin_centers = forecast_bin_edges[:-1] + forecast_bin_width / 2
inset_axes_object = figure_object.add_axes([
INSET_HISTOGRAM_LEFT_EDGE, INSET_HISTOGRAM_BOTTOM_EDGE,
INSET_HISTOGRAM_WIDTH, INSET_HISTOGRAM_HEIGHT
])
inset_axes_object.bar(
forecast_bin_centers, example_frequency_by_bin, forecast_bin_width,
color=plotting_utils.colour_from_numpy_to_tuple(bar_face_colour),
edgecolor=plotting_utils.colour_from_numpy_to_tuple(bar_edge_colour),
linewidth=bar_edge_width
)
max_y_tick_value = rounder.floor_to_nearest(
1.05 * numpy.max(example_frequency_by_bin),
INSET_HISTOGRAM_Y_TICK_SPACING
)
num_y_ticks = 1 + int(numpy.round(
max_y_tick_value / INSET_HISTOGRAM_Y_TICK_SPACING
))
y_tick_values = numpy.linspace(0., max_y_tick_value, num=num_y_ticks)
pyplot.xticks(INSET_HISTOGRAM_X_TICKS, axes=inset_axes_object)
pyplot.yticks(y_tick_values, axes=inset_axes_object)
inset_axes_object.set_xlim(0., 1.)
inset_axes_object.set_ylim(0., 1.05 * numpy.max(example_frequency_by_bin)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_attributes_diagram(\n figure_object, axes_object, mean_forecast_by_bin,\n event_frequency_by_bin, num_examples_by_bin,\n reliability_line_colour=DEFAULT_RELIABILITY_COLOUR,\n reliability_line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_relia_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_relia_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH,\n no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,\n no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,\n other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,\n other_line_width=DEFAULT_CLIMATOLOGY_WIDTH,\n histogram_bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,\n histogram_bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,\n histogram_bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):\n\n error_checking.assert_is_numpy_array(\n event_frequency_by_bin, num_dimensions=1)\n error_checking.assert_is_geq_numpy_array(\n event_frequency_by_bin, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n event_frequency_by_bin, 1., allow_nan=True)\n num_bins = len(event_frequency_by_bin)\n\n error_checking.assert_is_integer_numpy_array(num_examples_by_bin)\n error_checking.assert_is_numpy_array(\n num_examples_by_bin, exact_dimensions=numpy.array([num_bins]))\n error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0)\n\n non_empty_bin_indices = numpy.where(num_examples_by_bin > 0)[0]\n error_checking.assert_is_numpy_array_without_nan(\n event_frequency_by_bin[non_empty_bin_indices])\n\n climatology = numpy.average(\n event_frequency_by_bin[non_empty_bin_indices],\n weights=num_examples_by_bin[non_empty_bin_indices]\n )\n\n _plot_background_of_attributes_diagram(\n axes_object=axes_object, climatology=climatology,\n no_skill_line_colour=no_skill_line_colour,\n no_skill_line_width=no_skill_line_width,\n other_line_colour=other_line_colour, other_line_width=other_line_width)\n\n _plot_inset_histogram_for_attributes_diagram(\n figure_object=figure_object, num_examples_by_bin=num_examples_by_bin,\n bar_face_colour=histogram_bar_face_colour,\n bar_edge_colour=histogram_bar_edge_colour,\n bar_edge_width=histogram_bar_edge_width)\n\n plot_reliability_curve(\n axes_object=axes_object,\n mean_forecast_by_bin=mean_forecast_by_bin,\n event_frequency_by_bin=event_frequency_by_bin,\n line_colour=reliability_line_colour, line_width=reliability_line_width,\n perfect_line_colour=perfect_relia_line_colour,\n perfect_line_width=perfect_relia_line_width)",
"def entries_histogram(turnstile_weather):\n\n plt.figure()\n turnstile_weather[turnstile_weather.rain == 0][\n 'ENTRIESn_hourly'].hist() # your code here to plot a historgram for hourly entries when it is raining\n turnstile_weather[turnstile_weather.rain == 1][\n 'ENTRIESn_hourly'].hist() # your code here to plot a historgram for hourly entries when it is not raining\n return plt",
"def featuresHist_colors(self, **kwargs):\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n # Updating bins with specified values.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot:\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(311)\n ax2 = fig1.add_subplot(312)\n ax3 = fig1.add_subplot(313)\n\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n ax1.hist(\n self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)\n ax2.hist(\n self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)\n ax3.hist(\n self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)\n\n ax1.set_title(\"Feature: Onpower\")\n ax1.set_xlabel(\"Watts\")\n ax1.set_ylabel(\"Counts\")\n\n ax2.set_title(\"Feature: Offpower\")\n ax2.set_xlabel(\"Watts\")\n ax2.set_ylabel(\"Counts\")\n\n ax3.set_title(\"Feature: Duration\")\n ax3.set_xlabel(\"Seconds\")\n ax3.set_ylabel(\"Counts\")",
"def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):",
"def plot_hist(df, num_bins=8):\n df.hist(figsize=(24, 20), bins=num_bins)\n plt.axes",
"def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()",
"def drawHist(data, xLabel, unit, binSize, title):\n mean = np.mean(data)\n median = np.median(data)\n mode = stats.mode(data)[0].astype(float)\n \n q1, q3 = np.percentile(data, [25, 75])\n iqr = q3 - q1\n sigma = np.std(data)\n \n \n bins = np.arange(min(data), max(data) + 1, binSize)\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(12,7))\n plt.hist(data, bins=bins, histtype='bar') \n plt.title(title)\n plt.xlabel(xLabel + \" \" + unit)\n plt.ylabel('count')\n ymax = ax.get_ylim()[1]\n ax.vlines(mean, 0, ymax, color='red', label='mean')\n ax.vlines(mean-sigma, 0, ymax, color='red', linestyle='--', \n label='mean +/- std')\n ax.vlines(mean+sigma, 0, ymax, color='red', linestyle='--')\n plt.legend()\n plt.show()\n \n print(\"Einheit: \", unit)\n print(\"Minimum: \", round(data.min(),3))\n print(\"Maximum: \", round(data.max(),3))\n print(\"Mittelwert: \", round(mean,3))\n print(\"Median: \", round(median,3))\n print(\"Modus: \", round(mode[0],3))\n print(\"Standardabweichung: \", round(sigma, 3))\n print(\"1. Quartil: \", round(q1,3))\n print(\"3. Quartil: \", round(q3,3))\n print(\"Quartilsdifferenz: \", round(iqr,3))",
"def plot_features(inputs, nbins=50):\n\tfor i in range(0, inputs.shape[1]):\n\t\tx = inputs[:, i]\n\t\tn, bins, patches = hist(x, bins=nbins, range=None, rwidth=0.8, normed=False, weights=None, cumulative=False, bottom=None)\n\t\tprint('BINS: %i', bins)\n\t\tprint('N: %i', n)\n\t\tplt.show()",
"def hist(self, bins):\n x = self.x\n plt.hist(x, bins)\n plt.xlabel('Observed Data')\n plt.ylabel('Frequency')\n plt.show()",
"def plot_h_bstat_times_hist(self, h_bstat, times, index, er = None, domain = None,\n yrange = None, label = None,pos=None,theory=True):\n data_class = self.data_classes[index]\n if h_bstat not in data_class._hbstats:\n print(h_bstat + \"not in\" + str(data_class.index))\n return\n\n multiply_se_by = 1.96\n\n if pos is None:\n pos = 'both'\n\n data = data_class.read_hbstats(h_bstat)\n\n times = sorted(times)\n if times[0] < 0:\n maxtime = max(data[h_bstat][pos][0].keys())\n times[0] = maxtime\n\n if h_bstat not in self._theory_hbstats_at_time:\n theory = False\n\n\n if er is None:\n er = True\n\n undertext_params = [['N', 'U'], ['shift_s0', 'sigma_0_del'], ['E2Ns', 'V2Ns']]\n\n data_class = self.data_classes[index]\n\n bin_type = data_class.bin_type[h_bstat]\n if bin_type == 'efs':\n rawname = h_bstat[:-9]\n loglinear = True\n if domain is None:\n domain = [0.1,100]\n elif domain[0] < 0.1:\n domain[0] = 0.1\n xlabel = \"Effect size squared (\" +r\"$S=a^2$\"+')'\n else:\n loglinear = False\n rawname = h_bstat[:-6]\n xlabel = \"Initial MAF\"\n if domain is None:\n domain = [0, 0.5]\n ylabel = self.name_class.yname(rawname)\n\n binedged = data_class.hist_bins[h_bstat]\n\n\n savedir = os.path.join(self.base_dir, 'hbstats_at_times')\n\n if label is None:\n label = h_bstat + '_' + str(index)\n else:\n label += '_' +str(index)\n\n\n if pos == 'both':\n label = 'dpn_' + label\n savedir = os.path.join(savedir, 'combined')\n elif pos == 'pos':\n label ='p_' + label\n savedir = os.path.join(savedir,'positives')\n else:\n label = 'n_' + label\n savedir = os.path.join(savedir, 'negatives')\n\n time_string = \"generation_\"+str(times[0])\n for ti in times[1:]:\n time_string+='_and_'+str(ti)\n savedir = os.path.join(savedir, time_string)\n\n\n plot_dict = dict()\n plot_dict['xlabel'] = xlabel\n plot_dict['savedir'] = savedir\n plot_dict['yrange'] = yrange\n plot_dict['domain'] = domain\n plot_dict['ylabel'] = ylabel\n\n\n plotspecs = dict()\n plotspecs['undertext_font'] = {'color': 'black', 'weight': 'roman', 'size': '10'}\n plotspecs['legend_anchor'] = 'upper left'\n plotspecs['legend_loc'] = (1.02, 1.03)\n plotspecs['fsize'] = (28, 11)\n plotspecs['dpi'] = 200\n plotspecs['linewidth'] = 3.5\n plotspecs['ticksize'] = 30\n plotspecs['legend_font'] = {'size': '30'}\n plotspecs['axis_font'] = {'fontname': 'Arial', 'size': '28'}\n if loglinear:\n plotspecs['xlog'] = True\n\n\n # data = data_class_list[0].read_hbstats(h_bstat)\n # the_cols = sorted(data[h_bstat][pos].keys())\n\n cols = sorted(data[h_bstat][pos].keys())\n times_all = sorted(data[h_bstat][pos][0].keys())\n\n times_real = []\n y_hist = []\n ynames_hist = []\n\n if er:\n yer_hist = []\n\n for ti in times:\n time_time = find_nearest(times_all,ti)\n times_real.append(time_time)\n label+= '_time_'+str(time_time)\n ynames_hist.append('Time: ' + str(time_time))\n #data = data_class.read_bstats(h_bstat)\n ys = [data[h_bstat][pos][co][time_time]['mean'] for co in cols]\n # print(ys)\n y_hist.append(ys)\n if er:\n yser = [multiply_se_by*data[h_bstat][pos][co][time_time]['se'] for co in cols]\n yer_hist.append(yser)\n\n binedges = [binedged for _ in times_real]\n\n plot_dict['binedges'] = binedges\n plot_dict['y_hist'] = y_hist\n if er:\n plot_dict['yer_hist'] = yer_hist\n\n if len(times)>1:\n plot_dict['ynames_hist'] = ynames_hist\n\n #some_styles = ['o','^','s','D','x','+','*','v','h']\n # time_styles = dict()\n # for i in xrange(len(times_real)):\n # time_styles[times[i]] = some_styles[i]\n\n\n plot_dict['label'] = label\n\n # List with [text_top, 
text_bottom] containing relevant parameters\n\n undertext = []\n undertext = []\n if bin_type == 'efs' and rawname[-5:]==\"input\":\n threshold_2Ns = data_class.scaled_s_above_which_only_one_mutation_per_generation()\n warning_string = \"NOTE: on average only 1 new mut per generation has \" + r\"$a^2>$\"+\\\n str(threshold_2Ns) +\". 'Per mut input' results could be inaccurate in bins\"\n number_runs_string = \"with few new muts unless enough simulations were done. \" + str(\n int(data_class.number_population_simulations())) + \\\n \" population sims were performed with parameters:\"\n undertext.append(warning_string)\n undertext.append(number_runs_string)\n else:\n number_runs_string = \"Obtained from \" + str(int(data_class.number_population_simulations())) + \\\n \" population sims with parameters:\"\n undertext.append(number_runs_string)\n for listi in undertext_params:\n text_list = self._plot_text(index_list=[index],params_list=listi)\n if text_list:\n text_string = ', '.join(text_list)\n undertext.append(text_string )\n\n plot_dict['undertext'] = undertext#data_class.plot_text()\n plotspecs['undertext_font'] = {'color': 'black', 'weight': 'roman', 'size': 16}\n plot_dict['plotspecs'] = plotspecs\n\n #add a theory stat\n\n if theory:\n numxs = 100\n sfirst = domain[0]\n slast = domain[1]\n xi = np.linspace(sfirst, slast, numxs)\n xishort = np.array([ss for ss in xi])\n xilong = np.array([ss for ss in xi])\n time_for_short = min(times)\n myfunctionlong, longfname = self.theory_hbstat_function_and_name_long(data_class, h_bstat)\n yilong = np.array([myfunctionlong(ss) for ss in xilong])\n if h_bstat == 'frozen_d2ax_over_shift_fbins':\n x = [xilong]\n y = [yilong]\n ynames = [longfname]\n else:\n myfunctionshort, shortfname = self.theory_hbstat_function_and_name_short(data_class,h_bstat, time_for_short)\n yishort = np.array([myfunctionshort(ss) for ss in xishort])\n x = [xishort, xilong]\n y = [yishort, yilong]\n ynames = [shortfname, longfname]\n plot_dict['x'] = x\n plot_dict['y'] = y\n plot_dict['ynames'] = ynames\n\n plot_many_y_hist_and_many_y(**plot_dict)",
"def visualize(self):\n self.dataFrame.hist()\n plt.show()",
"def create_histogram(self, i):\n # styling\n sns.set(style=\"whitegrid\")\n font = {'weight': 'normal'}\n plt.rc('font', **font)\n plt.rc('axes', labelsize=25) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=25) # fontsize of the tick labels\n plt.rc('ytick', labelsize=25)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=100)\n try:\n if self.dtype_is_object() or self.num_of_values() <= 15:\n if self.num_of_values() > 15:\n data = pd.to_numeric(self.data, errors='coerce')\n plot = sns.distplot(data.dropna())\n else:\n plot = sns.countplot(self.remove_nan_values())\n else:\n plot = sns.distplot(self.remove_nan_values())\n plot.set(xlabel='', ylabel='')\n except Exception:\n plt.text(0.5, 0.5, f'Unable to plot', ha='center', va='center', transform=ax.transAxes, fontsize=16)\n if not os.path.isdir('hist_images'):\n os.mkdir('hist_images')\n plt.savefig(f'hist_images/histogram{i}.png', bbox_inches='tight')\n plt.close()\n plt.clf()",
"def hist(self, color=\"#FFFFFF\", axes_style=\"darkgrid\", context=\"notebook\",\n col_wrap=4, exhibit_path=None, **kwargs):\n import matplotlib.pyplot as plt\n import seaborn as sns\n sns.set_context(context)\n\n # data0 = self.sims_data[[\"sim\", \"origin\", \"dev\", \"rectype\", \"latest\", \"reserve\",]]\n # data0 = data0[(data0[\"dev\"]==data0[\"dev\"].max()) & (data0[\"rectype\"]==\"forecast\")].reset_index(drop=True)\n # data0 = data0.drop([\"dev\", \"rectype\", \"latest\"], axis=1)\n #\n # # Include additional origin representing aggregate distribution.\n # data1 = data0.groupby(\"sim\", as_index=False)[[\"reserve\"]].sum()\n # data1[\"origin\"] =\"total\"\n # data = pd.concat([data0, data1])\n data = self.reserve_dist\n\n # Get mean, min and max ultimate and reserve by origin.\n med_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].median().rename(\n {\"reserve\": \"med_res\"}, axis=1).set_index(\"origin\")\n min_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].min().rename(\n {\"reserve\": \"min_res\"}, axis=1).set_index(\"origin\")\n max_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].max().rename(\n {\"reserve\": \"max_res\"}, axis=1).set_index(\"origin\")\n dfmetrics = functools.reduce(lambda df1, df2: df1.join(df2), (med_data, min_data, max_data))\n dfmetrics = dfmetrics.applymap(lambda v: 0 if v < 0 else v).reset_index(drop=False)\n\n with sns.axes_style(axes_style):\n\n pltkwargs = {\"color\": color, \"bins\": 20, \"edgecolor\": \"#484848\",\n \"alpha\": 1., \"linewidth\": .45}\n\n if kwargs is not None:\n pltkwargs.update(kwargs)\n\n grid = sns.FacetGrid(\n data, col=\"origin\", col_wrap=col_wrap, margin_titles=False,\n despine=True, sharex=False, sharey=False,\n )\n\n hists = grid.map(plt.hist, \"reserve\", **pltkwargs)\n grid.set_axis_labels(\"\", \"\")\n grid.set_titles(\"\", size=6)\n\n # Change ticklabel font size and place legend on each facet.\n origin_vals = sorted([int(ii) for ii in data[\"origin\"].unique() if ii != \"total\"])\n dindex = {jj: ii for ii, jj in enumerate(origin_vals)}\n dindex.update({\"total\": max(dindex.values()) + 1})\n data[\"origin_index\"] = data[\"origin\"].map(dindex)\n origin_order = data[[\"origin_index\", \"origin\"]].drop_duplicates().sort_values(\n \"origin_index\"\n ).origin.values\n\n with warnings.catch_warnings():\n\n warnings.simplefilter(\"ignore\")\n\n for origin, ax_ii in zip(origin_order, grid.axes):\n\n # xmin = np.max([0, dfmetrics[dfmetrics.origin == origin][\"min_res\"].item()])\n xmax = dfmetrics[dfmetrics.origin == origin][\"max_res\"].item() * 1.025\n xmed = dfmetrics[dfmetrics.origin == origin][\"med_res\"].item()\n origin_str = \"{}\".format(origin)\n ax_ii.set_xlim([0, xmax])\n ax_ii.axvline(xmed, color=\"#E02C70\", linestyle=\"--\", linewidth=1.5)\n ax_ii.grid(False)\n\n ymedloc = max(rect.get_height() for rect in ax_ii.patches) * .30\n ax_ii.set_yticks([])\n ax_ii.set_yticklabels([])\n ax_ii.tick_params(\n axis=\"x\", which=\"both\", bottom=True, top=False, labelbottom=True\n )\n ax_ii.set_xticklabels(\n [\"{:,.0f}\".format(jj) for jj in ax_ii.get_xticks()], size=7\n )\n ax_ii.annotate(\n origin_str, xy=(.85, .925), xycoords='axes fraction',\n textcoords='axes fraction', fontsize=9, rotation=0, color=\"#000000\",\n )\n ax_ii.annotate(\n \"median = {:,.0f}\".format(xmed), (xmed, ymedloc), xytext=(7.5, 0),\n textcoords=\"offset points\", ha=\"center\", va=\"bottom\", fontsize=7,\n rotation=90, color=\"#000000\"\n )\n\n # Draw border around each facet.\n for _, spine in 
ax_ii.spines.items():\n spine.set(visible=True, color=\"#000000\", linewidth=.50)\n\n if exhibit_path is not None:\n plt.savefig(exhibit_path)\n else:\n plt.show()",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def featuresHist(self, **kwargs):\n\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot structure:\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n\n # Evaluating score for:\n # Onpower\n x = np.arange(bins_onpower.min(), bins_onpower.max() + \\\n np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.onpower, x)\n norm = pd.cut(\n self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)\n # Plots for Onpower\n ax1.hist(\n self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)\n ax1.plot(x, y * norm)\n #ax1.set_title(\"Feature: Onpower\")\n #ax1.set_ylabel(\"Counts\")\n #ax1.set_xlabel(\"On power (W)\")\n ax1.set_ylabel(\"On power counts\")\n\n # Offpower\n x = np.arange(bins_offpower.min(), bins_offpower.max() + \\\n np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.offpower, x)\n norm = pd.cut(self.offpower_train.offpower,\n bins=bins_offpower).value_counts().max() / max(y)\n # Plots for Offpower\n ax2.hist(self.offpower_train.offpower.values,\n bins=bins_offpower, alpha=0.5)\n ax2.plot(x, y * norm)\n #ax2.set_title(\"Feature: Offpower\")\n #ax2.set_ylabel(\"Counts\")\n #ax2.set_xlabel(\"Off power (W)\")\n ax2.set_ylabel(\"Off power counts\")\n\n # Duration\n x = np.arange(bins_duration.min(), bins_duration.max() + \\\n np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.duration, x)\n norm = pd.cut(self.duration_train.duration,\n bins=bins_duration).value_counts().max() / max(y)\n # Plots for duration\n ax3.hist(self.duration_train.duration.values,\n bins=bins_duration, alpha=0.5)\n ax3.plot(x, y * norm)\n #ax3.set_title(\"Feature: Duration\")\n #ax3.set_ylabel(\"Counts\")\n #ax3.set_xlabel(\"Duration (seconds)\")\n ax3.set_ylabel(\"Duration counts\")",
"def plot_modelparametercollections_addhist(paramcol_SF,paramcol_AGN,stat_SF,stat_AGN,\n SFcol,AGNcol,LW,bindefs=None,Nbins=10):\n if (paramcol_SF is None) & (paramcol_AGN is not None):\n if len(paramcol_AGN) > 0:\n if bindefs is None:\n xmin = np.min(paramcol_AGN)-np.abs(np.min(paramcol_AGN))*0.05\n xmax = np.max(paramcol_AGN)+np.abs(np.min(paramcol_AGN))*0.05\n binwidth_x = np.diff([xmin,xmax])/Nbins\n bindefs = np.arange(xmin, xmax+binwidth_x, binwidth_x)\n\n plt.hist(paramcol_AGN, bins=bindefs,histtype='step',color=AGNcol)\n\n yminsys, ymaxsys = plt.ylim()\n\n plt.plot([stat_AGN[0]]*2, [yminsys, ymaxsys], '--',lw=LW,alpha=0.5,color=AGNcol) # mean\n plt.plot([stat_AGN[2]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=AGNcol) # median\n plt.plot([stat_AGN[4]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=AGNcol) # 68% confidence interval lower\n plt.plot([stat_AGN[8]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=AGNcol) # 68% confidence interval lower\n\n elif (paramcol_SF is not None) & (paramcol_AGN is None):\n if len(paramcol_SF) > 0:\n\n if bindefs is None:\n xmin = np.min(paramcol_SF)-np.abs(np.min(paramcol_SF))*0.05\n xmax = np.max(paramcol_SF)+np.abs(np.min(paramcol_SF))*0.05\n binwidth_x = np.diff([xmin,xmax])/Nbins\n bindefs = np.arange(xmin, xmax+binwidth_x, binwidth_x)\n\n plt.hist(paramcol_SF, bins=bindefs,histtype='step',color=SFcol)\n\n yminsys, ymaxsys = plt.ylim()\n\n plt.plot([stat_SF[0]]*2, [yminsys, ymaxsys], '--',lw=LW,alpha=0.5,color=SFcol) # mean\n plt.plot([stat_SF[2]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=SFcol) # median\n plt.plot([stat_SF[4]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=SFcol) # 68% confidence interval lower\n plt.plot([stat_SF[8]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=SFcol) # 68% confidence interval lower\n\n else:\n\n if (len(paramcol_SF) == 0) & (len(paramcol_AGN) == 0):\n pass\n else:\n if bindefs is None:\n xmin = np.min(np.append(paramcol_SF,paramcol_AGN))*0.95\n xmax = np.max(np.append(paramcol_SF,paramcol_AGN))*1.05\n binwidth_x = np.diff([xmin,xmax])/Nbins\n bindefs = np.arange(xmin, xmax+binwidth_x, binwidth_x)\n # if xlog:\n # bindefs = np.logspace(np.log10(bindefs[0]),np.log10(bindefs[-1]),len(bindefs))\n # axHistax.set_xscale('log')\n\n plt.hist(paramcol_SF, bins=bindefs,histtype='step',color=SFcol)\n plt.hist(paramcol_AGN, bins=bindefs,histtype='step',color=AGNcol)\n\n yminsys, ymaxsys = plt.ylim()\n\n if len(paramcol_SF) > 0:\n plt.plot([stat_SF[0]]*2, [yminsys, ymaxsys], '--',lw=LW,alpha=0.5,color=SFcol) # mean\n plt.plot([stat_SF[2]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=SFcol) # median\n plt.plot([stat_SF[4]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=SFcol) # 68% confidence interval lower\n plt.plot([stat_SF[8]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=SFcol) # 68% confidence interval lower\n\n if len(paramcol_AGN) > 0:\n plt.plot([stat_AGN[0]]*2, [yminsys, ymaxsys], '--',lw=LW,alpha=0.5,color=AGNcol) # mean\n plt.plot([stat_AGN[2]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=AGNcol) # median\n plt.plot([stat_AGN[4]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=AGNcol) # 68% confidence interval lower\n plt.plot([stat_AGN[8]]*2, [yminsys, ymaxsys], ':', lw=LW,alpha=0.5,color=AGNcol) # 68% confidence interval lower",
"def display(self, bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()",
"def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK",
"def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}",
"def PlotHist(self, label=None):\n ys, xs, patches = plt.hist(self.test_stats)\n plt.vlines(self.actual, 0, max(ys), linewidth=3, color='black')\n plt.xlabel('test statistic')\n plt.ylabel('count')\n plt.show()",
"def plot_hist(self):\n \n plt.figure();\n self.dist_frame.plot(kind='hist',legend=False,orientation='horizontal')",
"def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()",
"def plotHist(self):\n X = []\n Y = []\n for item in self.hist.items():\n X.append(int(item[0]))\n Y.append(int(item[1]))\n plt.bar(X,Y, align='center')\n plt.xticks([1,2,3,4,5,6,7])\n plt.ylim(0,len(self.responses))\n plt.title(self.text)\n plt.xlabel('Number of Responses')\n plt.ylabel('Value of Response')\n for x, y in zip(X, Y):\n plt.text(x, y, str(y), ha='center', va='bottom')\n plt.show()",
"def display_hist(self, parameter):\n values = self.dataframe[parameter]\n #Begining and ending date of the dataset\n beg = self.beg\n end = self.end\n #Settings of the hist chart with different bins according to parameter\n maxi = values.max()\n mini = values.min()\n #Weather parameter is either a value between 0-1 or a physical parameter >1\n if maxi > 1: #ie if we are with a parameter with values not in [0,1]\n #bins = the number of integer between max and min value of the parameter\n bins = int(maxi - mini)\n plt.hist(values, bins=bins, range=(int(mini), int(maxi)))\n else: #Else, for parameters with value between [0,1]\n bins = 2*int(10*(maxi - mini))\n plt.hist(values, bins=bins, range=(mini, maxi))\n plt.ylabel(parameter)\n plt.title('Weather historical data from {} to {}'.format(beg, end))",
"def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})",
"def plot_feature_histograms(self):\n\n num_features = len(self.feature_names)\n fig, ax = plt.subplots(nrows=1, ncols=num_features, figsize=(50, 2), tight_layout=True)\n fig.suptitle('Histograms of 19 features', fontsize=14)\n\n for i in range(num_features):\n ax[i].hist(self.train_data[self.train_data.columns[i]], bins=50)\n ax[i].set_xlabel(self.train_data.columns[i])\n\n plt.savefig(r'data_analysis\\histograms_' + self.file_name + '.png', \n facecolor=fig.get_facecolor(), bbox_inches='tight')",
"def _plot_attr_token_counts(x_token_counts):\n # Generate Histogram, https://matplotlib.org/users/screenshots.html\n num_bins = 30\n fig, ax = plt.subplots()\n n, bins, patches = ax.hist(x_token_counts, num_bins, normed=0, log=True)\n # ax.plot(bins, '--')\n ax.set_xlabel('Token Count')\n ax.set_ylabel('Bin Size')\n ax.set_title(r'Attribute Type Token Count')\n\n # Tweak spacing to prevent clipping of ylabel\n fig.tight_layout()\n plt.show()",
"def plot_histograms(top, bot, edges, resolution, *, ax=None):\n if ax is None:\n ax = plt.gca()\n\n ax.hlines(y=0,\n xmin=0,\n xmax=1,\n linestyle='dashed',\n color='black',\n alpha=0.2)\n ax.bar(edges, top, width=resolution)\n ax.bar(edges, -bot, width=resolution)\n # Set some sensible defaults - these can be overridden after the fact,\n # since we return the axes object\n ax.set_xlim((-0.05, 1.05))\n ax.set_xlabel('Predicted Probability')\n height = max(abs(x) for x in ax.get_ylim())\n ax.set_ylim((-height, height))\n ax.set_ylabel('Count')\n return ax",
"def histogram(values, title, fig_size=(4,3), path=None):\n plt.clf()\n f, ax = plt.subplots(1, figsize=fig_size)\n ax.hist(values, bins=60)\n ax.set_title(title)\n f.tight_layout()\n if(path != None):\n f.savefig(path+'/hist_'+title+'.png')"
] | [
"0.6258223",
"0.59847486",
"0.5973236",
"0.59051794",
"0.58949333",
"0.58539087",
"0.5833261",
"0.58074725",
"0.57988834",
"0.57910645",
"0.56742895",
"0.566742",
"0.5615893",
"0.5603276",
"0.5587624",
"0.5563986",
"0.5562396",
"0.55190045",
"0.55185115",
"0.54872495",
"0.5481084",
"0.54581434",
"0.54563147",
"0.54506224",
"0.54426026",
"0.54405487",
"0.5439367",
"0.5437094",
"0.5435372",
"0.540676"
] | 0.66039234 | 0 |
Bootstrapped version of plot_roc_curve. T = number of probability thresholds in curve | def plot_bootstrapped_roc_curve(
axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
line_colour=DEFAULT_ROC_COLOUR,
line_width=DEFAULT_ROC_WIDTH,
random_line_colour=DEFAULT_RANDOM_ROC_COLOUR,
random_line_width=DEFAULT_RANDOM_ROC_WIDTH):
plot_roc_curve(
axes_object=axes_object,
pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],
pofd_by_threshold=ci_mean_dict[model_eval.POFD_BY_THRESHOLD_KEY],
line_colour=line_colour, line_width=line_width,
random_line_colour=random_line_colour,
random_line_width=random_line_width)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.POFD_BY_THRESHOLD_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],
x_coords_top=ci_top_dict[model_eval.POFD_BY_THRESHOLD_KEY],
y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY]
)
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(line_colour),
TRANSPARENCY_FOR_CONFIDENCE_INTERVAL
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_roc_curve(y_true, y_pred_proba, threshold=0.5):\n\n y_pred = predict_with_threshold(y_pred_proba, threshold)\n roc_auc = roc_auc_score(y_true, y_pred)\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_proba)\n\n plt.plot( # roc auc line\n fpr, tpr,\n label='AUC={:.3f}'.format(roc_auc),\n linewidth=3, alpha=0.7)\n plt.plot( # base line\n [0, 1], [0, 1], 'r--',\n label='baseline=0.5',\n linewidth=3, alpha=0.3)\n\n plt.xlim(0, 1)\n plt.ylim(0, 1)\n plt.title('ROC curve')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.legend(loc=\"lower right\")",
"def plot_roc_curve(tprs, aucs, tag=''):\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_fpr = np.linspace(0, 1, 100)\n\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\n ax.legend(loc=\"lower right\")\n plt.tight_layout()\n plt.savefig(f'roc_{tag}.png')\n plt.show()",
"def plot_roc_curve(y_true, y_pred_prob, show_threshold=False, **params):\n\n figure = plt.figure(figsize=params.get('figsize', (17, 10)))\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, label='ROC curve (area = %0.5f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xticks(np.arange(0.0, 1.1, step=0.1))\n plt.yticks(np.arange(0.0, 1.1, step=0.1))\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n if show_threshold:\n ax2 = plt.gca().twinx()\n ax2.plot(fpr, thresholds, markeredgecolor='r',\n linestyle='dashed', color='r')\n ax2.set_ylabel('Threshold', color='r')\n ax2.set_ylim([0.0, 1.0])\n ax2.set_xlim([0.0, 1.0])\n\n plt.show()\n\n return figure, roc_auc",
"def plot_roc(self, ax, prob, y, label='ROC'):\n self.df = self.calculate_threshold_values(prob, y)\n ax.plot([1] + list(self.df.fpr), [1] + list(self.df.tpr), label=label)\n x = [1] + list(self.df.fpr)\n y1 = [1] + list(self.df.tpr)\n y2 = x\n ax.fill_between(x, y1, y2, alpha=0.2)\n ax.set_xlabel('fpr')\n ax.set_ylabel('tpr')\n ax.set_title('ROC Curve')\n ax.legend()",
"def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()",
"def plot_roc_curve(ht, scores, tp_label='tp', fp_label='fp', colors=None, title='ROC Curve', hover_mode='mouse'):\n if colors is None:\n # Get a palette automatically\n from bokeh.palettes import d3\n palette = d3['Category10'][max(3, len(scores))]\n colors = {score: palette[i] for i, score in enumerate(scores)}\n\n if isinstance(scores, str):\n scores = [scores]\n total_tp, total_fp = ht.aggregate((hl.agg.count_where(ht[tp_label]), hl.agg.count_where(ht[fp_label])))\n\n p = figure(title=title, x_axis_label='FPR', y_axis_label='TPR', tools=\"hover,save,pan,box_zoom,reset,wheel_zoom\")\n p.add_layout(Title(text=f'Based on {total_tp} TPs and {total_fp} FPs'), 'above')\n\n aucs = []\n for score in scores:\n ordered_ht = ht.key_by(_score=-ht[score])\n ordered_ht = ordered_ht.select(\n score_name=score, score=ordered_ht[score],\n tpr=hl.scan.count_where(ordered_ht[tp_label]) / total_tp,\n fpr=hl.scan.count_where(ordered_ht[fp_label]) / total_fp,\n ).key_by().drop('_score')\n last_row = hl.utils.range_table(1).key_by().select(score_name=score, score=hl.float64(float('-inf')), tpr=hl.float64(1.0), fpr=hl.float64(1.0))\n ordered_ht = ordered_ht.union(last_row)\n ordered_ht = ordered_ht.annotate(\n auc_contrib=hl.or_else((ordered_ht.fpr - hl.scan.max(ordered_ht.fpr)) * ordered_ht.tpr, 0.0)\n )\n auc = ordered_ht.aggregate(hl.agg.sum(ordered_ht.auc_contrib))\n aucs.append(auc)\n df = ordered_ht.annotate(score_name=ordered_ht.score_name + f' (AUC = {auc:.4f})').to_pandas()\n p.line(x='fpr', y='tpr', legend_field='score_name', source=ColumnDataSource(df), color=colors[score], line_width=3)\n\n p.legend.location = 'bottom_right'\n p.legend.click_policy = 'hide'\n p.select_one(HoverTool).tooltips = [(x, f\"@{x}\") for x in ('score_name', 'score', 'tpr', 'fpr')]\n p.select_one(HoverTool).mode = hover_mode\n return p, aucs",
"def plot_roc(model, X_test, Y_test, verbose=False):\n\n y_true, y_pred = Y_test, model.predict(X_test)\n if verbose:\n print(\"CLASSIFICATION REPORT\")\n print(classification_report(y_true, y_pred))\n\n y_pred_prob = model.predict_proba(X_test)[:,1]\n\n fpr, tpr, _ = roc_curve(Y_test, y_pred_prob)\n\n if verbose:\n print(\"TESTING PROBABILITIES:\")\n for a,b in zip(Y_test,y_pred_prob):\n print(a,b)\n \n if verbose:\n print(\"ROC RAW DATA:\")\n for a,b in zip(fpr, tpr):\n print(a,b)",
"def plot_roc_curve(y: np.ndarray, \n y_pred_positive: np.ndarray, \n label: str) -> None:\n fpr, tpr, thresholds = metrics.roc_curve(y, y_pred_positive)\n plt.figure(figsize=(8, 8))\n plt.plot(fpr, tpr, \"b:\", linewidth=2, label=label)\n plt.fill_between(fpr, tpr, color='blue', alpha=0.3)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.axis([0, 1, 0, 1])\n plt.xlabel('Odsetek fałszywie pozytywnych (FPR)', fontsize=16)\n plt.ylabel('Odsetek prawdziwie pozytywnych (TPR)', fontsize=16)\n plt.legend(loc=\"lower right\", fontsize=16)\n plt.title('Krzywa ROC, AUC={0:.3f}'.format(metrics.roc_auc_score(y, y_pred_positive)), fontsize=18)\n plt.show()",
"def plot_roc_curve(y, y_pred, threshold_step=0.001):\r\n # define the thresholds that will be used to compute the ROC curve\r\n thresholds = np.arange(threshold_step, 1.0, threshold_step)\r\n\r\n # define the list with the values of (sensitivity and 1 - specificity)\r\n recalls = []\r\n fall_outs = []\r\n\r\n # compute the metrics for every threshold\r\n for threshold in thresholds:\r\n\r\n # get the roc metrics\r\n recall, fall_out = roc_metrics(y_pred, y, threshold=threshold)\r\n\r\n # append to the corresponding lists\r\n recalls.append(recall)\r\n fall_outs.append(fall_out)\r\n\r\n # configure the size of the ROC curve plots\r\n plt.rcParams[\"figure.figsize\"] = [15, 10]\r\n plt.rcParams[\"xtick.labelsize\"] = 14\r\n plt.rcParams[\"ytick.labelsize\"] = 14\r\n\r\n # plot the ROC curve\r\n plt.plot(fall_outs, recalls, color=\"darkcyan\", label=\"RNN Classifier\")\r\n\r\n # plot y = x for comparison\r\n x = np.arange(0, 1.01, 0.1)\r\n plt.plot(x, x, color=\"brown\", linestyle=\"--\", label=r\"$y\\;=\\;x$\")\r\n\r\n # add legend, labels and title\r\n plt.xlabel(\"\\n1 - Specificity\", fontsize=20)\r\n plt.ylabel(\"Sensitivity\\n\", fontsize=20)\r\n plt.title(\"ROC curve\\n\", fontsize=25)\r\n plt.legend()\r\n plt.grid()\r\n plt.show()",
"def draw_roc_curve(fpr, tpr, labels=None, size_inch=(5, 5), dpi=160, show=False, block=False):\n if not isinstance(fpr, np.ndarray) or not isinstance(tpr, np.ndarray):\n raise AssertionError(\"invalid inputs\")\n if fpr.shape != tpr.shape:\n raise AssertionError(\"mismatched input sizes\")\n if fpr.ndim == 1:\n fpr = np.expand_dims(fpr, 0)\n if tpr.ndim == 1:\n tpr = np.expand_dims(tpr, 0)\n if labels is not None:\n if isinstance(labels, str):\n labels = [labels]\n if len(labels) != fpr.shape[0]:\n raise AssertionError(\"should have one label per curve\")\n else:\n labels = [None] * fpr.shape[0]\n fig = plt.figure(num=\"roc\", figsize=size_inch, dpi=dpi, facecolor=\"w\", edgecolor=\"k\")\n fig.clf()\n ax = fig.add_subplot(1, 1, 1)\n import sklearn.metrics\n for idx, label in enumerate(labels):\n auc = sklearn.metrics.auc(fpr[idx, ...], tpr[idx, ...])\n if label is not None:\n ax.plot(fpr[idx, ...], tpr[idx, ...], \"b\", label=(\"%s [auc = %0.3f]\" % (label, auc)))\n else:\n ax.plot(fpr[idx, ...], tpr[idx, ...], \"b\", label=(\"auc = %0.3f\" % auc))\n ax.legend(loc=\"lower right\")\n ax.plot([0, 1], [0, 1], 'r--')\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n ax.set_ylabel(\"True Positive Rate\")\n ax.set_xlabel(\"False Positive Rate\")\n fig.set_tight_layout(True)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax",
"def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()",
"def plt_roc_curve(preds, labels, save_str='', save=False):\r\n\r\n # Thresholds to use for plt\r\n thresholds = np.linspace(0, 1, 1000)\r\n\r\n # Init arrays for storing FPR and TPR\r\n fprs = np.zeros_like(thresholds)\r\n tprs = np.zeros_like(thresholds)\r\n\r\n for ii in range(np.size(thresholds)):\r\n\r\n # Get TPR here\r\n tprs[ii] = get_sens(preds=preds, labels=labels,\r\n threshold=thresholds[ii])\r\n\r\n # Get FPR here\r\n fprs[ii] = 1 - get_spec(preds=preds, labels=labels,\r\n threshold=thresholds[ii])\r\n\r\n # Make fig\r\n plt.figure(figsize=(12, 6))\r\n plt.rc(\"font\", family=\"Times New Roman\")\r\n plt.tick_params(labelsize=20)\r\n plt.plot(fprs, tprs, 'k-')\r\n plt.plot(np.linspace(0, 1, 1000), np.linspace(0, 1, 1000), 'b--')\r\n plt.xlabel('False Positive Rate', fontsize=24)\r\n plt.ylabel('True Positive Rate', fontsize=24)\r\n plt.tight_layout()\r\n\r\n if save: # If saving\r\n\r\n verify_path(os.path.join(get_proj_path(), 'output/roc-figs/'))\r\n out_path = os.path.join(get_proj_path(), 'output/roc-figs/')\r\n\r\n plt.savefig(os.path.join(out_path, '%s.png' % save_str), dpi=150)\r\n plt.close()\r\n save_pickle(np.array([fprs, tprs]),\r\n os.path.join(out_path, '%s.pickle' % save_str))",
"def auroc_helper(labels, preds):\r\n fpr, tpr, threshold = metrics.roc_curve(labels, preds)\r\n df = pd.DataFrame({'fpr': fpr,\r\n 'tpr': tpr,\r\n 'threshold': threshold})\r\n # uncomment for the best model to save\r\n # df.to_csv('/home/delvinso/nephro/output/all_custom_thresholds.csv', index = False)\r\n epoch_auc = metrics.auc(fpr, tpr)\r\n print('AUC: {}'.format(epoch_auc))\r\n\r\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\r\n f = plt.figure()\r\n lw = 2\r\n plt.plot(fpr, tpr, color='darkred',\r\n lw=lw, label='ROC curve (area = %0.3f)' % epoch_auc)\r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('1 - Specificity (FPR)')\r\n plt.ylabel('Sensitivity (TPR)')\r\n plt.title('ROC - Bladder vs Other')\r\n plt.legend(loc=\"lower right\")\r\n return f",
"def plot_roc_curves(labels, probas, name='', ax=None):\n # Setup axis\n if ax is None:\n fig, ax = plt.subplots(figsize=(20, 10))\n\n plot_roc(labels, probas, name=name, ax=ax)\n\n # Plot chance\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black', alpha=.8)\n\n # Fill bottom right\n ax.fill_between([0, 1], [0, 1], alpha=0.3, color='black')\n\n # Settings\n ax.set_xlabel('False Positive Rate or (1 - Specifity)', fontsize=15)\n ax.set_ylabel('True Positive Rate or (Sensitivity)', fontsize=15)\n ax.set_title('Receiver Operating Characteristic', weight='bold', fontsize=18)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.legend(loc='lower right')\n\n return ax",
"def plot_roc(X,y,test_preds,fname=\"res/roc.png\"):\n\t#Retrieve multiple fpr and tpr values for different thresholds\n\tfpr, tpr, thresholds = roc_curve(y,test_preds)\n\tplt.plot(fpr, tpr)\n\tplt.title(auc(fpr, tpr))\n\tplt.savefig(fname, bbox_inches='tight')\n\tplt.close()",
"def plot_multi_roc_curve(y_trues, y_pred_probs, labels, **params):\n\n figure = plt.figure(figsize=params.get('figsize', (17, 10)))\n roc_aucs = []\n for y_true, y_pred_prob, label in zip(y_trues, y_pred_probs, labels):\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)\n roc_auc = auc(fpr, tpr)\n roc_aucs.append(roc_auc)\n plt.plot(fpr, tpr, label=f'{label} ROC curve (area = %0.5f)' % roc_auc)\n\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xticks(np.arange(0.0, 1.1, step=0.1))\n plt.yticks(np.arange(0.0, 1.1, step=0.1))\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n\n plt.show()\n\n return figure, roc_aucs",
"def _bokeh_roc_curve(\n y_true_binary: np.ndarray,\n y_pred_score: np.ndarray,\n title_rows: Sequence[str],\n sample_weights: Optional[np.ndarray],\n) -> Callable[[], Figure]:\n assert y_true_binary.shape == y_pred_score.shape\n assert set(y_true_binary).issubset({0, 1}) or set(y_true_binary).issubset(\n {False, True}\n )\n assert np.ndim(y_true_binary) == 1\n\n fpr, tpr, thresholds = sklmetrics.roc_curve(\n y_true=y_true_binary, y_score=y_pred_score, sample_weight=sample_weights\n )\n\n def figure() -> Figure:\n source = ColumnDataSource(\n data={\n \"FPR\": fpr,\n \"TPR\": tpr,\n \"threshold\": thresholds,\n \"specificity\": 1.0 - fpr,\n }\n )\n\n p = plotting.figure(\n plot_height=400,\n plot_width=350,\n tools=TOOLS,\n toolbar_location=TOOLBAR_LOCATION,\n # toolbar_location=None, # hides entire toolbar\n match_aspect=True,\n )\n\n p.xaxis.axis_label = \"FPR\"\n p.yaxis.axis_label = \"TPR\"\n\n add_title_rows(p, title_rows)\n apply_default_style(p)\n\n curve = p.line(x=\"FPR\", y=\"TPR\", line_width=2, color=DARK_BLUE, source=source)\n p.line(\n x=[0.0, 1.0],\n y=[0.0, 1.0],\n line_alpha=0.75,\n color=\"grey\",\n line_dash=\"dotted\",\n )\n\n p.add_tools(\n HoverTool(\n # make sure there is no tool tip for the diagonal baseline\n renderers=[curve],\n tooltips=[\n (\"TPR\", \"@TPR\"),\n (\"FPR\", \"@FPR\"),\n (\"Sensitivity\", \"@TPR\"),\n (\"Specificity\", \"@specificity\"),\n (\"Threshold\", \"@threshold\"),\n ],\n # display a tooltip whenever the cursor is vertically in line with a glyph\n mode=\"vline\",\n )\n )\n\n return p\n\n return figure",
"def roc(test_set_y_org,test_set_y_pred_prob,classes_unique,plot_curve=False,filename=\"./fig_roc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import roc_curve\n #from sklearn.metrics import auc\n from sklearn.metrics import roc_auc_score\n from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n \n for c in range(n_classes):\n fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n \n # Compute macro-average ROC curve and AUROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for c in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[c], tpr[c])\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n \n # Compute micro-average PRC curve and PRC areas\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob.ravel())\n roc_auc[\"macro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"macro\") # micro macro, weighted, or samples\n roc_auc[\"micro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob,average=\"micro\") # micro macro, weighted, or samples\n roc_auc[\"weighted\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"weighted\") # micro macro, weighted, or samples\n roc_auc[\"samples\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.plot([0, 1], [0, 1], 'k--')\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(fpr[\"macro\"], tpr[\"macro\"], linewidth=1,color=colors[n_classes],label='macro-avg ROC (area={0:0.4f})'.format(roc_auc[\"macro\"]))\n \n for c in range(n_classes):\n if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(fpr[c], tpr[c],linewidth=1,color=colors[c],label='ROC of {0} (area={1:0.4f})'.format(classes_unique[c], roc_auc[c]))\n \n # add some text for labels, title and axes ticks\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"True Positive Rate\",fontsize=12)\n ax.set_xlabel(\"False Positive Rate\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower right\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n roc_auc_list=[roc_auc[c] for c in range(n_classes)]\n roc_auc_list.extend([roc_auc[\"macro\"],roc_auc[\"micro\"],roc_auc[\"weighted\"],roc_auc[\"samples\"]])\n roc_auc=np.array(roc_auc_list)\n names=[\"AUROC_\" + c for c in classes_unique]\n names.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n names=np.array(names)\n return roc_auc,names",
"def plot_ROC_curve(y_true, y_pred, labels, roc_path):\n n_classes = len(labels)\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for y, pred, label in zip(y_true.transpose(), y_pred.transpose(), labels):\n fpr[label], tpr[label], _ = roc_curve(y, pred)\n roc_auc[label] = auc(fpr[label], tpr[label])\n\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[label] for label in labels]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for label in labels:\n mean_tpr += interp(all_fpr, fpr[label], tpr[label])\n\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_true.ravel(), y_pred.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Plot all ROC curves\n plt.figure()\n lw = 2\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.3f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=2)\n\n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.3f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=2)\n\n if len(labels) == 4:\n colors = ['green', 'cornflowerblue', 'darkorange', 'darkred']\n else:\n colors = ['green', 'cornflowerblue', 'darkred']\n for label, color in zip(labels, cycle(colors)):\n plt.plot(fpr[label], tpr[label], color=color, lw=lw,\n label='ROC curve of {0} (area = {1:0.3f})'\n ''.format(label, roc_auc[label]))\n\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n matplotlib.rcParams.update({'font.size': 14})\n plt.savefig('%s.png' % roc_path, pad_inches = 0, bbox_inches='tight')",
"def plot_ROC_curve(model, X_train, X_test, y_train, y_test):\n \n # Model Metrics\n print model\n print \"*************************** Model Metrics *********************************\"\n print 'Accuracy: %s' % cross_val_score(model, X_train, y_train, scoring = 'accuracy', cv = 5).mean()\n print 'Precision: %s' % cross_val_score(model, X_train, y_train, scoring = 'precision', cv = 5).mean()\n print 'Recall: %s' % cross_val_score(model, X_train, y_train, scoring = 'recall_weighted', cv = 5).mean()\n print 'F1: %s' % cross_val_score(model, X_train, y_train, scoring = 'f1', cv = 5).mean()\n\n fitted = model.fit(X_train, y_train)\n try:\n y_score = fitted.predict_proba(X_test)[:,1]\n except:\n y_score = fitted.decision_function(X_test)\n \n # Confusion matrix\n print \"********************* Normalized Confusion Matrix *************************\"\n cm = confusion_matrix(y_test, fitted.predict(X_test))\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n \n print('Normalized confusion matrix')\n print(cm_normalized)\n plt.matshow(cm, cmap=plt.cm.Blues)\n plt.colorbar()\n plt.xlabel('Predicted Values')\n plt.ylabel('Actual Values')\n \n # Classification Report\n print \"********************* Classification Report********************************\" \n print classification_report(y_test, fitted.predict(X_test))\n \n print \"********************* ROC Curve *******************************************\"\n \n # ROC Curve\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n \n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()",
"def plot_roc_curve(x_data, labels, net, plotfile,\n title=''):\n\n # Have the net predict, then split the scores by ground truth\n scores = net.predict(x_data)\n\n distfile = PLOTDIR / plotfile.replace('roc', 'dist')\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n df = pd.DataFrame({'Condition': ['Positive' if int(i) == 1 else 'Negative'\n for i in labels[0, :]],\n 'Score': scores[0, :]})\n sns.violinplot(x='Condition', y='Score', data=df, ax=ax)\n ax.set_title('{} Dist for Rap1 Identification'.format(title))\n\n fig.savefig(str(distfile))\n\n plt.close()\n\n fp_rate, tp_rate = calc_roc(scores[labels], scores[~labels])\n\n # Make the plot\n plotfile = PLOTDIR / plotfile\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n ax.plot(fp_rate, tp_rate, '-o', linewidth=3)\n\n # Plot the line for perfect confusion\n ax.plot([0, 1], [0, 1], '--', linewidth=3)\n\n ax.set_title('{} ROC for Rap1 Identification'.format(title))\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_xlim([-0.01, 1.01])\n ax.set_ylim([-0.01, 1.01])\n\n fig.savefig(str(plotfile))\n plt.close()",
"def roc_plot(label, fpr, tpr, roc_auc):\n plt.figure()\n for i in range(len(label)):\n plt.plot(fpr[i], tpr[i], label=label[i] + ' AUC = %0.2f' % roc_auc[i], alpha=0.75)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([-0.01, 1.01])\n plt.ylim([-0.01, 1.01])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc='lower right')\n plt.show()",
"def plot_roc(self, X, y):\n plot_roc(self.clf, X, y)",
"def generate_roc_curve(clf, X, y, survived_weight=1, plot=False, n_classes=5):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n aucs = []\n for i in range(5):\n # shuffle and split training and test sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)\n #weights = np.array([survived_weight if s == 1 else 1 for s in y_train])\n #clf.fit(X_train, y_train, sample_weight=weights)\n clf.fit(X_train, y_train)\n\n fpr[i], tpr[i], _ = roc_curve(y_test, clf.predict_proba(X_test)[:,1])\n roc_auc[i] = auc(fpr[i], tpr[i])\n aucs.append(roc_auc[i])\n print('ROC AUC: {:.2%}'.format(roc_auc[i]))\n\n auc_mean = \"{:.3%}\".format(np.mean(aucs))\n auc_std = \"{:.3%}\".format(np.std(aucs))\n auc_lower = \"{:.3%}\".format(np.mean(aucs)-np.std(aucs))\n print(\"ROC - Area under curve: {} and stddev: {}\".format(auc_mean, auc_std))\n\n if plot:\n # Plot of a ROC curve for a specific class\n plt.figure()\n for i in range(5):\n plt.plot(fpr[i], tpr[i], label='ROC curve (area = %0.2f)' % roc_auc[i])\n\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc=\"lower right\")\n plt.show()",
"def roc_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import roc_curve\n if train==True:\n ypredTrain = model.predict(X_train)\n fpr, tpr, thresholds = roc_curve(y_train, ypredTrain)\n plt.plot(fpr, tpr, linewidth=3, label=None, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel('False Positive Rate', size=12)\n plt.ylabel('True Positive Rate', size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"ROC Curve: Sensitivity/Specificity Trade-off\\n\\n(Train)\\n\", size=14)\n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n fpr, tpr, thresholds = roc_curve(y_test, ypredTest)\n plt.plot(fpr, tpr, linewidth=3, label=None, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel('False Positive Rate', size=12)\n plt.ylabel('True Positive Rate', size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title('ROC Curve: Sensitivity/Specificity Trade-off\\n\\n(Test)\\n', size=14)\n plt.show()",
"def plot_roc(y_true, y_probas, classes = None, title='ROC Curves', average_plot = True,\n ax=None, figsize=None, cmap='nipy_spectral',\n title_fontsize=\"large\", text_fontsize=\"medium\"):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax.set_title(title, fontsize=title_fontsize)\n n_fold_roc_auc = []\n for i in range(len(y_true)):\n fpr, tpr, _ = roc_curve(y_true[i], y_probas[i])\n roc_auc = auc(fpr, tpr)\n color = plt.cm.get_cmap(cmap)(float(i) / len(y_true))\n \n if classes is None:\n s = 'fold'\n else:\n s = classes[i]\n ax.plot(fpr, tpr, lw=2, color=color,\n label='ROC curve of {0} {1} (area = {2:0.2f})'\n ''.format(s, i, roc_auc))\n n_fold_roc_auc.append(roc_auc)\n\n average_roc_auc = 0\n if classes is None:\n if average_plot:\n all_y_true = np.concatenate(y_true)\n all_probas = np.concatenate(y_probas)\n fpr_all, tpr_all, _ = roc_curve(all_y_true, all_probas)\n average_roc_auc = auc(fpr_all, tpr_all)\n ax.plot(fpr_all, tpr_all,\n label='average ROC curve '\n '(area = {0:0.2f})'.format(average_roc_auc),\n color='blue', linestyle=':', linewidth=4)\n\n ax.plot([0, 1], [0, 1], 'k--', lw=2)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)\n ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.legend(loc='lower right', fontsize=text_fontsize)\n return ax, n_fold_roc_auc, average_roc_auc",
"def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)",
"def roc2(fpr, tpr, roc_auc):\n plt.figure()\n plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()",
"def _create_roc_curve(forecast_probabilities, observed_labels, output_dir_name):\n\n pofd_by_threshold, pod_by_threshold = model_eval.get_points_in_roc_curve(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n threshold_arg=model_eval.THRESHOLD_ARG_FOR_UNIQUE_FORECASTS,\n unique_forecast_precision=FORECAST_PRECISION_FOR_THRESHOLDS)\n\n auc = model_eval.get_area_under_roc_curve(\n pofd_by_threshold=pofd_by_threshold,\n pod_by_threshold=pod_by_threshold)\n scikit_learn_auc = roc_auc_score(\n y_true=observed_labels, y_score=forecast_probabilities)\n\n title_string = 'AUC = {0:.4f} ... scikit-learn AUC = {1:.4f}'.format(\n auc, scikit_learn_auc)\n print title_string\n\n figure_file_name = '{0:s}/roc_curve.jpg'.format(output_dir_name)\n print 'Saving ROC curve to: \"{0:s}\"...\\n'.format(figure_file_name)\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n model_eval_plotting.plot_roc_curve(\n axes_object=axes_object, pod_by_threshold=pod_by_threshold,\n pofd_by_threshold=pofd_by_threshold)\n\n pyplot.title(title_string)\n pyplot.savefig(figure_file_name, dpi=DOTS_PER_INCH)\n pyplot.close()\n\n return auc, scikit_learn_auc",
"def plot_roc_distributions(self, model_str, resampling_number, roc_curve_steps, roc_plot_path):\n sampling_types = ['Normal', 'Oversampling', 'Undersampling']\n\n PLOT_MARGIN = 0.05\n plt.rcParams[\"figure.figsize\"] = (16, 9)\n plt.subplots_adjust(wspace=0.2, hspace=0.4)\n sub_plot_index = 1\n\n for sampling_type in sampling_types:\n mean_fpr, mean_tpr, mean_threshold, mean_auc, std_auc = self._compute_mean_auc_data(sampling_type, model_str, resampling_number, roc_curve_steps)\n\n plt.subplot(int('22' + str(sub_plot_index)))\n\n sub_plot_index += 1\n\n plt.plot(mean_fpr, mean_tpr, color='g', label='AUC:{0}, STD:{1}'.format(round(mean_auc, 2), round(std_auc, 2)))\n plt.plot(mean_fpr, mean_threshold, linestyle='--', lw=2, color='b', label='Thresholds')\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance')\n\n plt.xlim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.ylim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(sampling_type + ' ROC Distribution')\n plt.legend(loc=\"lower right\")\n\n plt.savefig(roc_plot_path)\n plt.clf()"
] | [
"0.7486583",
"0.7387596",
"0.73390347",
"0.7303862",
"0.71846604",
"0.7127741",
"0.71195436",
"0.70976794",
"0.7094367",
"0.7023224",
"0.69831747",
"0.6977276",
"0.6967064",
"0.69567615",
"0.69160146",
"0.6896866",
"0.6894308",
"0.6850953",
"0.682168",
"0.6805597",
"0.6803503",
"0.6794241",
"0.6793439",
"0.677283",
"0.6767744",
"0.6767021",
"0.67477185",
"0.67421055",
"0.6708559",
"0.67017835"
] | 0.7392667 | 1 |
Bootstrapped version of plot_performance_diagram. | def plot_bootstrapped_performance_diagram(
axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
line_colour=DEFAULT_PERFORMANCE_COLOUR,
line_width=DEFAULT_PERFORMANCE_WIDTH,
bias_line_colour=DEFAULT_FREQ_BIAS_COLOUR,
bias_line_width=DEFAULT_FREQ_BIAS_WIDTH):
plot_performance_diagram(
axes_object=axes_object,
pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],
success_ratio_by_threshold=ci_mean_dict[model_eval.SR_BY_THRESHOLD_KEY],
line_colour=line_colour, line_width=line_width,
bias_line_colour=bias_line_colour, bias_line_width=bias_line_width)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.SR_BY_THRESHOLD_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],
x_coords_top=ci_top_dict[model_eval.SR_BY_THRESHOLD_KEY],
y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY],
for_performance_diagram=True)
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(line_colour),
TRANSPARENCY_FOR_CONFIDENCE_INTERVAL
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_dashboard(h, t, k, p):\n plt.style.use('seaborn')\n # Initialize the dashboard\n fig = plt.figure(figsize=(20, 8))\n ax1 = fig.add_subplot(2, 2, 1)\n ax2 = fig.add_subplot(2, 2, 2)\n ax3 = fig.add_subplot(2, 2, 3)\n ax4 = fig.add_subplot(2, 2, 4)\n\n # Create individual graphs\n dt_line, = ax1.plot(h, lw=3, c='k')\n total_line, = ax2.plot(t, lw=3, c='#d62728')\n k_line, = ax3.plot(k, lw=3, c='#1f77b4')\n p_line = ax4.plot(p, lw=3, c='#2ca02c')\n\n ax1.set_title(r'Variation in $\\Delta t$')\n ax1.set_ylabel(r'$\\Delta t$')\n ax2.set_title(r'Total Energy over Time')\n ax2.set_ylabel('Total Energy')\n ax3.set_title('Kinetic Energy over Time')\n ax3.set_ylabel('Kinetic Energy')\n ax3.set_xlabel('Time Steps')\n ax4.set_title('Potential Energy over Time')\n ax4.set_ylabel('Potential Energy')\n ax4.set_xlabel('Time Steps')\n\n plt.show()\n\n \"\"\"im = ax[0, 0].imshow(model.lattice, cmap='Greys', vmin=-1, vmax=1)\n energy_line, = ax[0, 1].plot([], [], lw=3)\n mag_line, = ax[1, 0].plot([], [], lw=3)\n heat_line, = ax[1, 1].plot([], [], lw=3)\n susceptibility_line, = ax[2, 0].plot([], [], lw=3)\n acceptance_line, = ax[2, 1].plot([], [], lw=3)\"\"\"",
"def _plot_init(self):\n pass",
"def _plot_init(self):\n pass",
"def plot_speed_benchmark(dump_dir):\n\n speed_file = os.path.join(dump_dir, \"stats/rnn_speed.csv\")\n\n assert os.path.isfile(speed_file), lu.str_to_redstr(\n f\"speed_file does not exist. Run ``python run.py --speed`` first.\"\n )\n\n df = pd.read_csv(speed_file)\n\n df_cpu = df[df.device == \"cpu\"]\n df_gpu = df[df.device == \"gpu\"]\n\n cpu_is_available = len(df_cpu) > 0\n gpu_is_available = len(df_gpu) > 0\n\n # CPU benchmark should always be available\n assert cpu_is_available\n\n n_models = len(df.model.unique())\n\n if gpu_is_available:\n # Space bars by 2 units to leave room for gpu\n idxs_cpu = 0.5 + np.arange(3 * n_models)[::3]\n idxs_gpu = idxs_cpu + 1\n xticks = idxs_cpu + 0.5\n xtick_labels = df_cpu.model.values.tolist()\n\n else:\n # Space bars by 1 unit\n idxs_cpu = 0.5 + np.arange(2 * n_models)[::2]\n xticks = idxs_cpu\n xtick_labels = df_cpu.model.values.tolist()\n\n plt.figure()\n ax = plt.gca()\n\n for i in range(len(idxs_cpu)):\n label = \"CPU\" if i == 0 else None\n plt.bar(\n idxs_cpu[i],\n df_cpu[\"Supernova_per_s\"].values[i],\n width=1,\n color=\"C0\",\n label=label,\n )\n\n if gpu_is_available:\n for i in range(len(idxs_gpu)):\n label = \"GPU\" if i == 0 else None\n plt.bar(\n idxs_gpu[i],\n df_gpu[\"Supernova_per_s\"].values[i],\n width=1,\n color=\"C2\",\n label=label,\n )\n\n ax.set_ylabel(\"Lightcurves / s\", fontsize=16)\n ax.set_title(\"Inference throughput\", fontsize=20)\n\n ax.set_xticks(xticks)\n ax.set_xticklabels(xtick_labels)\n ax.set_yscale(\"log\")\n ax.legend()\n\n plt.grid()\n plt.tight_layout()\n plt.savefig(os.path.join(dump_dir, \"figures/rnn_speed.png\"))\n plt.clf()\n plt.close()",
"def _InitializeVizier(self):\n self._should_report_metrics = False",
"def plot_performance_diagram(\n axes_object, pod_by_threshold, success_ratio_by_threshold,\n line_colour=DEFAULT_PERFORMANCE_COLOUR,\n line_width=DEFAULT_PERFORMANCE_WIDTH,\n bias_line_colour=DEFAULT_FREQ_BIAS_COLOUR,\n bias_line_width=DEFAULT_FREQ_BIAS_WIDTH):\n\n error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1)\n error_checking.assert_is_geq_numpy_array(\n pod_by_threshold, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n pod_by_threshold, 1., allow_nan=True)\n num_thresholds = len(pod_by_threshold)\n\n error_checking.assert_is_numpy_array(\n success_ratio_by_threshold,\n exact_dimensions=numpy.array([num_thresholds]))\n error_checking.assert_is_geq_numpy_array(\n success_ratio_by_threshold, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n success_ratio_by_threshold, 1., allow_nan=True)\n\n success_ratio_matrix, pod_matrix = model_eval.get_sr_pod_grid()\n csi_matrix = model_eval.csi_from_sr_and_pod(\n success_ratio_matrix, pod_matrix)\n frequency_bias_matrix = model_eval.frequency_bias_from_sr_and_pod(\n success_ratio_matrix, pod_matrix)\n\n this_colour_map_object, this_colour_norm_object = _get_csi_colour_scheme()\n\n pyplot.contourf(\n success_ratio_matrix, pod_matrix, csi_matrix, LEVELS_FOR_CSI_CONTOURS,\n cmap=this_colour_map_object, norm=this_colour_norm_object, vmin=0.,\n vmax=1., axes=axes_object)\n\n colour_bar_object = plotting_utils.plot_colour_bar(\n axes_object_or_matrix=axes_object, data_matrix=csi_matrix,\n colour_map_object=this_colour_map_object,\n colour_norm_object=this_colour_norm_object,\n orientation_string='vertical', extend_min=False, extend_max=False)\n\n colour_bar_object.set_label('CSI (critical success index)')\n\n bias_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(\n bias_line_colour)\n\n bias_colours_2d_tuple = ()\n for _ in range(len(LEVELS_FOR_FREQ_BIAS_CONTOURS)):\n bias_colours_2d_tuple += (bias_colour_tuple,)\n\n bias_contour_object = pyplot.contour(\n success_ratio_matrix, pod_matrix, frequency_bias_matrix,\n LEVELS_FOR_FREQ_BIAS_CONTOURS, colors=bias_colours_2d_tuple,\n linewidths=bias_line_width, linestyles='dashed', axes=axes_object)\n\n pyplot.clabel(\n bias_contour_object, inline=True,\n inline_spacing=PIXEL_PADDING_FOR_FREQ_BIAS_LABELS,\n fmt=STRING_FORMAT_FOR_FREQ_BIAS_LABELS, fontsize=FONT_SIZE)\n\n nan_flags = numpy.logical_or(\n numpy.isnan(success_ratio_by_threshold), numpy.isnan(pod_by_threshold)\n )\n\n if not numpy.all(nan_flags):\n real_indices = numpy.where(numpy.invert(nan_flags))[0]\n\n axes_object.plot(\n success_ratio_by_threshold[real_indices],\n pod_by_threshold[real_indices],\n color=plotting_utils.colour_from_numpy_to_tuple(line_colour),\n linestyle='solid', linewidth=line_width\n )\n\n axes_object.set_xlabel('Success ratio (1 - FAR)')\n axes_object.set_ylabel('POD (probability of detection)')\n axes_object.set_xlim(0., 1.)\n axes_object.set_ylim(0., 1.)",
"def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')",
"def initialize_visualization(self) -> None:\n pass",
"def plot(self):\n pass",
"def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)",
"def draw_perf(best_per_lr, learning_rate_updates_epoch, fignumber=0, mode=\"loss\", pdf=None):\n plt.figure(fignumber, figsize=(6,3))\n plt.clf()\n ax = plt.subplot(1,1,1)\n plot_perf(ax, best_per_lr, learning_rate_updates_epoch, mode)\n if pdf is None:\n plt.show()\n else:\n pdf.savefig()\n plt.close()",
"def main():\n df_data = import_clean_process()\n plot_data_matplotlib(df_data)\n return",
"def initialize(self) -> None:\n # Only do matplotlib import when necessary\n super().initialize()\n from matplotlib import pyplot as plt\n self.fig, self.ax = plt.subplots()\n if self.state_map is not None:\n self._add_state_map(self.state_map)\n else:\n self.categories = self.simulation.state_list",
"def plot_bootstrapped_attributes_diagram(\n figure_object, axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,\n num_examples_by_bin,\n reliability_line_colour=DEFAULT_RELIABILITY_COLOUR,\n reliability_line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_relia_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_relia_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH,\n no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,\n no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,\n other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,\n other_line_width=DEFAULT_CLIMATOLOGY_WIDTH,\n histogram_bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,\n histogram_bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,\n histogram_bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):\n\n plot_attributes_diagram(\n figure_object=figure_object, axes_object=axes_object,\n mean_forecast_by_bin=ci_mean_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],\n num_examples_by_bin=num_examples_by_bin,\n reliability_line_colour=reliability_line_colour,\n reliability_line_width=reliability_line_width,\n perfect_relia_line_colour=perfect_relia_line_colour,\n perfect_relia_line_width=perfect_relia_line_width,\n no_skill_line_colour=no_skill_line_colour,\n no_skill_line_width=no_skill_line_width,\n other_line_colour=other_line_colour, other_line_width=other_line_width,\n histogram_bar_face_colour=histogram_bar_face_colour,\n histogram_bar_edge_colour=histogram_bar_edge_colour,\n histogram_bar_edge_width=histogram_bar_edge_width)\n\n polygon_object = _confidence_interval_to_polygon(\n x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],\n x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]\n )\n\n polygon_colour = matplotlib.colors.to_rgba(\n plotting_utils.colour_from_numpy_to_tuple(reliability_line_colour),\n TRANSPARENCY_FOR_CONFIDENCE_INTERVAL\n )\n\n polygon_patch = PolygonPatch(\n polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)\n\n axes_object.add_patch(polygon_patch)",
"def show():\n setup()\n plt.show()",
"async def plot(self, new=False) -> None:\n self._logger.debug(\"running\")\n self.figure.clear()\n self.figure.set_tight_layout(True)\n num_plots = len(self._plots)\n axes = None\n for i in range(num_plots):\n plot = self._plots[i]\n name = plot[0]\n active = plot[2]\n if active:\n if i == 0:\n axes = self.figure.add_subplot(1, 1, 1)\n axes.tick_params(axis='x', labelrotation=30)\n axes.set_ylabel(name, color='#1f77b4')\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(axes, name))\n else:\n alt_axes = axes.twinx()\n alt_axes.set_ylabel(name, color='#ff7f0e')\n alt_axes.tick_params(axis='y', labelcolor='#ff7f0e')\n alt_axes.set_yticks(np.arange(0, 6, step=1))\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(alt_axes, name))\n\n if not new:\n self.add_vert_lines()\n await sleep(.001)\n self.figure.canvas.draw()\n self._logger.debug(\"done\")",
"def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()",
"def plateSeparatedPerformance():\n model_perfs = pickle.load(open(\"pickles/separatePlateTestModelPerformances.pkl\", \"rb\"))\n model_stds = pickle.load(open(\"pickles/separatePlateTestModelStds.pkl\", \"rb\"))\n null_YFP_performances = pickle.load(open(\"pickles/separatePlateTestYFPPerformances.pkl\", \"rb\"))\n null_YFP_stds = pickle.load(open(\"pickles/separatePlateTestYFPStds.pkl\", \"rb\"))\n null_DAPI_performances = pickle.load(open(\"pickles/separatePlateTestDAPIPerformances.pkl\", \"rb\"))\n null_DAPI_stds = pickle.load(open(\"pickles/separatePlateTestDAPIStds.pkl\", \"rb\"))\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"DRW1\", \"DRW2\", \"DRW3\", \"DRW4\", \"DRW5\", \"DRW6\"]\n x = np.array([1, 2, 3, 4, 5, 6])\n width = .26\n rects = ax.bar(x, model_perfs, width, yerr=model_stds, capsize=3, error_kw=dict(lw=.2, capsize=1, capthick=1), color=\"red\", label=\"ML Model\", zorder=3)\n rects2 = ax.bar(x + width, null_YFP_performances, width, yerr=null_YFP_stds, capsize=3, error_kw=dict(lw=.2, capsize=1, capthick=1), color=\"gold\",label=\"Null YFP Model\", zorder=3)\n rects3 = ax.bar(x+ 2*width, null_DAPI_performances, width, yerr=null_DAPI_stds, capsize=3, error_kw=dict(lw=.2, capsize=1, capthick=1), color=\"blue\", label=\"Null DAPI Model\", zorder=3)\n autolabel(rects, ax, fontsize=8)\n autolabel(rects2, ax, fontsize=8)\n autolabel(rects3, ax, fontsize=8)\n plt.title(\"Pearson Performance by Drug Perturbation\",fontname=\"Times New Roman\", fontsize=14, y=1.0)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_xlabel(\"Drug\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(7))\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width, box.height * 0.85])\n ax.legend(loc='upper right', prop={\"family\":\"Times New Roman\", \"size\":10}, bbox_to_anchor=(1, 1.32))\n plt.gcf().subplots_adjust(top=.76)\n plt.savefig(\"matplotlib_figures/separatedPlatesPerformance.png\", dpi=300)",
"def plot_speedup(serial_time, parallel_times, proc_list):\r\n fig = plt.figure()\r\n speedup = [serial_time/p for p in parallel_times]\r\n plt.plot(proc_list, speedup, label='Actual speedup')\r\n plt.plot(proc_list, proc_list, label='Theoretical speedup', ls='--', c='k')\r\n plt.title('Benchmark performance speedup for data download')\r\n plt.xlabel('Number of processes')\r\n plt.ylabel('Speedup')\r\n plt.legend()\r\n plt.savefig('data_proc_speedup.png')",
"def plot_data():\n \n [X_train, X_dev, X_test, Y_train, Y_dev, Y_test, numOutputNodes] = load_data('regression') \n \n traindev = np.concatenate((Y_train, Y_dev), 1)\n traindevtest = np.concatenate((traindev, Y_test), 1)\n tdt = traindevtest.reshape(traindevtest.shape[1],)\n\n Y_train = Y_train.reshape(Y_train.shape[1],)\n Y_dev = Y_dev.reshape(Y_dev.shape[1],)\n Y_test = Y_test.reshape(Y_test.shape[1],)\n\n sigma = np.round(np.std(tdt), 3)\n mu = np.round(np.mean(tdt), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(1)\n plt.hist(tdt)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt.size, mu, sigma))\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(2)\n plt.hist([Y_train, Y_dev, Y_test], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n # below is graphing for the charge data, as opposed to the averaged spectrum data\n [X_train1, X_dev1, X_test1, _, _, _, Y_train1, Y_dev1, Y_test1, numOutputNodes1] = load_data('multi_task')\n traindev1 = np.concatenate((Y_train1, Y_dev1), 1)\n traindevtest1 = np.concatenate((traindev1, Y_test1), 1)\n tdt1 = traindevtest1.reshape(traindevtest1.shape[1],)\n\n Y_train1 = Y_train1.reshape(Y_train1.shape[1],)\n Y_dev1 = Y_dev1.reshape(Y_dev1.shape[1],)\n Y_test1 = Y_test1.reshape(Y_test1.shape[1],)\n\n sigma = np.round(np.std(tdt1), 3)\n mu = np.round(np.mean(tdt1), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(3)\n plt.hist(tdt1)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt1.size, mu, sigma))\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(4)\n plt.hist([Y_train1, Y_dev1, Y_test1], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n return None",
"def run_plot(args):\n # print(\"running chronqc_plot\")\n chronqc_plot.main(args)",
"def H_perform_plot(performance, hurricane):\n fig = plt.figure(figsize = (15, 10))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricane[i]\n plt.plot(np.arange(0, len(temp1), 1), temp1, color = temp2.c, label = temp2.name)\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(temp1), 30))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)",
"def make_timeplot(df_measure, df_prediction):\n # mode = 'confirmed'\n mode = 'active'\n df_measure_confirmed = df_measure[mode]\n colors = px.colors.qualitative.Dark24\n n_colors = len(colors)\n fig = go.Figure()\n for i, country in enumerate(df_measure_confirmed.columns):\n fig.add_trace(go.Scatter(x=df_measure_confirmed.index, \n y=df_measure_confirmed[country],\n name=country[1], mode='markers+lines',\n marker_color=colors[i%n_colors],\n line_color=colors[i%n_colors],\n visible=False))\n for i, country in enumerate(df_prediction.columns):\n fig.add_trace(go.Scatter(x=df_prediction.index, \n y=df_prediction[country],\n name='+' + country[1], mode='lines',\n line_dash='dash',\n line_color=colors[i%n_colors],\n showlegend=False,\n visible=False))\n\n last_day = df_measure_confirmed.index.max()\n day = pd.DateOffset(days=1)\n fig.update_layout(title='',\n xaxis=dict(rangeslider_visible=True,\n range=(last_day - 10 * day,\n last_day + 4 * day)))\n fig.update_layout(\n updatemenus=[\n dict(\n type = \"buttons\",\n direction = \"left\",\n buttons=list([\n dict(\n args=[{\"visible\": [False,]*len(df_measure_confirmed.columns)}],\n label=\"Reset\",\n method=\"update\",\n ),\n dict(\n args=[\"yaxis\", {'type':'log'}],\n label=\"log\",\n method=\"relayout\",\n ),\n dict(\n args=[\"yaxis\", {'type':'linear'}],\n label=\"lin\",\n method=\"relayout\",\n ),\n\n ]),\n pad={\"r\": 10, \"t\": 10, \"b\":5},\n showactive=True,\n x=0.05,\n xanchor=\"left\",\n y=1.35,\n yanchor=\"top\",\n font_color='black',\n ),\n ],\n height=.9*FIRST_LINE_HEIGHT,\n)\n\n return fig",
"def plot_graph(self) -> None:",
"def plot():\n pass",
"def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r",
"def plot_precision_figure(self):\n\n data_analysis = DatabaseData(dataframe=self.plot_data)\n prop_data, energy_data, M, C, pred_energy, pred_property = \\\n data_analysis.create_precision_bokeh_compat(self.prop_data, self.energy_data, properties=self.properties)\n p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n x_axis_type=\"log\", y_axis_type=\"log\", x_axis_label='Energy Convergence (meV/atom)', title='Slope M is {0}'.format(str(M)) )\n p.line(pred_energy, pred_property, color='red')\n p.circle(self.energy_data, self.prop_data, color='blue',size=5, line_alpha=0)\n #p.multi_line(xs_err, ys_err, color='black')\n if self.properties == 'B':\n p.yaxis.axis_label = 'Bulk Modulus B (%)'\n elif self.properties == 'dB':\n p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative (%)'\n elif self.properties == 'Multiple':\n p.yaxis.axis_label = \"V0, B, B' (%)\"\n elif self.properties == 'V0':\n p.yaxis.axis_label = 'Volume (%)'\n\n return p",
"def plot(self, fn, train_fn):\n from keras.utils.visualize_util import plot\n X, Y = self.load_dataset(train_fn)\n self.model_fn()\n plot(self.model, to_file = fn)",
"def plot_diagram(probs, labels, y_axis='accuracy'):\n probs_labels = [(prob, labels[i]) for i, prob in enumerate(probs)]\n probs_labels = np.array(sorted(probs_labels, key=lambda x: x[0]))\n window_len = int(len(labels)/100.)\n calibration_errors = []\n confidences = []\n accuracies = []\n # More interesting than length of the window (which is specific to this\n # window) is average distance between datapoints. This normalizes by dividing\n # by the window length.\n distances = []\n for i in range(len(probs_labels)-window_len):\n distances.append((\n probs_labels[i+window_len, 0] - probs_labels[i, 0])/float(window_len))\n # It's pretty sketchy to look for the 100 datapoints around this one.\n # They could be anywhere in the probability simplex. This introduces bias.\n mean_confidences = mean(probs_labels[i:i + window_len, 0])\n confidences.append(mean_confidences)\n class_accuracies = mean(probs_labels[i:i + window_len, 1])\n accuracies.append(class_accuracies)\n calibration_error = class_accuracies-mean_confidences\n calibration_errors.append(calibration_error)\n\n if y_axis == 'accuracy':\n fig, ax = plt.subplots()\n fig.set_size_inches(5, 5)\n xbins = [i/10. for i in range(11)]\n ax.plot(confidences, accuracies, color='green')\n ax.plot(xbins, xbins, color='orange')\n ax.set_xlabel('Model Confidence')\n ax.set_ylabel('Model Accuracy')\n elif y_axis == 'error':\n fig, ax = plt.subplots()\n fig.set_size_inches(5, 5)\n xbins = [i/10. for i in range(11)]\n ax.plot(confidences, calibration_errors, color='green')\n ax.plot(xbins, xbins, color='orange')\n ax.set_xlabel('Model Confidence')\n ax.set_ylabel('Model Calibration Error')\n ax.set_title('Reliability Diagram', fontsize=20)\n return fig",
"def doAllPlots ():\n #df = processIp (\"18-06-01-1-attack.pcap\", \"ec:1a:59:79:f4:89\")\n #df.to_csv (\"df.csv\", index=False)\n df = pd.read_csv (\"df.csv\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropyWithThreshold (df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n \"\"\"\n Traffic flow graph\n \"\"\"\n #df = processTrafficFlow (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotTrafficFlow (df)\n\n \"\"\"\n Entropy for source port\n \"\"\"\n #df = processSrcPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df)\n\n \"\"\"\n Entropy for destination port\n \"\"\" \n #df = processDstPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df) \n\n \"\"\"\n It will be implemented next day\n df = processPorts (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n df = processProtocols (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n \"\"\"\n return"
] | [
"0.59768915",
"0.57902294",
"0.57902294",
"0.56942993",
"0.5668916",
"0.56470734",
"0.557234",
"0.55706507",
"0.55304885",
"0.55228186",
"0.5484611",
"0.5461995",
"0.5434274",
"0.5413838",
"0.5411302",
"0.54013497",
"0.5389023",
"0.53875077",
"0.5386788",
"0.5372324",
"0.53510576",
"0.5348966",
"0.53445804",
"0.53302044",
"0.5326564",
"0.5323342",
"0.5320521",
"0.530579",
"0.5298926",
"0.52949476"
] | 0.6330226 | 0 |
Bootstrapped version of plot_reliability_curve. B = number of bins (separated by forecast probability) | def plot_bootstrapped_reliability_curve(
axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
line_colour=DEFAULT_RELIABILITY_COLOUR,
line_width=DEFAULT_RELIABILITY_WIDTH,
perfect_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,
perfect_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH):
plot_reliability_curve(
axes_object=axes_object,
mean_forecast_by_bin=ci_mean_dict[
model_eval.MEAN_FORECAST_BY_BIN_KEY],
event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
line_colour=line_colour, line_width=line_width,
perfect_line_colour=perfect_line_colour,
perfect_line_width=perfect_line_width)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]
)
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(line_colour),
TRANSPARENCY_FOR_CONFIDENCE_INTERVAL
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_reliability_curve(\n axes_object, mean_forecast_by_bin, event_frequency_by_bin,\n line_colour=DEFAULT_RELIABILITY_COLOUR,\n line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH):\n\n error_checking.assert_is_numpy_array(\n mean_forecast_by_bin, num_dimensions=1)\n error_checking.assert_is_geq_numpy_array(\n mean_forecast_by_bin, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n mean_forecast_by_bin, 1., allow_nan=True)\n num_bins = len(mean_forecast_by_bin)\n\n error_checking.assert_is_numpy_array(\n event_frequency_by_bin, exact_dimensions=numpy.array([num_bins]))\n error_checking.assert_is_geq_numpy_array(\n event_frequency_by_bin, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n event_frequency_by_bin, 1., allow_nan=True)\n\n perfect_x_coords, perfect_y_coords = (\n model_eval.get_perfect_reliability_curve()\n )\n\n axes_object.plot(\n perfect_x_coords, perfect_y_coords,\n color=plotting_utils.colour_from_numpy_to_tuple(perfect_line_colour),\n linestyle='dashed', linewidth=perfect_line_width\n )\n\n nan_flags = numpy.logical_or(\n numpy.isnan(mean_forecast_by_bin),\n numpy.isnan(event_frequency_by_bin)\n )\n\n if not numpy.all(nan_flags):\n real_indices = numpy.where(numpy.invert(nan_flags))[0]\n\n axes_object.plot(\n mean_forecast_by_bin[real_indices],\n event_frequency_by_bin[real_indices],\n color=plotting_utils.colour_from_numpy_to_tuple(line_colour),\n linestyle='solid', linewidth=line_width\n )\n\n axes_object.set_xlabel('Forecast probability')\n axes_object.set_ylabel('Conditional event frequency')\n axes_object.set_xlim(0., 1.)\n axes_object.set_ylim(0., 1.)",
"def plot_model_curves(class_name, model, range_metrics, ax):\n def plot_axis(ax, data, color):\n \"\"\"\n Plot data on axis in certain color\n \"\"\"\n x_indices = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n ax.scatter(x_indices, data, color=color, s=4)\n ax.plot(x_indices, data, color=color, linewidth=2)\n ax.set_yticks([]) # same for y ticks\n ax.set_ylim([0, 1])\n # Get balanced purities\n preds = np.concatenate(model.results)\n if model.name == \"Binary Classifiers\":\n purities = get_binary_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n else:\n purities = get_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n\n # Get completenesses\n comps = get_completeness_ranges(model.class_counts, range_metrics, class_name)\n\n print(\"\\n\\n Model: \" + str(model.name) + \", class: \" + class_name)\n print(\"Completeness\")\n print(comps)\n print(\"Purity\")\n print(purities)\n\n plot_axis(ax, comps, C_BAR_COLOR)\n ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\n ax2.set_ylim([0, 1])\n plot_axis(ax2, purities, P_BAR_COLOR)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n return ax2",
"def plot_betweeness(net, label, outpath):\n _, betweeness_values = networkit_util.get_betweeness(net, label, outpath)\n unique_value, unique_cnt = np.unique(betweeness_values, return_counts=True)\n unique_cumcnt = np.cumsum(unique_cnt) / sum(unique_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(unique_value, unique_cumcnt, 'b.')\n # ax.set_title('Cumulative distribution of betweeness centrality of nodes')\n ax.set_xlabel('betweeness centrality b')\n ax.set_ylabel('p(x <= b)')\n plt.savefig(outpath + label + \"-betweeness-distribution.eps\")",
"def plot_calibration_curve(est, name, fig_index):\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1.)\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),(est, name),(isotonic, name + ' + Isotonic'),(sigmoid, name + ' + Sigmoid')]:\n #Para cada modelo, entrenamos y predecimos \n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()",
"def plot_np_reliability(t, r, ci_lb=None, ci_ub=None, ax=None, linestyle='-',\n color='blue', label=None):\n if ax is None:\n ax = plt.gca()\n\n # Plot r against t as step function.\n ax.step(t, r, where='post', color=color,\n label=label, linestyle=linestyle)\n\n # Add confidence intervals if provided.\n if ci_lb is not None:\n ax.step(t, ci_lb, where='post', color=color, alpha=0.5,\n linestyle=linestyle)\n if ci_ub is not None:\n ax.step(t, ci_ub, where='post', color=color, alpha=0.5,\n linestyle=linestyle)\n\n # Add axis-labels.\n ax.set_xlabel(r'$t$')\n ax.set_ylabel(r'$\\widehat R(t)$')\n\n ax.set_xlim(0)\n\n return ax",
"def eval_curve_b(self, sess, task_b_data):\r\n fdict = self._prerun(sess, None, task_b_data)\r\n cost_b, acc_b, acc_b_v = self.monitor_b(\r\n sess,\r\n task_b_data.x_train,\r\n task_b_data.y_train,\r\n task_b_data.x_test,\r\n task_b_data.y_test,\r\n fdict=fdict)\r\n return cost_b, acc_b, acc_b_v",
"def visualize_confidence_level(prediction_proba):\n data = (prediction_proba[0]*100).round(2)\n grad_percentage = pd.DataFrame(data = data,columns = ['Porcentage'],index = ['Est','Int','Int_Est','Rob','Rob_Est','Rob_Int','Rob_Int_Est'])\n ax = grad_percentage.plot(kind='barh', figsize=(7, 4), color='#0067e7', zorder=10, width=0.8)\n ax.legend().set_visible(False)\n ax.set_xlim(xmin=0, xmax=100)\n \n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(True)\n ax.spines['bottom'].set_visible(True)\n\n ax.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\n \n vals = ax.get_xticks()\n for tick in vals:\n ax.axvline(x=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\n\n ax.set_xlabel(\" Porcentage(%) Nivel de confianza\", labelpad=2, weight='bold', size=12)\n ax.set_ylabel(\"Victimización\", labelpad=10, weight='bold', size=12)\n ax.set_title('Nivel de confianza de la predicción ', fontdict=None, loc='center', pad=None, weight='bold')\n\n st.pyplot()\n \n return",
"def activation(self):\n def boltzmann(voltage, vhalf, k):\n voltage = list(map(lambda x: float(x), voltage))\n exponent = np.exp(-np.divide(np.subtract(voltage, vhalf), k))\n return (-1/ (exponent + 1))\n\n def find_v_half(current, voltage):\n \"\"\"\n current: array of normalized IV curve\n voltage: array of the test potentials\n \"\"\"\n for i in range(len(voltage)):\n if current.iloc[i] > -0.5:\n continue\n elif current.iloc[i] == -0.5:\n return voltage.iloc[i]\n else:\n volt1 = float(voltage.iloc[i-1])\n volt2 = float(voltage.iloc[i])\n curr1 = float(current.iloc[i-1])\n curr2 = float(current.iloc[i])\n V_half = volt1 + (volt1 - volt2)*(-0.5 - curr1)/(curr1 - curr2)\n return V_half\n\n v_half = []\n for i in range(self.n_cols):\n current = self.screened_data.iloc[:,i]\n #print(current)\n v_half.append(find_v_half(current, self.xaxis))\n print('Median: ', np.median(v_half))\n print('Min: ', np.min(v_half))\n print('Max: ', np.max(v_half))\n return 0\n # We only fit to the first half of the activation curve\n no_of_data_points = len(self.averaged_data)\n for i in range(no_of_data_points):\n if self.averaged_data[i+1] > self.averaged_data[i]:\n break\n \n self.averaged_data = self.averaged_data[0:i+1]\n self.xaxis = self.xaxis[0:i+1]\n\n \n initial_guess = [-10, 10]\n popt, pcov = curve_fit(boltzmann, self.xaxis, self.averaged_data, p0= initial_guess)\n plt.plot(self.xaxis, self.averaged_data, label = 'Average of all models')\n plt.plot(boltzmann(self.xaxis, *initial_guess), label = 'Initial Guess')\n plt.plot(self.xaxis, boltzmann(self.xaxis, *popt), label = 'Best Fit')\n plt.xlabel('Voltage (mV)')\n plt.ylabel('Normalized Current')\n plt.title('Steady State Activation')\n plt.legend()\n plt.savefig('activation_boltzmann_fit.png')\n return popt[0]",
"def ppk_plot(data: (List[int], List[float], pd.Series, np.array),\n upper_control_limit: (int, float), lower_control_limit: (int, float),\n threshold_percent: float = 0.001,\n ax: Axis = None):\n\n data = coerce(data)\n mean = data.mean()\n std = data.std()\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.hist(data, density=True, label='data', alpha=0.3)\n x = np.linspace(mean - 4 * std, mean + 4 * std, 100)\n pdf = stats.norm.pdf(x, mean, std)\n ax.plot(x, pdf, label='normal fit', alpha=0.7)\n\n bottom, top = ax.get_ylim()\n\n ax.axvline(mean, linestyle='--')\n ax.text(mean, top * 1.01, s='$\\mu$', ha='center')\n\n ax.axvline(mean + std, alpha=0.6, linestyle='--')\n ax.text(mean + std, top * 1.01, s='$\\sigma$', ha='center')\n\n ax.axvline(mean - std, alpha=0.6, linestyle='--')\n ax.text(mean - std, top * 1.01, s='$-\\sigma$', ha='center')\n\n ax.axvline(mean + 2 * std, alpha=0.4, linestyle='--')\n ax.text(mean + 2 * std, top * 1.01, s='$2\\sigma$', ha='center')\n\n ax.axvline(mean - 2 * std, alpha=0.4, linestyle='--')\n ax.text(mean - 2 * std, top * 1.01, s='-$2\\sigma$', ha='center')\n\n ax.axvline(mean + 3 * std, alpha=0.2, linestyle='--')\n ax.text(mean + 3 * std, top * 1.01, s='$3\\sigma$', ha='center')\n\n ax.axvline(mean - 3 * std, alpha=0.2, linestyle='--')\n ax.text(mean - 3 * std, top * 1.01, s='-$3\\sigma$', ha='center')\n\n ax.fill_between(x, pdf, where=x < lower_control_limit, facecolor='red', alpha=0.5)\n ax.fill_between(x, pdf, where=x > upper_control_limit, facecolor='red', alpha=0.5)\n\n lower_percent = 100.0 * stats.norm.cdf(lower_control_limit, mean, std)\n lower_percent_text = f'{lower_percent:.02f}% < LCL' if lower_percent > threshold_percent else None\n\n higher_percent = 100.0 - 100.0 * stats.norm.cdf(upper_control_limit, mean, std)\n higher_percent_text = f'{higher_percent:.02f}% > UCL' if higher_percent > threshold_percent else None\n\n left, right = ax.get_xlim()\n bottom, top = ax.get_ylim()\n cpk = calc_ppk(data, upper_control_limit=upper_control_limit, lower_control_limit=lower_control_limit)\n\n lower_sigma_level = (mean - lower_control_limit) / std\n if lower_sigma_level < 6.0:\n ax.axvline(lower_control_limit, color='red', alpha=0.25, label='limits')\n ax.text(lower_control_limit, top * 0.95, s=f'$-{lower_sigma_level:.01f}\\sigma$', ha='center')\n else:\n ax.text(left, top * 0.95, s=f'limit > $-6\\sigma$', ha='left')\n\n upper_sigma_level = (upper_control_limit - mean) / std\n if upper_sigma_level < 6.0:\n ax.axvline(upper_control_limit, color='red', alpha=0.25)\n ax.text(upper_control_limit, top * 0.95, s=f'${upper_sigma_level:.01f}\\sigma$', ha='center')\n else:\n ax.text(right, top * 0.95, s=f'limit > $6\\sigma$', ha='right')\n\n strings = [f'Ppk = {cpk:.02f}']\n\n strings.append(f'$\\mu = {mean:.3g}$')\n strings.append(f'$\\sigma = {std:.3g}$')\n\n if lower_percent_text:\n strings.append(lower_percent_text)\n if higher_percent_text:\n strings.append(higher_percent_text)\n\n props = dict(boxstyle='round', facecolor='white', alpha=0.75, edgecolor='grey')\n ax.text(right - (right - left) * 0.05, 0.85 * top, '\\n'.join(strings), bbox=props, ha='right', va='top')\n\n ax.legend(loc='lower right')",
"def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for potential outputting/modifying\n lines[bkg_cat] = line\n return ax",
"def bifurcation_diagram(args, Bpbmin, Bpbmax, ylim=(-1, 0.6)):\n\n xs = []\n Bpb_list = np.linspace(Bpbmin, Bpbmax, 100)\n Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl = args\n\n sol, t = calcODE(args, -1.5, -1.5, 0.5, 0.5, 0.5, 0.5, ts=4000, nt=2 ** 25)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n x0 = sol[0, :]\n n = np.array(ode(x0, t[0], *args))\n q, _ = np.linalg.qr(n[:, None], mode='complete')\n\n periods = []\n for Bpb in Bpb_list:\n args = (Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl)\n sol, t = calcODE(args, *sol[-1, :], ts=1000, nt=2 ** 15)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n for i in range(len(sol) - 1):\n x1 = sol[i]\n x2 = sol[i + 1]\n if np.sign(n @ (x2 - x0)) != np.sign(n @ (x1 - x0)):\n c1 = dist(x1, x0, n)\n c2 = dist(x2, x0, n)\n alpha = c2 / (c1 + c2)\n x_new = x1 + alpha * (x2 - x1)\n x = (x_new - x0).dot(q)\n xs.append((Bpb, x[0], x[1], x[2], x[3], x[4], x[5]))\n # if np.linalg.norm(x_new - x0) < 1e-2 and period is None:\n period = t[i] - periods[-1][-1] if len(periods) else 0\n periods.append((Bpb, period, np.linalg.norm(x_new - x0), t[i]))\n\n plt.figure(figsize=(15, 10))\n plt.scatter([i[0] for i in xs], [i[2] for i in xs], s=10)\n plt.xlabel('$B_{pb}$')\n\n # plt.ylim(ylim)\n plt.show()\n\n periods = [i for i in periods if i[1] > 0]\n\n return periods, xs",
"def inactivation(self):\n\n def boltzmann(voltage, vhalf, k):\n\n voltage = list(map(lambda x: float(x), voltage))\n exponent = np.exp(np.divide(np.subtract(voltage, vhalf), k))\n return (1/ (exponent + 1))\n\n def find_v_half(current, voltage):\n \"\"\"\n current: array of normalized IV curve\n voltage: array of the test potentials\n \"\"\"\n for i in range(len(voltage)):\n if current.iloc[i] < 0.5:\n continue\n elif current.iloc[i] == 0.5:\n return voltage.iloc[i]\n else:\n volt1 = float(voltage.iloc[i-1])\n volt2 = float(voltage.iloc[i])\n curr1 = float(current.iloc[i-1])\n curr2 = float(current.iloc[i])\n V_half = volt1 + (volt1 - volt2)*(0.5 - curr1)/(curr1 - curr2)\n return V_half\n\n v_half = []\n for i in range(self.n_cols):\n current = self.screened_data.iloc[:,i]\n #print(current)\n v_half.append(find_v_half(current, self.xaxis))\n print('Median: ', np.median(v_half))\n print('Min: ', np.min(v_half))\n print('Max: ', np.max(v_half))\n return 0\n\n initial_guess = [-42, 7]\n popt, pcov = curve_fit(boltzmann, self.xaxis, self.averaged_data, p0= initial_guess)\n plt.plot(self.xaxis, self.averaged_data, label = 'Average of all models')\n plt.plot(boltzmann(self.xaxis, *initial_guess), label = 'Initial Guess')\n plt.plot(self.xaxis, boltzmann(self.xaxis, *popt), label = 'Best Fit')\n plt.xlabel('Voltage (mV)')\n plt.ylabel('Normalized Current')\n plt.title('Steady State Inactivation')\n plt.legend()\n plt.savefig('inactivation_boltzmann_fit.png')\n return popt[0]",
"def plot_train_distr(xgb_model,X,y,out_dir=\"res/\"):\n\t\n\t#Get the predicted probabilities for both classes (store them seperately)\n\tprobs_oxid = xgb_model.predict_proba(X[y==1])[:,1]\n\tprobs_native = xgb_model.predict_proba(X[y==0])[:,1]\n\t\n\t#Plot density distribution for probailities\n\tpd.Series(probs_oxid).plot(kind=\"density\")\n\tpd.Series(probs_native).plot(kind=\"density\")\n\taxes = plt.gca()\n\taxes.set_xlim([0.0,1.0])\n\tplt.savefig(out_dir+\"density_groups.png\", bbox_inches='tight')\n\tplt.close()\n\t\n\t#Plot density distribution for probailities; zoom in more so the y-axis is readable\n\tpd.Series(probs_oxid).plot(kind=\"density\")\n\tpd.Series(probs_native).plot(kind=\"density\")\n\taxes = plt.gca()\n\taxes.set_xlim([0.0,1.0])\n\taxes.set_ylim([0.0,1.0])\n\tplt.savefig(out_dir+'density_groups_zoomed.png', bbox_inches='tight')\n\tplt.close()\n\n\t#Plot probability distributions in histogram\n\tplt.hist(probs_native,bins=100)\n\tplt.hist(probs_oxid,bins=100)\n\tplt.savefig(out_dir+'hist_groups.png', bbox_inches='tight')\n\tplt.close()\n\t\n\t#Plot probability distributions in histogram; zoom in more so the y-axis is readable\n\tplt.hist(probs_native,bins=100)\n\tplt.hist(probs_oxid,bins=100)\n\taxes = plt.gca()\n\taxes.set_ylim([0.0,1000.0])\n\tplt.savefig(out_dir+'hist_groups_zoomed.png', bbox_inches='tight')\n\tplt.close()",
"def plot_progression(weights, bhs, bvs):\n\tweights_plot = []\n\tfor i in range(40):\n\t\tweights_plot.append(weights[i][0][0])\t# only plots the first value in the matrix every time\n\tplt.plot(weights_plot)\n\n\tplt.show()",
"def plot_calibration_curve(est, name, fig_index, data):\n\n X_train = data[0]\n X_test = data[1]\n y_train = data[2]\n y_test = data[3]\n\n y = np.concatenate([y_train, y_test], axis=0)\n\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1., solver='lbfgs')\n\n fig = plt.figure(1, figsize=(15, 10))\n ax1 = plt.subplot2grid((4, 6), (0, 0), colspan=2, rowspan=2)\n ax2 = plt.subplot2grid((4, 6), (0, 2), colspan=2, rowspan=2)\n ax3 = plt.subplot2grid((4, 6), (0, 4), colspan=2, rowspan=2)\n ax4 = plt.subplot2grid((4, 6), (2, 0), colspan=6, rowspan=2)\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),\n (est, name),\n (isotonic, name + ' + Isotonic'),\n (sigmoid, name + ' + Sigmoid')]:\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n y_proba = prob_pos.copy()\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n y_proba = prob_pos.copy()\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\" % f1_score(y_test, y_pred))\n print(\"\\tAve. Precision Score: %1.3f\\n\" % \\\n average_precision_score(y_test, y_proba))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n fpr, tpr, thresholds = roc_curve(y_test, y_proba, drop_intermediate=False)\n roc_auc = roc_auc_score(y_test, y_proba)\n ax2.plot(fpr, tpr, ls='-', label=\"%s (%1.3f)\" % (name, roc_auc))\n\n precision, recall, _ = precision_recall_curve(y_test, y_proba)\n ax3.plot(recall, precision)\n\n ax4.hist(prob_pos, range=(0, 1), bins=10,\n label='%s' % name, histtype=\"step\", lw=2)\n\n ax1.set_xlabel(\"Score\", fontsize=14)\n ax1.set_ylabel(\"Fraction of positives\", fontsize=14)\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)', fontsize=16)\n\n ax2.set_xlabel(\"False Positive Rate\", fontsize=14)\n ax2.set_ylabel(\"True Positive Rate\", fontsize=14)\n ax2.set_ylim([-0.05, 1.05])\n ax2.legend(loc=\"lower right\")\n ax2.set_title('ROC Curve', fontsize=16)\n\n ax3.set_xlabel(\"Recall\", fontsize=14)\n ax3.set_ylabel(\"Precision\", fontsize=14)\n ax3.set_ylim([-0.05, 1.05])\n ax3.legend(loc=\"lower center\")\n ax3.set_title('Precision-Recall Curve', fontsize=16)\n\n ax4.set_xlabel(\"Mean predicted value\", fontsize=14)\n ax4.set_ylabel(\"Count\", fontsize=14)\n ax4.legend(loc=\"upper center\")\n ax4.set_title('Classification Result', fontsize=16)\n\n plt.tight_layout()\n\n plt.show()\n\n return",
"def plot_for_scaling_check(bolo_name):\n\n\n pop_path = \"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\"\n\n #Load the estimator\n d_est = BDT_fh.open_estimator_file(bolo_name)\n\n #Best estimator for heat: coefficients\n coeff_EC1, coeff_EC2 = float(d_est[\"HEAT\"][:5]), 1 - float(d_est[\"HEAT\"][:5])\n coeff_EIB, coeff_EID = float(d_est[\"FID\"][:5]), 1-float(d_est[\"FID\"][:5])\n\n #Open event files\n data_types = {\"names\": (\"EC1\", \"EC2\", \"EIA\", \"EIB\", \"EIC\", \"EID\"), \"formats\": (\"f\", \"f\", \"f\", \"f\", \"f\", \"f\")}\n\n arr_heatonly = np.loadtxt(pop_path + bolo_name + \"_heatonly_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_all = np.loadtxt(pop_path + bolo_name + \"_all_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_FidGamma = np.loadtxt(pop_path + bolo_name + \"_FidGamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Gamma = np.loadtxt(pop_path + bolo_name + \"_S1Gamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Gamma = np.loadtxt(pop_path + bolo_name + \"_S2Gamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Beta = np.loadtxt(pop_path + bolo_name + \"_S1Beta_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Beta = np.loadtxt(pop_path + bolo_name + \"_S2Beta_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Pb = np.loadtxt(pop_path + bolo_name + \"_S1Pb_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Pb = np.loadtxt(pop_path + bolo_name + \"_S2Pb_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n\n arr_EI_heatonly, arr_EI_all = coeff_EIB*arr_heatonly[\"EIB\"] + coeff_EID*arr_heatonly[\"EID\"], coeff_EIB*arr_all[\"EIB\"] + coeff_EID*arr_all[\"EID\"]\n arr_EC_heatonly, arr_EC_all = coeff_EC1*arr_heatonly[\"EC1\"] + coeff_EC2*arr_heatonly[\"EC2\"], coeff_EC1*arr_all[\"EC1\"] + coeff_EC2*arr_all[\"EC2\"]\n arr_EI_FidGamma, arr_EC_FidGamma = coeff_EIB*arr_FidGamma[\"EIB\"] + coeff_EID*arr_FidGamma[\"EID\"], coeff_EC1*arr_FidGamma[\"EC1\"] + coeff_EC2*arr_FidGamma[\"EC2\"]\n arr_EI_S1Gamma, arr_EI_S2Gamma = coeff_EIB*arr_S1Gamma[\"EIB\"] + coeff_EID*arr_S1Gamma[\"EID\"], coeff_EIB*arr_S2Gamma[\"EIB\"] + coeff_EID*arr_S2Gamma[\"EID\"]\n arr_EC_S1Gamma, arr_EC_S2Gamma = coeff_EC1*arr_S1Gamma[\"EC1\"] + coeff_EC2*arr_S1Gamma[\"EC2\"], coeff_EC1*arr_S2Gamma[\"EC1\"] + coeff_EC2*arr_S2Gamma[\"EC2\"]\n arr_EI_S1Beta, arr_EI_S2Beta = coeff_EIB*arr_S1Beta[\"EIB\"] + coeff_EID*arr_S1Beta[\"EID\"], coeff_EIB*arr_S2Beta[\"EIB\"] + coeff_EID*arr_S2Beta[\"EID\"]\n arr_EC_S1Beta, arr_EC_S2Beta = coeff_EC1*arr_S1Beta[\"EC1\"] + coeff_EC2*arr_S1Beta[\"EC2\"], coeff_EC1*arr_S2Beta[\"EC1\"] + coeff_EC2*arr_S2Beta[\"EC2\"]\n arr_EI_S1Pb, arr_EI_S2Pb = coeff_EIB*arr_S1Pb[\"EIB\"] + coeff_EID*arr_S1Pb[\"EID\"], coeff_EIB*arr_S2Pb[\"EIB\"] + coeff_EID*arr_S2Pb[\"EID\"]\n arr_EC_S1Pb, arr_EC_S2Pb = coeff_EC1*arr_S1Pb[\"EC1\"] + coeff_EC2*arr_S1Pb[\"EC2\"], coeff_EC1*arr_S2Pb[\"EC1\"] + coeff_EC2*arr_S2Pb[\"EC2\"]\n\n lS1Beta, lS2Beta, lS1Pb, lS2Pb = np.where(arr_EC_S1Beta<15), np.where(arr_EC_S2Beta<15), np.where(arr_EC_S1Pb<15), np.where(arr_EC_S2Pb<15)\n lS1Gamma, lS2Gamma, lFidGamma = np.where(arr_EC_S1Gamma<15), np.where(arr_EC_S2Gamma<15), np.where(arr_EC_FidGamma<15)\n lheatonly, lall = np.where(arr_EC_heatonly<15), np.where(arr_EC_all<15)\n\n arr_EI_heatonly, arr_EC_heatonly = arr_EI_heatonly[lheatonly], arr_EC_heatonly[lheatonly]\n arr_EI_all, arr_EC_all = arr_EI_all[lall], arr_EC_all[lall]\n 
arr_EI_FidGamma, arr_EC_FidGamma = arr_EI_FidGamma[lFidGamma], arr_EC_FidGamma[lFidGamma]\n arr_EI_S1Gamma, arr_EC_S1Gamma = arr_EI_S1Gamma[lS1Gamma], arr_EC_S1Gamma[lS1Gamma]\n arr_EI_S2Gamma, arr_EC_S2Gamma = arr_EI_S2Gamma[lS2Gamma], arr_EC_S2Gamma[lS2Gamma]\n arr_EI_S1Beta, arr_EC_S1Beta = arr_EI_S1Beta[lS1Beta], arr_EC_S1Beta[lS1Beta]\n arr_EI_S2Beta, arr_EC_S2Beta = arr_EI_S2Beta[lS2Beta], arr_EC_S2Beta[lS2Beta]\n arr_EI_S1Pb, arr_EC_S1Pb = arr_EI_S1Pb[lS1Pb], arr_EC_S1Pb[lS1Pb]\n arr_EI_S2Pb, arr_EC_S2Pb = arr_EI_S2Pb[lS2Pb], arr_EC_S2Pb[lS2Pb]\n\n arr_EI_all, arr_EC_all = np.array(arr_EI_all).astype(float), np.array(arr_EC_all).astype(float)\n arr_EI_heatonly, arr_EC_heatonly = np.array(arr_EI_heatonly).astype(float), np.array(arr_EC_heatonly).astype(float)\n arr_EI_FidGamma, arr_EC_FidGamma = np.array(arr_EI_FidGamma).astype(float), np.array(arr_EC_FidGamma).astype(float)\n arr_EI_S1Gamma, arr_EC_S1Gamma = np.array(arr_EI_S1Gamma).astype(float), np.array(arr_EC_S1Gamma).astype(float)\n arr_EI_S2Gamma, arr_EC_S2Gamma = np.array(arr_EI_S2Gamma).astype(float), np.array(arr_EC_S2Gamma).astype(float) \n arr_EI_S1Beta, arr_EC_S1Beta = np.array(arr_EI_S1Beta).astype(float), np.array(arr_EC_S1Beta).astype(float)\n arr_EI_S2Beta, arr_EC_S2Beta = np.array(arr_EI_S2Beta).astype(float), np.array(arr_EC_S2Beta).astype(float)\n arr_EI_S1Pb, arr_EC_S1Pb = np.array(arr_EI_S1Pb).astype(float), np.array(arr_EC_S1Pb).astype(float)\n arr_EI_S2Pb, arr_EC_S2Pb = np.array(arr_EI_S2Pb).astype(float), np.array(arr_EC_S2Pb).astype(float)\n\n\n gr_heatonly = TGraph(len(arr_EI_heatonly), arr_EC_heatonly, arr_EI_heatonly)\n gr_FidGamma, gr_all = TGraph(len(arr_EI_FidGamma), arr_EC_FidGamma, arr_EI_FidGamma), TGraph(len(arr_EI_all), arr_EC_all, arr_EI_all)\n gr_S1Gamma, gr_S2Gamma = TGraph(len(arr_EI_S1Gamma), arr_EC_S1Gamma, arr_EI_S1Gamma), TGraph(len(arr_EI_S2Gamma), arr_EC_S2Gamma, arr_EI_S2Gamma)\n gr_S1Beta, gr_S2Beta = TGraph(len(arr_EI_S1Beta), arr_EC_S1Beta, arr_EI_S1Beta), TGraph(len(arr_EI_S2Beta), arr_EC_S2Beta, arr_EI_S2Beta)\n gr_S1Pb, gr_S2Pb = TGraph(len(arr_EI_S1Pb), arr_EC_S1Pb, arr_EI_S1Pb), TGraph(len(arr_EI_S2Pb), arr_EC_S2Pb, arr_EI_S2Pb)\n\n PyRPl.process_TGraph(gr_all, X_title = \"Heat\", Y_title = \"Ion\", color=kRed, marker_style = 20, marker_size = 0.1)\n PyRPl.process_TGraph(gr_FidGamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_heatonly, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Gamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_S2Gamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Beta, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_S2Beta, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Pb, X_title = \"Heat\", Y_title = \"Ion\", color=kRed), PyRPl.process_TGraph(gr_S2Pb, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n\n list_gr = [gr_all, gr_FidGamma, gr_S1Gamma, gr_S2Gamma, gr_S1Beta, gr_S2Beta, gr_S1Pb, gr_S2Pb, gr_heatonly]\n list_pts = [gr.GetN() for gr in list_gr[1:]]\n print gr_all.GetN(), sum(list_pts)\n h = TH2F(\"h\", \"h\", 100, -5, 15, 100, -5, 15)\n PyRPl.process_TH2(h, X_title = \"Heat\", Y_title = \"Ion\")\n h.Draw()\n for gr in list_gr:\n gr.Draw(\"*same\")\n\n raw_input()\n\n # arr_Q_S1Beta = arr_EI_S1Beta/((1+8./3)*arr_EC_S1Beta - arr_EI_S1Beta*5.5/3)\n # arr_Q_S2Beta = arr_EI_S1Beta/((1+8./3)*arr_EC_S1Beta - arr_EI_S1Beta*5.5/3)\n # arr_Q_S1Pb = 
arr_EI_S1Pb/((1+8./3)*arr_EC_S1Pb - arr_EI_S1Pb*5.5/3)\n # arr_Q_S2Pb = arr_EI_S2Pb/((1+8./3)*arr_EC_S2Pb - arr_EI_S2Pb*5.5/3)\n \n # gr_QS1Beta, gr_QS2Beta = TGraph(len(arr_Q_S1Beta), arr_EC_S1Beta, arr_Q_S1Beta), TGraph(len(arr_Q_S2Beta), arr_EC_S2Beta, arr_Q_S2Beta)\n # gr_QS1Pb, gr_QS2Pb = TGraph(len(arr_Q_S1Pb), arr_EC_S1Pb, arr_Q_S1Pb), TGraph(len(arr_Q_S2Pb), arr_EC_S2Pb, arr_Q_S2Pb)\n\n\n # PyRPl.process_TGraph(gr_QS1Beta, X_title = \"Heat\", Y_title = \"Q\", color=kOrange-3), PyRPl.process_TGraph(gr_QS2Beta, X_title = \"Heat\", Y_title = \"Q\", color=kBlue)\n # PyRPl.process_TGraph(gr_QS1Pb, X_title = \"Heat\", Y_title = \"Q\", color=kRed), PyRPl.process_TGraph(gr_QS2Pb, X_title = \"Heat\", Y_title = \"Q\", color=kGreen+2)",
"def plot_precision_recall_curve(estimator, X, y, *, sample_weight=..., response_method=..., name=..., ax=..., pos_label=..., **kwargs):\n ...",
"def plot_basins(f, Df, zeros, domain, res=1000, iters=15):\n raise NotImplementedError(\"Problem 7 Incomplete\")",
"def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)",
"def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()",
"def figure_10_12_b():\n xs = np.arange(-6,6,0.1)\n plt.plot(xs,sigmoid(xs))\n x=2.5\n plt.scatter(x,sigmoid(x))\n plt.plot(xs,logistic_lower_bound(xs,x))\n plt.show()",
"def plot_bias_smooth(bias, bias_smooth, comp_figfile, hist_figfile):\n h, w = bias.shape\n # calculate the residual between bias and smoothed bias data\n bias_res = bias - bias_smooth\n\n fig1 = plt.figure(figsize=(12,4), dpi=150)\n ax1 = fig1.add_axes([0.055, 0.12, 0.25, 0.75])\n ax2 = fig1.add_axes([0.355, 0.12, 0.25, 0.75])\n ax3 = fig1.add_axes([0.655, 0.12, 0.25, 0.75])\n mean = bias.mean(dtype=np.float64)\n std = bias.std(dtype=np.float64, ddof=1)\n vmin = mean - 2.*std\n vmax = mean + 2.*std\n cax1 = ax1.imshow(bias, vmin=vmin, vmax=vmax, cmap='gray')\n cax2 = ax2.imshow(bias_smooth, vmin=vmin, vmax=vmax, cmap='gray')\n cax3 = ax3.imshow(bias_res, vmin=vmin, vmax=vmax, cmap='gray')\n cbar_ax = fig1.add_axes([0.925, 0.12, 0.02, 0.75])\n cbar = fig1.colorbar(cax1, cax=cbar_ax)\n ax1.set_title('bias')\n ax2.set_title('bias_smooth')\n ax3.set_title('bias - bias_smooth')\n for ax in [ax1,ax2,ax3]:\n ax.set_xlim(0, bias.shape[1]-1)\n ax.set_ylim(bias.shape[1]-1, 0)\n ax.set_xlabel('X', fontsize=11)\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n # only show y label in the left panel\n ax1.set_ylabel('Y',fontsize=11)\n \n # plot the histogram of smoothed bias\n # prepare the bin list\n bins = np.linspace(-4, 4, 40+1)\n \n # prepare the gaussian fitting and error function\n fitfunc = lambda p,x:p[0]*np.exp(-0.5*(x-p[1])**2/p[2]**2)\n errfunc = lambda p,x,y: y-fitfunc(p,x)\n \n # create figure\n fig2 = plt.figure(figsize=(8,6), dpi=150)\n for i, j in [(i, j) for i in range(3) for j in range(3)]:\n ax = fig2.add_axes([0.1+j*0.3, 0.7-i*0.3, 0.27, 0.27])\n \n labels = 'abcdefghi'\n alpha = 0.7\n # plot both bias and smoothed bias\n for idata,data in enumerate([bias,bias_res]):\n message = ['Parameters for gaussian fitting of the histograms',\n 'y, x, A, center, sigma']\n for iy, ix in [(iy, ix) for iy in range(3) for ix in range(3)]:\n yc = iy*(h//4) + h//4\n xc = ix*(w//4) + w//4\n x1, x2 = xc-200, xc+200\n y1, y2 = yc-200, yc+200\n ax1.plot([x1,x2], [y1,y1], 'm-', alpha=alpha)\n ax1.plot([x1,x2], [y2,y2], 'm-', alpha=alpha)\n ax1.plot([x1,x1], [y1,y2], 'm-', alpha=alpha)\n ax1.plot([x2,x2], [y1,y2], 'm-', alpha=alpha)\n ax3.plot([x1,x2], [y1,y1], 'c-', alpha=alpha)\n ax3.plot([x1,x2], [y2,y2], 'c-', alpha=alpha)\n ax3.plot([x1,x1], [y1,y2], 'c-', alpha=alpha)\n ax3.plot([x2,x2], [y1,y2], 'c-', alpha=alpha)\n ax1.text(xc-50,yc-20,'(%s)'%labels[iy*3+ix],color='m')\n ax3.text(xc-50,yc-20,'(%s)'%labels[iy*3+ix],color='c')\n data_cut = data[y1:y2,x1:x2]\n y,_ = np.histogram(data_cut, bins=bins)\n x = (np.roll(bins,1) + bins)/2\n x = x[1:]\n # use least square minimization function in scipy\n p1,succ = opt.leastsq(errfunc,[y.max(),0.,1.],args=(x,y))\n ax = fig2.get_axes()[iy*3+ix]\n color1 = ('r', 'b')[idata]\n color2 = ('m', 'c')[idata]\n # plot the histogram\n ax.bar(x, y, align='center', color=color1, width=0.2, alpha=0.5)\n # plot the gaussian fitting of histogram\n xnew = np.linspace(x[0], x[-1], 201)\n ax.plot(xnew, fitfunc(p1, xnew), color2+'-', lw=2)\n ax.set_xlim(-4, 4)\n x1,x2 = ax.get_xlim()\n y1,y2 = ax.get_ylim()\n message.append('%4d %4d %+10.8e %+10.8e %+6.3f'%(\n yc, xc, p1[0], p1[1], p1[2]))\n \n # write the fitting parameters into running log\n logger.info((os.linesep+' ').join(message))\n \n # find maximum y in different axes\n max_y = 0\n for iax, ax in enumerate(fig2.get_axes()):\n y1, y2 = ax.get_ylim()\n if y2 > max_y:\n max_y = y2\n \n # set y range for all axes\n for iax, ax 
in enumerate(fig2.get_axes()):\n x1, x2 = ax.get_xlim()\n ax.text(0.9*x1+0.1*x2, 0.2*y1+0.8*y2, '(%s)'%labels[iax],\n fontsize=12)\n ax.set_ylim(0, max_y)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(12)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(12)\n \n if iax in [0, 3, 6]:\n ax.set_ylabel('$N$', fontsize=11)\n else:\n ax.set_yticklabels([])\n if iax in [6, 7, 8]:\n ax.set_xlabel('Counts', fontsize=11)\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(9)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(9)\n\n # save figures\n fig1.savefig(comp_figfile)\n fig2.savefig(hist_figfile)\n plt.close(fig1)\n plt.close(fig2)",
"def fit(xdata, ydata, cap=1, extrapolrange=10, ax=None, fig_title=None):\n if type(xdata) == list:\n xdata = np.array(xdata)\n if type(ydata) == list:\n ydata = np.array(ydata)\n\n # Cap on GR values\n # ----------------\n if cap > 0:\n ydata = np.array([np.min((yd, cap)) for yd in ydata])\n\n \n ge50_low = np.max((np.min(xdata) * 1e-4, 1e-7))\n ge50_high = np.min((np.max(xdata) * 1e2, 1e2))\n lower_bounds = [-.05, -np.log10(1), .025,\n -1, -np.log10(ge50_high), 0.025]\n upper_bounds = [1, -np.log10(ge50_low), 5,\n .5, -np.log10(0.3), 10]\n\n priors = [.1, -np.log10(np.median(xdata)), 2,\n -0.1, -np.log10(1), 2]\n\n cmin = np.log10(np.min(xdata)/extrapolrange)\n cmax = np.log10(np.max(xdata) * extrapolrange)\n xc = 10 ** (np.arange(cmin, cmax, 0.05))\n\n # Compute Biphasic fit\n # --------------------\n popt_bp, pcov_bp = curve_fit(biphasic_fit_function, xdata, ydata,\n bounds=(lower_bounds, upper_bounds),\n p0=priors)\n yfit_bp = biphasic_fit_function(xc, *popt_bp)\n #popt_bp[1] = 10 ** -popt_bp[1]\n #popt_bp[4] = 10 ** -popt_bp[4]\n \n # Compute Sigmoidal fit 1\n # ------------------------\n popt_sig1, pcov_sig1 = curve_fit(sigmoidal_fit_function, xdata, ydata,\n bounds=(lower_bounds[:3], upper_bounds[:3]),\n p0=priors[:3])\n sig1_rsquared = get_rsquare(sigmoidal_fit_function(xdata, *popt_sig1), ydata)\n yfit_sig1 = sigmoidal_fit_function(xc, *popt_sig1)\n popt_sig1[1] = 10 ** -popt_sig1[1]\n\n # Compute Sigmoidal fit 2\n # ------------------------\n popt_sig2, pcov_sig2 = curve_fit(sigmoidal_fit_function, xdata, ydata,\n bounds=(lower_bounds[3:], upper_bounds[3:]),\n p0=priors[3:])\n sig2_rsquared = get_rsquare(sigmoidal_fit_function(xdata, *popt_sig2), ydata)\n yfit_sig2 = sigmoidal_fit_function(xc, *popt_sig2)\n popt_sig2[1] = 10 ** -popt_sig2[1]\n \n if sig1_rsquared > sig2_rsquared:\n print('1st phase sigmoidal fit is the better of the 2 sigmoidal fits ')\n best_sig_fit = yfit_sig1\n sigmoidal_params = np.array(list(popt_sig1)+[1, -np.inf, .01])\n else:\n best_sig_fit = yfit_sig2\n print('2nd phase sigmoidal fit is the better of the 2 sigmoidal fits')\n sigmoidal_params = np.array([1, -np.inf, .01] + list(popt_sig2))\n\n # Plot data, biphasic and best sigmoidal fits\n # -------------------------------------------\n if ax is not None:\n ax.semilogx(xdata, ydata, 'ob', label='Measured GR value') \n ax.semilogx(xc, yfit_bp, 'lightblue', label='Biphasic fit')\n ax.semilogx(xc, best_sig_fit, '-k', label='Best sigmoidal fit')\n ax.set_ylim((-0.5, 1))\n xlim = (10 ** cmin, 10 ** cmax)\n ax.set_xlim(xlim)\n ax.plot(xlim, [0, 0], '--k')\n ax.set_title(fig_title)\n \n return yfit_bp, popt_bp, best_sig_fit, sigmoidal_params",
"def plot_stability_function(self,bounds=[-20,1]):\n import matplotlib.pyplot as plt\n p,q=self.stability_function()\n xx=np.arange(bounds[0], bounds[1], 0.01)\n yy=p(xx)/q(xx)\n fig, = plt.plot(xx,yy)\n plt.draw()",
"def pr_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import precision_recall_curve\n if train==True:\n ypredTrain = model.predict(X_train) \n precisions, recalls, thresholds = precision_recall_curve(y_train, ypredTrain)\n plt.plot(precisions, recalls, linewidth=3, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Train)\\n\", size=14) \n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n precisions, recalls, thresholds = precision_recall_curve(y_test, ypredTest)\n plt.plot(precisions, recalls, linewidth=3, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Test)\\n\", size=14)\n plt.show()",
"def plot_proba_function(clf, ax=None):\n fn = lambda x: clf.predict_proba(x)[0][0]\n plot_decision_function(fn, [0, 0.5, 1], ax)",
"def plotprice(self):\n plt.figure()\n plt.hist( self.pricetree[-1,:] )\n plt.title(\"price Distribution\") \n plt.show()",
"def prob4():\n domain = np.linspace(-1, 1, 400)\n n_array = [2**i for i in range(2,9)]\n f = lambda x: 1 / (1 + 25*x**2)\n plt.ion()\n poly_error = []\n cheby_error = []\n for n in n_array:\n x = np.linspace(-1, 1, n)\n poly = BarycentricInterpolator(x)\n poly.set_yi(f(x))\n poly_error.append(la.norm(f(domain) - poly(domain), ord=np.inf))\n y = np.array([(1/2)*(2*np.cos(j*np.pi/n)) for j in range(n+1)])\n cheby = BarycentricInterpolator(y)\n cheby.set_yi(f(y))\n cheby_error.append(la.norm(f(domain) - cheby(domain), ord=np.inf))\n plt.loglog(n_array, poly_error, label=\"equally spaced points\", basex=2)\n plt.loglog(n_array, cheby_error, label=\"Chebyshev extremal points\", basex=2)\n plt.legend()\n plt.show()",
"def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)",
"def plot_ook_ber(snr_min=0, snr_max=15, show_coherent=False):\n # Compute values\n SNR_MIN = snr_min\n SNR_MAX = snr_max\n Eb_No_dB = np.linspace(SNR_MIN, SNR_MAX, 10000)\n Eb_No = 10**(Eb_No_dB / 10.0)\n sigma = 0.1\n a = 4 * Eb_No\n b_th = sigma * np.sqrt(2 + Eb_No)\n sigma = b_th / np.sqrt(2 + Eb_No)\n b = (b_th / sigma) * (b_th / sigma)\n pes = np.exp(-b_th * b_th / (2 * sigma * sigma))\n pem = 1 - (ncx2.sf(x=b, df=2, nc=a))\n ook_non_coherent = 0.5 * (pem + pes)\n ook_coherent = 0.5 * erfc(np.sqrt(Eb_No / 2))\n # Generate plot\n init()\n fig, ax = plt.subplots(1, 1, figsize=(FIG_WIDTH, FIG_HEIGHT), dpi=DPI)\n xlabel = \"$E_b/N_o$ (dB)\"\n ylabel = \"BER\"\n labels = [\"Noncoherent\", \"Coherent\"]\n sns.lineplot(x=Eb_No_dB, y=ook_non_coherent, lw=0.75, markers=False,\n ax=ax, ci=None, color=colors[0], label=labels[0])\n if show_coherent:\n sns.lineplot(x=Eb_No_dB, y=ook_coherent, lw=0.75, markers=False,\n ax=ax, ci=None, color=colors[-1], label=labels[1])\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n ax.grid(which='both')\n ax.set_yscale('log')\n ax.minorticks_on()\n if not show_coherent:\n ax.legend().set_visible(False)\n ax.xaxis.grid(True, lw=.5, which='major')\n ax.yaxis.grid(True, lw=.5, which='major')\n ax.xaxis.grid(True, lw=.3, which='minor', ls=':')\n ax.yaxis.grid(True, lw=.3, which='minor', ls=':')\n ax.tick_params(width=.5, which='major')\n ax.tick_params(width=.3, which='minor')\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(0.5)\n ax.set_aspect(1 / ax.get_data_ratio(), adjustable='box')\n fig.tight_layout()\n return fig"
] | [
"0.61927634",
"0.584321",
"0.58100855",
"0.57739335",
"0.57387716",
"0.5699075",
"0.5687666",
"0.5648579",
"0.5588099",
"0.5552348",
"0.55387723",
"0.55168885",
"0.54948616",
"0.5494651",
"0.54943234",
"0.5490119",
"0.54568595",
"0.5451723",
"0.5428895",
"0.54109",
"0.53955597",
"0.539319",
"0.538386",
"0.53786",
"0.5366923",
"0.53480744",
"0.53451866",
"0.53391975",
"0.5332451",
"0.532974"
] | 0.6703668 | 0 |
Bootstrapped version of plot_attributes_diagram. | def plot_bootstrapped_attributes_diagram(
figure_object, axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
num_examples_by_bin,
reliability_line_colour=DEFAULT_RELIABILITY_COLOUR,
reliability_line_width=DEFAULT_RELIABILITY_WIDTH,
perfect_relia_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,
perfect_relia_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH,
no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,
no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,
other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,
other_line_width=DEFAULT_CLIMATOLOGY_WIDTH,
histogram_bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,
histogram_bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,
histogram_bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):
plot_attributes_diagram(
figure_object=figure_object, axes_object=axes_object,
mean_forecast_by_bin=ci_mean_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
num_examples_by_bin=num_examples_by_bin,
reliability_line_colour=reliability_line_colour,
reliability_line_width=reliability_line_width,
perfect_relia_line_colour=perfect_relia_line_colour,
perfect_relia_line_width=perfect_relia_line_width,
no_skill_line_colour=no_skill_line_colour,
no_skill_line_width=no_skill_line_width,
other_line_colour=other_line_colour, other_line_width=other_line_width,
histogram_bar_face_colour=histogram_bar_face_colour,
histogram_bar_edge_colour=histogram_bar_edge_colour,
histogram_bar_edge_width=histogram_bar_edge_width)
polygon_object = _confidence_interval_to_polygon(
x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],
x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],
y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]
)
polygon_colour = matplotlib.colors.to_rgba(
plotting_utils.colour_from_numpy_to_tuple(reliability_line_colour),
TRANSPARENCY_FOR_CONFIDENCE_INTERVAL
)
polygon_patch = PolygonPatch(
polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
axes_object.add_patch(polygon_patch) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, width, height, data, palette, labels, axis_labels=[], axis_label_height=16):\n\n DiagramElement.__init__(self)\n self.palette = palette\n self.labels = labels\n self.plot = DiscretePlot(width, height, data, self.labels, axis_labels,axis_label_height)\n self.plot.buildPalette(self.palette)",
"def _plot_init(self):\n pass",
"def _plot_init(self):\n pass",
"def plot_attributes_diagram(\n figure_object, axes_object, mean_forecast_by_bin,\n event_frequency_by_bin, num_examples_by_bin,\n reliability_line_colour=DEFAULT_RELIABILITY_COLOUR,\n reliability_line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_relia_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_relia_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH,\n no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,\n no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,\n other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,\n other_line_width=DEFAULT_CLIMATOLOGY_WIDTH,\n histogram_bar_face_colour=DEFAULT_HISTOGRAM_FACE_COLOUR,\n histogram_bar_edge_colour=DEFAULT_HISTOGRAM_EDGE_COLOUR,\n histogram_bar_edge_width=DEFAULT_HISTOGRAM_EDGE_WIDTH):\n\n error_checking.assert_is_numpy_array(\n event_frequency_by_bin, num_dimensions=1)\n error_checking.assert_is_geq_numpy_array(\n event_frequency_by_bin, 0., allow_nan=True)\n error_checking.assert_is_leq_numpy_array(\n event_frequency_by_bin, 1., allow_nan=True)\n num_bins = len(event_frequency_by_bin)\n\n error_checking.assert_is_integer_numpy_array(num_examples_by_bin)\n error_checking.assert_is_numpy_array(\n num_examples_by_bin, exact_dimensions=numpy.array([num_bins]))\n error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0)\n\n non_empty_bin_indices = numpy.where(num_examples_by_bin > 0)[0]\n error_checking.assert_is_numpy_array_without_nan(\n event_frequency_by_bin[non_empty_bin_indices])\n\n climatology = numpy.average(\n event_frequency_by_bin[non_empty_bin_indices],\n weights=num_examples_by_bin[non_empty_bin_indices]\n )\n\n _plot_background_of_attributes_diagram(\n axes_object=axes_object, climatology=climatology,\n no_skill_line_colour=no_skill_line_colour,\n no_skill_line_width=no_skill_line_width,\n other_line_colour=other_line_colour, other_line_width=other_line_width)\n\n _plot_inset_histogram_for_attributes_diagram(\n figure_object=figure_object, num_examples_by_bin=num_examples_by_bin,\n bar_face_colour=histogram_bar_face_colour,\n bar_edge_colour=histogram_bar_edge_colour,\n bar_edge_width=histogram_bar_edge_width)\n\n plot_reliability_curve(\n axes_object=axes_object,\n mean_forecast_by_bin=mean_forecast_by_bin,\n event_frequency_by_bin=event_frequency_by_bin,\n line_colour=reliability_line_colour, line_width=reliability_line_width,\n perfect_line_colour=perfect_relia_line_colour,\n perfect_line_width=perfect_relia_line_width)",
"def __init__(self, plot_design, label_classes, original_label_mapping):\n self.plot_design = plot_design\n self.labels, self.label_mapping = self._create_labels_and_mapping(label_classes, original_label_mapping)",
"def _init_node_attributes(self):\n assert False",
"def generate(self, diagram):",
"def _plot_background_of_attributes_diagram(\n axes_object, climatology,\n no_skill_line_colour=DEFAULT_ZERO_BSS_COLOUR,\n no_skill_line_width=DEFAULT_ZERO_BSS_WIDTH,\n other_line_colour=DEFAULT_CLIMATOLOGY_COLOUR,\n other_line_width=DEFAULT_CLIMATOLOGY_WIDTH):\n\n error_checking.assert_is_geq(climatology, 0.)\n error_checking.assert_is_leq(climatology, 1.)\n\n (x_vertices_for_left_skill_area,\n y_vertices_for_left_skill_area,\n x_vertices_for_right_skill_area,\n y_vertices_for_right_skill_area\n ) = model_eval.get_skill_areas_in_reliability_curve(climatology)\n\n skill_area_colour = matplotlib.colors.to_rgba(\n plotting_utils.colour_from_numpy_to_tuple(no_skill_line_colour),\n TRANSPARENCY_FOR_POSITIVE_BSS_AREA\n )\n\n left_polygon_object = polygons.vertex_arrays_to_polygon_object(\n x_vertices_for_left_skill_area, y_vertices_for_left_skill_area\n )\n left_polygon_patch = PolygonPatch(\n left_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour\n )\n\n axes_object.add_patch(left_polygon_patch)\n\n right_polygon_object = polygons.vertex_arrays_to_polygon_object(\n x_vertices_for_right_skill_area, y_vertices_for_right_skill_area\n )\n right_polygon_patch = PolygonPatch(\n right_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour\n )\n\n axes_object.add_patch(right_polygon_patch)\n\n no_skill_x_coords, no_skill_y_coords = (\n model_eval.get_no_skill_reliability_curve(climatology)\n )\n\n axes_object.plot(\n no_skill_x_coords, no_skill_y_coords,\n color=plotting_utils.colour_from_numpy_to_tuple(no_skill_line_colour),\n linestyle='solid', linewidth=no_skill_line_width\n )\n\n climo_x_coords, climo_y_coords = (\n model_eval.get_climatology_line_for_reliability_curve(\n climatology)\n )\n\n axes_object.plot(\n climo_x_coords, climo_y_coords,\n color=plotting_utils.colour_from_numpy_to_tuple(other_line_colour),\n linestyle='dashed', linewidth=other_line_width\n )\n\n no_resolution_x_coords, no_resolution_y_coords = (\n model_eval.get_no_resolution_line_for_reliability_curve(\n climatology)\n )\n\n axes_object.plot(\n no_resolution_x_coords, no_resolution_y_coords,\n color=plotting_utils.colour_from_numpy_to_tuple(other_line_colour),\n linestyle='dashed', linewidth=other_line_width\n )",
"def boot_induvidual_plot(self): # Setting up induvidual plots\n self.plot_traits = list([self.plt_0.subplot2grid((2, 5), (0, 0)), self.plt_0.subplot2grid((2, 5), (0, 1)),\n self.plt_0.subplot2grid((2, 5), (0, 2)), self.plt_0.subplot2grid((2, 5), (0, 3)),\n self.plt_0.subplot2grid((2, 5), (0, 4))])\n\n # creatng list of plot objects\n\n for x in range(len(self.X_transp)): # Iterating over each attributes patient\n\n present=self.plot_traits[x]\n # Selecting a particular plot object\n present.set_facecolor('orange')\n # setting face color\n present.scatter(self.np_0.arange(len(self.list_patient_names)),self.X_transp[x],c='blue')\n # drawing a scatter plot of this attribute\n\n present.xaxis.set_major_locator(plt.MultipleLocator(1))\n\n present.set_xlabel('Patient ID', fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(self.list_attributes[x], fontweight='bold')\n # setting Y-LABEL\n present.title.set_text(self.list_attributes[x]+\" Variation\")\n # setting Title\n\n present = self.plt_0.subplot2grid((2, 5), (1, 0), colspan=5)\n # to plot the present's status\n present.scatter(self.X_reduced_transp[0], self.X_reduced_transp[1], c='red')\n # plotting in the BOTTOM-PLOT\n\n present.set_xlabel(\"Principle Component -1\", fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(\"Principle Component -2\", fontweight='bold')\n # setting Y-LABEL\n\n for x in range(len(self.list_patient_names)): # Naming each patient with ID\n self.list_patient_names[x] = \"Patient \" + str(x)\n # Eg: Patient 0,Patient 1...\n for i, txt in enumerate(self.list_patient_names): # This is used to enumerate the scatter plots label\n present.annotate(txt, (self.X_reduced_transp[0][i] + 1, self.X_reduced_transp[1][i]), fontsize=10, c='black')\n # Coonecting with present",
"def initialize(self) -> None:\n # Only do matplotlib import when necessary\n super().initialize()\n from matplotlib import pyplot as plt\n self.fig, self.ax = plt.subplots()\n if self.state_map is not None:\n self._add_state_map(self.state_map)\n else:\n self.categories = self.simulation.state_list",
"def plot_data_attr_dist(self, dim1=0, dim2=1):\n (_, _, gen_test) = self.dataset.data_loaders(\n batch_size=16, # TODO: remove this hard coding\n split=(0.7, 0.15)\n )\n print('Num Test Batches: ', len(gen_test))\n self._plot_data_attr_dist(gen_test, dim1, dim2, 'rhy_complexity')\n self._plot_data_attr_dist(gen_test, dim1, dim2, 'num_notes')\n self._plot_data_attr_dist(gen_test, dim1, dim2, 'note_range')",
"def _init_attributes(self):\n if os.name == \"nt\":\n if \"64\" in platform.architecture()[0]:\n platform_arch = \"x86_64\"\n elif \"32\" in platform.architecture()[0]:\n platform_arch = \"i386\"\n else:\n platform_arch = platform.architecture()\n os_ver = f\"Windows-{platform.win32_ver()[1]}\"\n else:\n platform_arch = platform.machine()\n if platform.system() == \"Darwin\":\n os_ver = f\"macOS-{platform.mac_ver()[0]}\"\n else:\n os_ver = \"-\".join(linux_distribution()[0:2])\n\n license_chunks = LICENSE.split(\" \")\n if license_chunks[0] == \"GPLv2\":\n client_license = \"GPL-2.0\"\n else:\n client_license = \"Commercial\"\n\n default_attributes = {\n # Process id\n \"_pid\": str(os.getpid()),\n # Platform architecture\n \"_platform\": platform_arch,\n # OS version\n \"_os\": os_ver,\n # Hostname of the local machine\n \"_source_host\": socket.gethostname(),\n # Client's name\n \"_client_name\": \"mysql-connector-python\",\n # Client's version\n \"_client_version\": \".\".join([str(x) for x in VERSION[0:3]]),\n # Client's License identifier\n \"_client_license\": client_license,\n }\n self._settings[\"attributes\"].update(default_attributes)\n\n if \"connection-attributes\" in self._settings:\n for attr_name in self._settings[\"connection-attributes\"]:\n attr_value = self._settings[\"connection-attributes\"][attr_name]\n # Validate name type\n if not isinstance(attr_name, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' must be a string type\"\n )\n # Validate attribute name limit 32 characters\n if len(attr_name) > 32:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' exceeds 32 characters \"\n \"limit size\"\n )\n # Validate names in connection-attributes cannot start with \"_\"\n if attr_name.startswith(\"_\"):\n raise InterfaceError(\n \"Key names in 'session-connect-attributes' cannot \"\n f\"start with '_', found: {attr_name}\"\n )\n # Validate value type\n if not isinstance(attr_value, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value '{attr_value}' \"\n \" must be a string type\"\n )\n\n # Validate attribute value limit 1024 characters\n if len(attr_value) > 1024:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value: '{attr_value}' \"\n \"exceeds 1024 characters limit size\"\n )\n\n self._settings[\"attributes\"][attr_name] = attr_value",
"def initializePlot( self ):\n\n self.mNTaxa = len(self.mTree.get_taxa())\n self.mNNodes = max( self.mTree.chain.keys() ) + 1\n\n self.calculateCoordinates()\n \n self.calculateCanvasSize( )",
"def initialize_visualization(self) -> None:\n pass",
"def _instantiate_attributes_before_function(self, context=None):\n\n super()._instantiate_attributes_before_function(context=context)",
"def __init__(self, auto_column_tags=None, chart_default_color=None, column_tags=None, custom_tags=None, default_sort_column=None, expected_data_spacing=None, fixed_legend_display_stats=None, fixed_legend_enabled=None, fixed_legend_filter_field=None, fixed_legend_filter_limit=None, fixed_legend_filter_sort=None, fixed_legend_hide_label=None, fixed_legend_position=None, fixed_legend_show_metric_name=None, fixed_legend_show_source_name=None, fixed_legend_use_raw_stats=None, group_by_source=None, invert_dynamic_legend_hover_control=None, line_type=None, logs_table=None, max=None, min=None, num_tags=None, plain_markdown_content=None, show_hosts=None, show_labels=None, show_raw_values=None, show_value_column=None, sort_values_descending=None, sparkline_decimal_precision=None, sparkline_display_color=None, sparkline_display_font_size=None, sparkline_display_horizontal_position=None, sparkline_display_postfix=None, sparkline_display_prefix=None, sparkline_display_value_type=None, sparkline_display_vertical_position=None, sparkline_fill_color=None, sparkline_line_color=None, sparkline_size=None, sparkline_value_color_map_apply_to=None, sparkline_value_color_map_colors=None, sparkline_value_color_map_values=None, sparkline_value_color_map_values_v2=None, sparkline_value_text_map_text=None, sparkline_value_text_map_thresholds=None, stack_type=None, tag_mode=None, time_based_coloring=None, type=None, window_size=None, windowing=None, xmax=None, xmin=None, y0_scale_siby1024=None, y0_unit_autoscaling=None, y1_max=None, y1_min=None, y1_scale_siby1024=None, y1_unit_autoscaling=None, y1_units=None, ymax=None, ymin=None, _configuration=None): # noqa: E501 # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._auto_column_tags = None\n self._chart_default_color = None\n self._column_tags = None\n self._custom_tags = None\n self._default_sort_column = None\n self._expected_data_spacing = None\n self._fixed_legend_display_stats = None\n self._fixed_legend_enabled = None\n self._fixed_legend_filter_field = None\n self._fixed_legend_filter_limit = None\n self._fixed_legend_filter_sort = None\n self._fixed_legend_hide_label = None\n self._fixed_legend_position = None\n self._fixed_legend_show_metric_name = None\n self._fixed_legend_show_source_name = None\n self._fixed_legend_use_raw_stats = None\n self._group_by_source = None\n self._invert_dynamic_legend_hover_control = None\n self._line_type = None\n self._logs_table = None\n self._max = None\n self._min = None\n self._num_tags = None\n self._plain_markdown_content = None\n self._show_hosts = None\n self._show_labels = None\n self._show_raw_values = None\n self._show_value_column = None\n self._sort_values_descending = None\n self._sparkline_decimal_precision = None\n self._sparkline_display_color = None\n self._sparkline_display_font_size = None\n self._sparkline_display_horizontal_position = None\n self._sparkline_display_postfix = None\n self._sparkline_display_prefix = None\n self._sparkline_display_value_type = None\n self._sparkline_display_vertical_position = None\n self._sparkline_fill_color = None\n self._sparkline_line_color = None\n self._sparkline_size = None\n self._sparkline_value_color_map_apply_to = None\n self._sparkline_value_color_map_colors = None\n self._sparkline_value_color_map_values = None\n self._sparkline_value_color_map_values_v2 = None\n self._sparkline_value_text_map_text = None\n self._sparkline_value_text_map_thresholds = None\n self._stack_type = None\n 
self._tag_mode = None\n self._time_based_coloring = None\n self._type = None\n self._window_size = None\n self._windowing = None\n self._xmax = None\n self._xmin = None\n self._y0_scale_siby1024 = None\n self._y0_unit_autoscaling = None\n self._y1_max = None\n self._y1_min = None\n self._y1_scale_siby1024 = None\n self._y1_unit_autoscaling = None\n self._y1_units = None\n self._ymax = None\n self._ymin = None\n self.discriminator = None\n\n if auto_column_tags is not None:\n self.auto_column_tags = auto_column_tags\n if chart_default_color is not None:\n self.chart_default_color = chart_default_color\n if column_tags is not None:\n self.column_tags = column_tags\n if custom_tags is not None:\n self.custom_tags = custom_tags\n if default_sort_column is not None:\n self.default_sort_column = default_sort_column\n if expected_data_spacing is not None:\n self.expected_data_spacing = expected_data_spacing\n if fixed_legend_display_stats is not None:\n self.fixed_legend_display_stats = fixed_legend_display_stats\n if fixed_legend_enabled is not None:\n self.fixed_legend_enabled = fixed_legend_enabled\n if fixed_legend_filter_field is not None:\n self.fixed_legend_filter_field = fixed_legend_filter_field\n if fixed_legend_filter_limit is not None:\n self.fixed_legend_filter_limit = fixed_legend_filter_limit\n if fixed_legend_filter_sort is not None:\n self.fixed_legend_filter_sort = fixed_legend_filter_sort\n if fixed_legend_hide_label is not None:\n self.fixed_legend_hide_label = fixed_legend_hide_label\n if fixed_legend_position is not None:\n self.fixed_legend_position = fixed_legend_position\n if fixed_legend_show_metric_name is not None:\n self.fixed_legend_show_metric_name = fixed_legend_show_metric_name\n if fixed_legend_show_source_name is not None:\n self.fixed_legend_show_source_name = fixed_legend_show_source_name\n if fixed_legend_use_raw_stats is not None:\n self.fixed_legend_use_raw_stats = fixed_legend_use_raw_stats\n if group_by_source is not None:\n self.group_by_source = group_by_source\n if invert_dynamic_legend_hover_control is not None:\n self.invert_dynamic_legend_hover_control = invert_dynamic_legend_hover_control\n if line_type is not None:\n self.line_type = line_type\n if logs_table is not None:\n self.logs_table = logs_table\n if max is not None:\n self.max = max\n if min is not None:\n self.min = min\n if num_tags is not None:\n self.num_tags = num_tags\n if plain_markdown_content is not None:\n self.plain_markdown_content = plain_markdown_content\n if show_hosts is not None:\n self.show_hosts = show_hosts\n if show_labels is not None:\n self.show_labels = show_labels\n if show_raw_values is not None:\n self.show_raw_values = show_raw_values\n if show_value_column is not None:\n self.show_value_column = show_value_column\n if sort_values_descending is not None:\n self.sort_values_descending = sort_values_descending\n if sparkline_decimal_precision is not None:\n self.sparkline_decimal_precision = sparkline_decimal_precision\n if sparkline_display_color is not None:\n self.sparkline_display_color = sparkline_display_color\n if sparkline_display_font_size is not None:\n self.sparkline_display_font_size = sparkline_display_font_size\n if sparkline_display_horizontal_position is not None:\n self.sparkline_display_horizontal_position = sparkline_display_horizontal_position\n if sparkline_display_postfix is not None:\n self.sparkline_display_postfix = sparkline_display_postfix\n if sparkline_display_prefix is not None:\n self.sparkline_display_prefix = 
sparkline_display_prefix\n if sparkline_display_value_type is not None:\n self.sparkline_display_value_type = sparkline_display_value_type\n if sparkline_display_vertical_position is not None:\n self.sparkline_display_vertical_position = sparkline_display_vertical_position\n if sparkline_fill_color is not None:\n self.sparkline_fill_color = sparkline_fill_color\n if sparkline_line_color is not None:\n self.sparkline_line_color = sparkline_line_color\n if sparkline_size is not None:\n self.sparkline_size = sparkline_size\n if sparkline_value_color_map_apply_to is not None:\n self.sparkline_value_color_map_apply_to = sparkline_value_color_map_apply_to\n if sparkline_value_color_map_colors is not None:\n self.sparkline_value_color_map_colors = sparkline_value_color_map_colors\n if sparkline_value_color_map_values is not None:\n self.sparkline_value_color_map_values = sparkline_value_color_map_values\n if sparkline_value_color_map_values_v2 is not None:\n self.sparkline_value_color_map_values_v2 = sparkline_value_color_map_values_v2\n if sparkline_value_text_map_text is not None:\n self.sparkline_value_text_map_text = sparkline_value_text_map_text\n if sparkline_value_text_map_thresholds is not None:\n self.sparkline_value_text_map_thresholds = sparkline_value_text_map_thresholds\n if stack_type is not None:\n self.stack_type = stack_type\n if tag_mode is not None:\n self.tag_mode = tag_mode\n if time_based_coloring is not None:\n self.time_based_coloring = time_based_coloring\n self.type = type\n if window_size is not None:\n self.window_size = window_size\n if windowing is not None:\n self.windowing = windowing\n if xmax is not None:\n self.xmax = xmax\n if xmin is not None:\n self.xmin = xmin\n if y0_scale_siby1024 is not None:\n self.y0_scale_siby1024 = y0_scale_siby1024\n if y0_unit_autoscaling is not None:\n self.y0_unit_autoscaling = y0_unit_autoscaling\n if y1_max is not None:\n self.y1_max = y1_max\n if y1_min is not None:\n self.y1_min = y1_min\n if y1_scale_siby1024 is not None:\n self.y1_scale_siby1024 = y1_scale_siby1024\n if y1_unit_autoscaling is not None:\n self.y1_unit_autoscaling = y1_unit_autoscaling\n if y1_units is not None:\n self.y1_units = y1_units\n if ymax is not None:\n self.ymax = ymax\n if ymin is not None:\n self.ymin = ymin",
"def _setup_show(self):\n super(Scatter, self)._setup_show()\n\n # check if pandas is installed\n if pd:\n # if it is we try to take advantage of it's data structures\n # asumming we get an groupby object\n if isinstance(self.values, pd.core.groupby.DataFrameGroupBy):\n pdict = OrderedDict()\n\n for i in self.values.groups.keys():\n self.labels = self.values.get_group(i).columns\n xname = self.values.get_group(i).columns[0]\n yname = self.values.get_group(i).columns[1]\n x = getattr(self.values.get_group(i), xname)\n y = getattr(self.values.get_group(i), yname)\n pdict[i] = np.array([x.values, y.values]).T\n\n self.values = DataAdapter(pdict)\n self.labels = self.values.keys()\n\n # create axis labels from group by object only if the input\n # values is a DataFrameGroupBy\n if self._xlabel is None:\n self._xlabel = self.labels[0]\n\n if self._ylabel is None:\n self._ylabel = self.labels[1]\n\n else:\n self.values = DataAdapter(self.values)\n self.labels = self.values.keys()\n\n else:\n self.values = DataAdapter(self.values)\n self.labels = self.values.keys()",
"def __init__(self, features, plot_design, feature_description_class):\n super().__init__(features, plot_design, feature_description_class)",
"def _set_attributes(self):",
"def _InitializeVizier(self):\n self._should_report_metrics = False",
"def build_player_atrribute_corelation(player_attributes):\n player_attributes_wo_na = player_attributes.dropna()\n player_attributes_corr = player_attributes_wo_na.corr()\n fig, ax = plt.subplots(nrows=1, ncols=1)\n fig.set_size_inches(w=24, h=24)\n sns.heatmap(player_attributes_corr, annot=True, linewidths=0.5, ax=ax, cmap=\"Blues\")\n plt.show()",
"def init():\n return _libsbml.LayoutExtension_init()",
"def get_attributes(cls):\r\n return [\r\n Attribute('height', None),\r\n Attribute('width', None),\r\n Attribute('parts', None),\r\n Attribute('analyses', None),\r\n Attribute('initial_value', None),\r\n Attribute('submit_analyses', None),\r\n Attribute('label', ''),\r\n ]",
"def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)",
"def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]",
"def __init__(self):\n super(GraphVisualizerPointDraw, self).__init__()\n\n self.setMinimumSize(QSize(13, 13))\n self.setMaximumSize(QSize(13, 13))",
"def plot_bootstrapped_performance_diagram(\n axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,\n line_colour=DEFAULT_PERFORMANCE_COLOUR,\n line_width=DEFAULT_PERFORMANCE_WIDTH,\n bias_line_colour=DEFAULT_FREQ_BIAS_COLOUR,\n bias_line_width=DEFAULT_FREQ_BIAS_WIDTH):\n\n plot_performance_diagram(\n axes_object=axes_object,\n pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],\n success_ratio_by_threshold=ci_mean_dict[model_eval.SR_BY_THRESHOLD_KEY],\n line_colour=line_colour, line_width=line_width,\n bias_line_colour=bias_line_colour, bias_line_width=bias_line_width)\n\n polygon_object = _confidence_interval_to_polygon(\n x_coords_bottom=ci_bottom_dict[model_eval.SR_BY_THRESHOLD_KEY],\n y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],\n x_coords_top=ci_top_dict[model_eval.SR_BY_THRESHOLD_KEY],\n y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY],\n for_performance_diagram=True)\n\n polygon_colour = matplotlib.colors.to_rgba(\n plotting_utils.colour_from_numpy_to_tuple(line_colour),\n TRANSPARENCY_FOR_CONFIDENCE_INTERVAL\n )\n\n polygon_patch = PolygonPatch(\n polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)\n\n axes_object.add_patch(polygon_patch)",
"def __init__(self, link=None, first_slice_angle=None, floor=None, plot_empty_cells_type=None, auto_scaling=None, style=None, series_axis=None, value_axis=None, show_data_table=None, is3_d=None, chart_area=None, elevation=None, side_wall=None, type=None, title=None, walls=None, back_wall=None, chart_data_table=None, height_percent=None, gap_width=None, legend=None, chart_object=None, is_rectangular_cornered=None, second_category_axis=None, second_value_axis=None, placement=None, name=None, size_with_window=None, right_angle_axes=None, plot_visible_cells=None, show_legend=None, pivot_source=None, depth_percent=None, print_size=None, gap_depth=None, shapes=None, walls_and_gridlines2_d=None, n_series=None, rotation_angle=None, plot_area=None, category_axis=None, perspective=None, hide_pivot_field_buttons=None, page_setup=None, **kw):\n self.container = {}\n\t\t \n \"\"\"\n Chart - a model defined in Swagger\n \"\"\"\n\n self.container['link'] = None\n self.container['first_slice_angle'] = None\n self.container['floor'] = None\n self.container['plot_empty_cells_type'] = None\n self.container['auto_scaling'] = None\n self.container['style'] = None\n self.container['series_axis'] = None\n self.container['value_axis'] = None\n self.container['show_data_table'] = None\n self.container['is3_d'] = None\n self.container['chart_area'] = None\n self.container['elevation'] = None\n self.container['side_wall'] = None\n self.container['type'] = None\n self.container['title'] = None\n self.container['walls'] = None\n self.container['back_wall'] = None\n self.container['chart_data_table'] = None\n self.container['height_percent'] = None\n self.container['gap_width'] = None\n self.container['legend'] = None\n self.container['chart_object'] = None\n self.container['is_rectangular_cornered'] = None\n self.container['second_category_axis'] = None\n self.container['second_value_axis'] = None\n self.container['placement'] = None\n self.container['name'] = None\n self.container['size_with_window'] = None\n self.container['right_angle_axes'] = None\n self.container['plot_visible_cells'] = None\n self.container['show_legend'] = None\n self.container['pivot_source'] = None\n self.container['depth_percent'] = None\n self.container['print_size'] = None\n self.container['gap_depth'] = None\n self.container['shapes'] = None\n self.container['walls_and_gridlines2_d'] = None\n self.container['n_series'] = None\n self.container['rotation_angle'] = None\n self.container['plot_area'] = None\n self.container['category_axis'] = None\n self.container['perspective'] = None\n self.container['hide_pivot_field_buttons'] = None\n self.container['page_setup'] = None\n\n if link is not None:\n self.link = link\n if first_slice_angle is not None:\n self.first_slice_angle = first_slice_angle\n if floor is not None:\n self.floor = floor\n if plot_empty_cells_type is not None:\n self.plot_empty_cells_type = plot_empty_cells_type\n if auto_scaling is not None:\n self.auto_scaling = auto_scaling\n if style is not None:\n self.style = style\n if series_axis is not None:\n self.series_axis = series_axis\n if value_axis is not None:\n self.value_axis = value_axis\n if show_data_table is not None:\n self.show_data_table = show_data_table\n if is3_d is not None:\n self.is3_d = is3_d\n if chart_area is not None:\n self.chart_area = chart_area\n if elevation is not None:\n self.elevation = elevation\n if side_wall is not None:\n self.side_wall = side_wall\n if type is not None:\n self.type = type\n if title is not None:\n self.title = title\n if 
walls is not None:\n self.walls = walls\n if back_wall is not None:\n self.back_wall = back_wall\n if chart_data_table is not None:\n self.chart_data_table = chart_data_table\n if height_percent is not None:\n self.height_percent = height_percent\n if gap_width is not None:\n self.gap_width = gap_width\n if legend is not None:\n self.legend = legend\n if chart_object is not None:\n self.chart_object = chart_object\n if is_rectangular_cornered is not None:\n self.is_rectangular_cornered = is_rectangular_cornered\n if second_category_axis is not None:\n self.second_category_axis = second_category_axis\n if second_value_axis is not None:\n self.second_value_axis = second_value_axis\n if placement is not None:\n self.placement = placement\n if name is not None:\n self.name = name\n if size_with_window is not None:\n self.size_with_window = size_with_window\n if right_angle_axes is not None:\n self.right_angle_axes = right_angle_axes\n if plot_visible_cells is not None:\n self.plot_visible_cells = plot_visible_cells\n if show_legend is not None:\n self.show_legend = show_legend\n if pivot_source is not None:\n self.pivot_source = pivot_source\n if depth_percent is not None:\n self.depth_percent = depth_percent\n if print_size is not None:\n self.print_size = print_size\n if gap_depth is not None:\n self.gap_depth = gap_depth\n if shapes is not None:\n self.shapes = shapes\n if walls_and_gridlines2_d is not None:\n self.walls_and_gridlines2_d = walls_and_gridlines2_d\n if n_series is not None:\n self.n_series = n_series\n if rotation_angle is not None:\n self.rotation_angle = rotation_angle\n if plot_area is not None:\n self.plot_area = plot_area\n if category_axis is not None:\n self.category_axis = category_axis\n if perspective is not None:\n self.perspective = perspective\n if hide_pivot_field_buttons is not None:\n self.hide_pivot_field_buttons = hide_pivot_field_buttons\n if page_setup is not None:\n self.page_setup = page_setup",
"def draw_defaults(self):\n\n pass",
"def setup_draw(self):\n pass"
] | [
"0.5769211",
"0.5578839",
"0.5578839",
"0.5510679",
"0.5479296",
"0.5369858",
"0.5342495",
"0.53263974",
"0.5322546",
"0.51956356",
"0.5170794",
"0.5103581",
"0.5096639",
"0.505632",
"0.49624193",
"0.49553692",
"0.49490607",
"0.4941913",
"0.49288616",
"0.49155903",
"0.49087885",
"0.48910874",
"0.48798662",
"0.48709163",
"0.48696935",
"0.48618513",
"0.48593247",
"0.48557714",
"0.4851638",
"0.48310626"
] | 0.6774211 | 0 |
Provides the spin from an int. +1 == Spin.up, -1 == Spin.down. | def from_int(i):
if i == 1:
return Spin.up
elif i == -1:
return Spin.down
else:
raise ValueError("Spin integers must be 1 or -1") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_spin(self, i):\n \n return 1 if self.spins[i] else -1",
"def spin(mult):\n return mult - 1",
"def getSpinControl(*args):",
"def on_spin(self, event):\n spin_value = self.spin_run.GetValue()\n text = \"\".join([_(u\"New run spin control value: \"), str(spin_value)])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n return spin_value",
"def spin(self) -> float:\n return self._s",
"def spin(rxn_class):\n return rxn_class[1]",
"def spinWheel():\n\tt = random.randrange(8)\n\tif t == 7:\n\t\treturn \"00\"\n\treturn str(t)",
"def integer(self, label, handler=None, pack=True, minval=1, maxval=100, optional=False, initial_on=False, **kwargs):\n handler = self._changed_handler(handler)\n spin = wx.SpinCtrl(self, min=minval, max=maxval, **kwargs)\n spin.SetValue(kwargs.get(\"initial\", 0))\n spin.Bind(wx.EVT_SPINCTRL, handler)\n\n if optional:\n cb = wx.CheckBox(self, label=label)\n cb.SetValue(initial_on)\n cb.Bind(wx.EVT_CHECKBOX, handler)\n spin.checkbox = cb\n if pack:\n self.pack(\"\", cb, spin, enable=initial_on)\n elif pack:\n self.pack(label, spin)\n\n return spin",
"def valoresSpin():\n try:\n var.ui.spinEdad.setValue(16)\n except Exception as error:\n print('Error valores spin: %s' % str(error))",
"def spin (self, string):\n \n spin_str = '.' * self.spin_pos + '|' + '.' * (self.spin_size - self.spin_pos - 1)\n sys.stdout.write('\\r' + string + ' ' + spin_str + ' ')\n sys.stdout.flush()\n\n self.spin_pos += self.spin_dir\n if self.spin_pos < 0:\n self.spin_dir = 1\n self.spin_pos = 1\n elif self.spin_pos >= self.spin_size:\n self.spin_pos -= 2\n self.spin_dir = -1",
"def _get_nspin(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n itmp = search_string('NSPIN', tmptxt)\n nspin = int(tmptxt.pop(itmp).split()[-1])\n return nspin",
"def on_spin_cont(self, event):\n spin_value_cont = self.spin_cont.GetValue()\n text = \"\".join(\n [_(u\"New continue spin control value: \"), str(spin_value_cont)])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n return spin_value_cont",
"def spin():\n global wind_count\n wind_count = wind_count + 1",
"def GetSpinner(self):\n return self.spinner_char_list[self.current_spinner_index]",
"def _spin(self):\n center= self.rect.center\n self.dizzy= self.dizzy + 10 #12\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original\n else:\n rotate= pygame.transform.rotate\n self.image= rotate(self.original, self.dizzy)\n self.rect= self.image.get_rect(center= center)",
"def spinCtrl(parent,value='',pos=defPos,size=defSize,style=wx.SP_ARROW_KEYS,\r\n min=0,max=100,initial=0,name='wxSpinctrl',id=defId,onSpin=None,tip=None):\r\n gSpinCtrl=wx.SpinCtrl(parent,id,value,pos,size,style,min,max,initial,name)\r\n if onSpin: gSpinCtrl.Bind(wx.EVT_SPINCTRL,onSpin)\r\n if tip: gSpinCtrl.SetToolTip(tooltip(tip))\r\n return gSpinCtrl",
"def spinAround(self):",
"def int_spinbox(init: int = 0, range: Tuple[int, int] = (0, 99), descr: str = '', data_type: type[Data] = Data):\n\n class StdInpWidget_IntSpinBox(StdInputWidgetBase, QSpinBox):\n\n def __init__(self, params):\n StdInputWidgetBase.__init__(self, params)\n QSpinBox.__init__(self)\n\n # there is no 'valueEdited' signal, only a 'valueChanged' signal\n # so we need to block the signal when a new value is coming from\n # a connection\n self._prevent_update = PreventUpdateCtx(False)\n\n # tooltip\n self.setToolTip(self.__doc__)\n\n self.valueChanged.connect(self.value_changed)\n\n # initial value and rage\n with self._prevent_update:\n self.setRange(*range)\n self.setValue(init)\n\n\n @property\n def val(self) -> data_type:\n return data_type(self.value())\n\n def load_from(self, val: Data):\n with self._prevent_update:\n self.setValue(val.payload)\n\n def value_changed(self, _):\n self.on_widget_val_changed(self.val)\n\n def val_update_event(self, val: Data):\n if isinstance(val.payload, int):\n with self._prevent_update:\n self.setValue(val.payload)\n\n StdInpWidget_IntSpinBox.__doc__ = descr\n\n return StdInpWidget_IntSpinBox",
"def spin(self):\n spinner = self._spinner_dict.get(self._current_gui, lambda: None)\n spinner()",
"def _spinner_key():\n with _spinner_key.lock:\n _spinner_key.counter += 1\n return \"_spinner_%d\" % _spinner_key.counter",
"def spin_right(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Spin Right'\n return motor_direction",
"def _g_spin_changed(self):\n self.gLine.setValue(self.gSpin.value())",
"def spin_left(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Spin Left'\n return motor_direction",
"def _spin_index(self, sz: float) -> int:\n if self.spin is None:\n if sz is not None or not np.isclose(sz, 0):\n raise Exception(\"cannot request spin index of spinless fermions\")\n return 0\n else:\n return round(sz + self.spin)",
"def getSpin(position, lines):\r\n x = str(position[0])\r\n y = str(position[1])\r\n z = str(position[2])\r\n \r\n spin = None\r\n \r\n for line in lines:\r\n if not line.startswith('#'):\r\n values = line.split()\r\n if x == values[1] and y == values[2] and z == values[3]:\r\n spin = (float(values[4]), float(values[5]), float(values[6]))\r\n break\r\n \r\n return spin",
"def spin(self, *args, **kwargs) -> Any:\n pass",
"def spin(self, ref, speed, duration):\n self.instructions.append(Spin(ref, speed, duration))",
"def _r_spin_changed(self):\n self.rLine.setValue(self.rSpin.value())",
"def _spin(self):\n center = self.rect.center\n self.dizzy += 12 # rotate 12 degree clockwise\n\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original # reset the image to its original ones after rotated\n else:\n self.image = pygame.transform.rotate(self.original, self.dizzy)\n\n self.rect = self.image.get_rect()\n self.rect.center = center # make sure the image would not move when spinning",
"def _b_spin_changed(self):\n self.bLine.setValue(self.bSpin.value())"
] | [
"0.7496796",
"0.7100908",
"0.66705376",
"0.66398364",
"0.65249074",
"0.64780074",
"0.6415233",
"0.630757",
"0.6230635",
"0.6085406",
"0.5867",
"0.58657813",
"0.5847536",
"0.582126",
"0.5800157",
"0.57851535",
"0.57549834",
"0.57405555",
"0.5706542",
"0.5580043",
"0.5572952",
"0.54825103",
"0.5471139",
"0.5460732",
"0.54522943",
"0.5436842",
"0.53972083",
"0.5382742",
"0.5375836",
"0.5355896"
] | 0.8344842 | 0 |
String indicating the type of orbital. Is always uppercase. E.g., S, P, D, F, etc. | def orbital_type(self):
return self.name[0].upper() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getType(self):\n if (self.type == 's'):\n #suit type\n type = \"suit\"\n elif (self.type == 'b'):\n #boss type\n type = \"boss\"\n else:\n notify.error(\"Invalid DNA type: \", self.type)\n\n return type",
"def unit_type(self) -> str:",
"def determine_type(self):\n \n t = \" \" # Holder string\n self.cs = 0. \n ## On the lowest rigidities\n if self.xi_L < 0.3:\n if self.pe > 700.:\n t = \"spiral\"\n elif self.pe < 700. and self.pe > 100.:\n if self.xi_L > 0.1:\n t = \"cluster\"\n else:\n t = \"gas\"\n else:\n t = \"gas\"\n else:\n t = \"loop\"\n \n self.type = t # Type of point\n \n if self.pe < 7500.:\n if self.pe < 10.:\n self.pe += 1.\n elif self.pe < 700. and self.pe > 100.: \n self.pe += 50.\n else:\n self.pe += 200.\n \n return",
"def guess_symmetry(self):\n if self.lattice_parameters.count(None) > 0:\n return 'monoclinic'\n if np.isclose(self.alpha, 90.0) and np.isclose(self.beta, 90.0):\n if np.isclose(self.gamma, 90.0):\n if np.isclose(self.a, self.b) and np.isclose(self.a, self.c):\n return 'cubic'\n elif np.isclose(self.a, self.b):\n return 'tetragonal'\n else:\n return 'orthorhombic'\n elif np.isclose(self.gamma, 120.0):\n if np.isclose(self.a, self.b):\n return 'hexagonal'\n else:\n return 'triclinic'\n elif np.isclose(self.alpha, 90.0) and np.isclose(self.gamma, 90.0):\n return 'monoclinic'\n else:\n return 'triclinic'",
"def Sphericity(self):\n s = self.sphericity\n assert s in range(1,6), \"Sphericity score out of bounds.\"\n if s == 1: return 'Linear'\n elif s == 2: return 'Ovoid Linear'\n elif s == 3: return 'Ovoid'\n elif s == 4: return 'Ovoid Round'\n elif s == 5: return 'Round'",
"def to_observation_type(self) -> str:\n obstype = self._header[\"OBSTYPE\"].strip().lower()\n self._used_these_cards(\"OBSTYPE\")\n if obstype == \"object\":\n return \"science\"\n return obstype",
"def __str__(self):\n if (self.type == 's'):\n return \"type = %s\\nbody = %s, dept = %s, name = %s\" % \\\n (\"suit\", self.body, self.dept, self.name)\n elif (self.type == 'b'):\n return \"type = boss cog\\ndept = %s\" % (self.dept)\n else:\n return \"type undefined\"",
"def shape_type_string(shape):\n shape_type = shape.ShapeType()\n types = {TopAbs_VERTEX: \"Vertex\",\n TopAbs_SOLID: \"Solid\",\n TopAbs_EDGE: \"Edge\",\n TopAbs_FACE: \"Face\",\n TopAbs_SHELL: \"Shell\",\n TopAbs_WIRE: \"Wire\",\n TopAbs_COMPOUND: \"Compound\",\n TopAbs_COMPSOLID: \"Compsolid\"}\n return \"%s (id %s)\" % (types[shape_type], hash(shape))",
"def Type(self, String):\r\n\r\n if (String == \"byte\") or (String == \"sbyte\"):\r\n return 1\r\n elif (String == \"word\") or (String == \"sword\"):\r\n return 2\r\n else:\r\n return 4",
"def gwcalctyp(self):\n dig0 = str(self._SIGMA_TYPES[self.type])\n dig1 = str(self._SC_MODES[self.sc_mode])\n return dig1.strip() + dig0.strip()",
"def name(self) -> str:\n station_name = self._get_station_name()\n return f\"{station_name} {self._fuel_type}\"",
"def get_surface_type(self) -> SurfaceTypeStr:\n return SURFACE_TYPES.inverse[self.surfaceType()]",
"def outputtype(cls):\n return \"AFNI\"",
"def formatPickupType(string):\n if string == 'N/A':\n return 0\n elif string == 'D':\n return 1\n elif string == 'M':\n return 2\n \n elif string == 'C':\n return 3\n \n elif string == 'R':\n return 4\n \n else:\n return 5",
"def surface_type(self):\n surf_type = BRepAdaptor_Surface(self.topods_shape()).GetType()\n if surf_type == GeomAbs_Plane:\n return \"plane\"\n if surf_type == GeomAbs_Cylinder:\n return \"cylinder\"\n if surf_type == GeomAbs_Cone:\n return \"cone\"\n if surf_type == GeomAbs_Sphere:\n return \"sphere\"\n if surf_type == GeomAbs_Torus:\n return \"torus\"\n if surf_type == GeomAbs_BezierSurface:\n return \"bezier\"\n if surf_type == GeomAbs_BSplineSurface:\n return \"bspline\"\n if surf_type == GeomAbs_SurfaceOfRevolution:\n return \"revolution\"\n if surf_type == GeomAbs_SurfaceOfExtrusion:\n return \"extrusion\"\n if surf_type == GeomAbs_OffsetSurface:\n return \"offset\"\n if surf_type == GeomAbs_OtherSurface:\n return \"other\"\n return \"unknown\"",
"def plan_type_name(self):\n\n if self.converted:\n length = self.plan_interval_length\n unit = self.plan_interval_unit\n\n if unit == \"month\" and length == 1:\n return \"monthly\"\n elif unit == \"month\" and length == 12:\n return \"yearly\"\n\n return \"\"",
"def type(self):\n return self.EQUATION",
"def type(self):\n return self.EQUATION",
"def _get_type_name(self, st_type):\n if st_type <= 2045: return 'str' + str(st_type)\n return self._type_names[st_type]",
"def unit(self) -> str:",
"def getObcType(): \n return simuConfig[\"OBC\"]",
"def get_label(genotype_type):\n if genotype_type == \"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2",
"def get_analog_type_name(self, device_type_name):\n if device_type_name in [\"SOLN\", \"BEND\", \"KICK\"]:\n return \"CURRENT\"\n elif device_type_name in [\"PBLM\", \"LBLM\", \"CBLM\", \"BLM\"]:\n return \"LOSS\"\n elif device_type_name in [\"TORO\", \"FARC\"]:\n return \"CHARGE\"\n else:\n raise ValueError(\"Function \\\"get_analog_type_name(device_type_name={})\\\". Invalid device type name\"\n .format(device_type_name))",
"def __str__(self):\n s = self.prefix.symbol\n s += self.unit.symbol\n if len(self.timebase.symbol) > 0:\n s += '/' + self.timebase.symbol\n if s == 'mls/min':\n s = 'sccm'\n elif s == 'ls/min':\n s = 'slm'\n if self.unit == Sfc5xxxUnit.STANDARD_LITER_15C:\n s += ' (15°C)'\n elif self.unit == Sfc5xxxUnit.STANDARD_LITER_25C:\n s += ' (25°C)'\n elif self.unit == Sfc5xxxUnit.STANDARD_LITER_70F:\n s += ' (70°F)'\n return s",
"def get_type(cmb_type):\n\n terminology = ['Boulder','Trad','Sport','TR','Aid','Ice','Mixed','Alpine','Chipped']\n\n kind = {}\n kind_pitches_feet = str(cmb_type).split(', ')\n for morsel in kind_pitches_feet:\n if morsel in terminology:\n # columns end up either True or NaN\n kind[morsel.lower()] = True\n elif pitchRE.search(morsel):\n kind['pitches'] = morsel.split(' ')[0]\n elif feetRE.search(morsel):\n kind['feet'] = float(morsel[:-1])\n elif commitmentRE.search(morsel):\n kind['commitment'] = morsel.split(' ')[-1]\n return kind",
"def type_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"type_name\")",
"def show_opinion_type(opinion_type):\n click.echo(format_type(opinion_type))",
"def type(self) -> str:\n return pulumi.get(self, \"type\")",
"def type(self) -> str:\n return pulumi.get(self, \"type\")",
"def type(self) -> str:\n return pulumi.get(self, \"type\")"
] | [
"0.64918655",
"0.6483131",
"0.6290639",
"0.5996382",
"0.5951763",
"0.5928909",
"0.5896806",
"0.5896335",
"0.5885597",
"0.58396786",
"0.5786302",
"0.571342",
"0.57116663",
"0.5709346",
"0.567985",
"0.5675378",
"0.56724256",
"0.56724256",
"0.56605196",
"0.5654156",
"0.56412363",
"0.56058794",
"0.56014884",
"0.5585274",
"0.5584501",
"0.55821186",
"0.5571829",
"0.5555255",
"0.5555255",
"0.5555255"
] | 0.819442 | 0 |
Returns an orbital based on the index of the orbital in VASP runs. | def from_vasp_index(i):
return Orbital.all_orbitals[i] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def orbit_index():\n return OrbitController.invoke(OUTPUT_DIRECTORY)",
"def get_vsolar(self):\n return self.read_register(4098, 1, 3)",
"def get_orbit(self):\n return self.get_attr('orbit')",
"def get_solar(self, name_building):\n return self._solar.loc[name_building]",
"def solar_time_index(self):\n if self._solar_time_index is None:\n with Resource(self.solar_fpath) as res:\n self._solar_time_index = res.time_index\n return self._solar_time_index",
"def getorbit(sat, tfinal, tstep, trec):\n ntimes = (int)(tfinal/tstep)\n n_tvals = (int)(tfinal/trec)\n state_arr = np.zeros((6, n_tvals))\n orbelem_arr = np.zeros((6, n_tvals))\n s_major_arr = np.zeros(n_tvals)\n count = 0\n for i in range(ntimes):\n sat.rk4_step_sat(tstep)\n if i % (trec/tstep) == 0:\n state_arr[:, count] = sat.getstate()\n orbelem_arr[:, count] = sat.orb_elem()\n s_major_arr[count] = sat.get_a()\n tether = sat.get_tether()\n tether.setlamda_a(sat)\n tether.set_iv(sat)\n print state_arr[0, count]\n print count\n count += 1\n return (state_arr, orbelem_arr, s_major_arr)",
"def obtener_peso_arista(self, v, w):\n return self.vertices[v][w]",
"def solar_time_index(self):\n return self.data.solar_time_index",
"def create_general_sar_orbit(state_vectors: StateVectors):\n time_axis = are_ax.RegularAxis((0, state_vectors.time_step, state_vectors.number_of_state_vectors),\n state_vectors.reference_time)\n return GeneralSarOrbit(time_axis, state_vectors.position_vector.reshape((state_vectors.position_vector.size,)))",
"def AsVector(self) -> ngsolve.la.BaseVector:",
"def _get_index(self, orb: int, sz: float = None):\n if orb >= self.n_orbitals:\n raise IndexError(\"requested orbital index outside of the hilbert space\")\n spin_idx = self._spin_index(sz)\n return spin_idx * self.n_orbitals + orb",
"def index():\n return get_in_radius(50)",
"def phase_velocity(refractive_index):\n return cgs.c / refractive_index",
"def get_active_fracture_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_fracture_index1( gi )",
"def orbit(self, alpha, action='tuples'):\n return _orbit(self.degree, self.generators, alpha, action)",
"def getOctaveIndex(self):\n notename = self.getNoteName().lower() # Most of the time this name should work\n if notename not in self.octaveIndices.keys(): # If not, it probably has too many accidentals\n simple = self.simplify() # Simplify\n notename = simple.getNoteName().lower() # And try again\n return self.octaveIndices.get(notename) # Return the index",
"def inversion(self, index=0):\r\n i = [(j[0]+index) % 12 for j in self.__full]\r\n return TWToneMatrix(i)",
"def orbit_rep(self, alpha, beta, schreier_vector=None):\n if schreier_vector is None:\n schreier_vector = self.schreier_vector(alpha)\n if schreier_vector[beta] is None:\n return False\n k = schreier_vector[beta]\n gens = [x._array_form for x in self.generators]\n a = []\n while k != -1:\n a.append(gens[k])\n beta = gens[k].index(beta) # beta = (~gens[k])(beta)\n k = schreier_vector[beta]\n if a:\n return _af_new(_af_rmuln(*a))\n else:\n return _af_new(list(range(self._degree)))",
"def get_ref_index(self):\n total_pol = self.get_compound_pol()\n molar_volume = self.get_molar_volume()\n if not total_pol:\n return None\n ref_index = np.sqrt((4 * np.pi * total_pol) / ((2.26 - 4 * np.pi / 3) * total_pol + molar_volume) + 1)\n return ref_index",
"def orbit(self, representation='trivial'):\n if not self:\n return self\n\n answer = BarrattEccles_element(torsion=self.torsion)\n for k, v in self.items():\n inverse = tuple(k[0].index(i + 1) + 1 for i in range(len(k[0])))\n permutation = SymmetricRing_element({inverse: 1}, torsion=self.torsion)\n if representation == 'sign':\n permutation = k[0].sign * permutation\n answer += permutation * BarrattEccles_element({k: v}, torsion=self.torsion)\n\n return answer",
"def solar_model():\n \n latitude, longitude, timezone, elevation = location_input()\n year, time = time_input()\n\n lat_r = latitude/180*np.pi\n lon_r = longitude/180*np.pi \n n = 0\n for i in range(1900,year):\n if i%4 == 0:\n n += 366\n else:\n n+=365\n JulD = n + time + 2415018.5 - (timezone)/24\n LT = time - int(time)\n JC = (JulD - 2451545) / 36525\n x = 46.815 + JC * (0.00059 - JC * 0.001813)\n M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n GMAS_r = m.radians(GMAS)\n GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n GMLS_r = m.radians(GMLS)\n Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n Obliq_C_r = m.radians(Obliq_C)\n SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n STL = GMLS + SEC\n SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n SAL_r = m.radians(SAL)\n sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n Delta_r = np.arcsin(sin_Delta) #in radians \n Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n EOT = 4 * EOT_prime / np.pi * 180 \n TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n if TST / 4 < 0:\n Omega = TST/4+180\n else:\n Omega = TST/4 - 180 \n Omega_r = m.radians(Omega)\n \n cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n Zenith_r = np.arccos(cos_Zenith) #in radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle",
"def get_orbit_number(self):\n return self.read_global_attribute('sensing_orbit_number')",
"def wind_vector(v_total, v_aircraft, mag_angle=True):\n # wind = total - aircraft\n\n\n vwind = v_total - v_aircraft\n if mag_angle:\n return np.linalg.norm(vwind), head360(np.arctan2(vwind[1], vwind[0]))\n else:\n return vwind",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def getSolar():\n ina = INA219(address=int('0x44', 16))\n sol_bus_v = ina.getBusVoltage_V()\n sol_shunt_mv = ina.getShuntVoltage_mV()\n sol_curr_ma = ina.getCurrent_mA()\n sol_volt_v = (ina.getBusVoltage_V() + ina.getShuntVoltage_mV() / 1000)\n sol_power_mw = ina.getPower_mW()\n return sol_volt_v, sol_curr_ma",
"def get_pos(self) -> WAVector:\n pass",
"def find_orbit(ring, refpts: Refpts = None, **kwargs):\n if ring.is_6d:\n return find_orbit6(ring, refpts=refpts, **kwargs)\n else:\n return find_orbit4(ring, refpts=refpts, **kwargs)",
"def orbitalVelocity( self ): # returns [m/s]\n velocity = self.orbitalAngularVelocity * self.r # current orbital velocity [m/s]\n return velocity",
"def get_index(self):\n return (np.sqrt(self.dielectric))",
"def orbit(self):\n return [x for x in TransitiveIdeal(attrcall('simple_reflections'), [self])]"
] | [
"0.63372135",
"0.5902144",
"0.5708969",
"0.5705622",
"0.5473955",
"0.5333812",
"0.5275198",
"0.5232643",
"0.5220132",
"0.51939213",
"0.5165619",
"0.5133298",
"0.5122275",
"0.5114667",
"0.50612587",
"0.50426286",
"0.50406325",
"0.49916345",
"0.49804333",
"0.49263537",
"0.49151546",
"0.4902795",
"0.48615116",
"0.48606855",
"0.4843823",
"0.48394385",
"0.48374334",
"0.482848",
"0.4818792",
"0.47995514"
] | 0.80536574 | 0 |
Returns an orbital from a string representation, e.g., "s", "px". | def from_string(orb_str):
for orb in Orbital.all_orbitals:
if str(orb) == orb_str:
return orb
raise ValueError("Illegal orbital definition!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fromString(cls, string):\n # From SAM specification v1.5, slightly adapted for single-token parsing\n pattern = r\"^[0-9]+[MIDNSHPX=]\" \n string = string.strip()\n if string == '*':\n return CIGAR.fromList(['*'])\n parsed = []\n s = string\n # Parse string token (e.g. 14M) by token, re.findall is not enough,\n # because non-matching subsequences between (e.g. \"14Mblabla3D4M\") would\n # go unnoticed! Also it would be good to abort as early as possible if\n # an invalid string is found to avoid parsing possibly very long strings\n while s != '':\n r = re.match(pattern, s)\n if not r:\n raise ValueError('Invalid CIGAR string: \"'+string+'\"')\n g = r.group(0)\n parsed.append(g)\n s = s[len(g):]\n \n parsed = [(int(p[:-1]), p[-1:]) for p in parsed]\n\n return CIGAR.fromList(parsed)",
"def fromString(self, s):\n vars = s.replace(',', '').split()\n self.position = [int(vars[0]), int(vars[1])]\n print(self.position)\n self.K = np.array([[int(vars[2]), int(vars[3])], \n [int(vars[4]), int(vars[5])]])",
"def deserialize(string):\n tokens = string.split(Unipuck._SERIAL_DELIM)\n\n center = Point(int(tokens[0]), int(tokens[1]))\n radius = int(tokens[2])\n angle = float(tokens[3])\n\n return Unipuck(center, radius, angle)",
"def from_str(cls, string):",
"def from_str(klass, s):\n # Special cases\n if s == \"nan\" :\n return RatTerm(RatNum(1, 0), 1)\n elif s == \"0\" :\n return RatTerm(RatNum(0, 1), 1)\n elif s == \"x\" :\n return RatTerm(RatNum(1, 1), 1)\n elif s == \"-x\" :\n return RatTerm(RatNum(-1, 1), 1)\n \n # Exponent\n if \"^\" in s :\n expo = int(s.split(\"^\")[1])\n else :\n expo = 0\n \n # Rational coefficient \n co = s.split(\"*\")[0]\n if \"/\" in co :\n nom, sep, denom = s.partition(\"/\")\n nom = int(nom)\n denom = int(denom.split(\"*\")[0])\n # coefficient = 1\n elif s.startswith(\"x\") :\n nom = 1\n denom = 1\n # coefficient = -1\n elif s.startswith(\"-x\") :\n nom = -1\n denom = 1\n else :\n nom = int(s.split(\"*\")[0])\n denom = 1\n \n return RatTerm(RatNum(nom, denom), expo)",
"def from_str(cls, s):\n raise NotImplementedError",
"def from_string(cls, alg_str):\n try:\n return cls(int(alg_str[1]) - 1, ord(alg_str[0]) - 97)\n except ValueError as e:\n raise ValueError(\"Location.from_string {} invalid: {}\".format(alg_str, e))",
"def name_to_position(name: str) -> SkyCoord:\n # remove any characters that are not a digit, decimal, or sign\n name = re.sub(r\"[^\\d\\.\\+-]\", \"\", name)\n if \"-\" in name:\n try:\n ra, dec = name.split(\"-\")\n except ValueError as e:\n log.error(\"Error converting pulsar name '{}' to RA/Dec: {}\".format(name, e))\n return None\n sign = \"-\"\n else:\n try:\n ra, dec = name.split(\"+\")\n except ValueError as e:\n log.error(\"Error converting pulsar name '{}' to RA/Dec: {}\".format(name, e))\n return None\n sign = \"+\"\n match = re.match(\n r\"J?(?P<hour>\\d{2})(?P<minute>\\d{2,4})(?P<decimal>\\.?)(?P<frac>\\d*)\", ra\n )\n if match:\n if len(match.group(\"minute\")) == 2:\n # HHMM\n ra_hms = \"{}:{}{}{}\".format(\n match.group(\"hour\"),\n match.group(\"minute\"),\n match.group(\"decimal\"),\n match.group(\"frac\"),\n )\n elif len(match.group(\"minute\")) == 4:\n # HHMMSS\n ra_hms = \"{}:{}:{}{}{}\".format(\n match.group(\"hour\"),\n match.group(\"minute\")[:2],\n match.group(\"minute\")[2:4],\n match.group(\"decimal\"),\n match.group(\"frac\"),\n )\n else:\n log.error(\"Cannot parse RA string '{}' from source '{}'\".format(ra, name))\n return None\n else:\n log.error(\"Cannot parse RA string '{}' from source '{}'\".format(ra, name))\n return None\n match = re.match(\n r\"(?P<degree>\\d{2})(?P<minute>\\d{0,4})(?P<decimal>\\.?)(?P<frac>\\d*)\", dec\n )\n if match:\n if len(match.group(\"minute\")) == 0:\n # DD.D\n dec_dms = \"{}{}{}\".format(\n match.group(\"degree\"), match.group(\"decimal\"), match.group(\"frac\"),\n )\n\n elif len(match.group(\"minute\")) == 2:\n # DDMM\n dec_dms = \"{}:{}{}{}\".format(\n match.group(\"degree\"),\n match.group(\"minute\"),\n match.group(\"decimal\"),\n match.group(\"frac\"),\n )\n elif len(match.group(\"minute\")) == 4:\n # DDMMSS\n dec_dms = \"{}:{}:{}{}{}\".format(\n match.group(\"degree\"),\n match.group(\"minute\")[:2],\n match.group(\"minute\")[2:4],\n match.group(\"decimal\"),\n match.group(\"frac\"),\n )\n else:\n log.error(\"Cannot parse Dec string '{}' from source '{}'\".format(dec, name))\n return None\n else:\n log.error(\"Cannot parse Dec string '{}' from source '{}'\".format(dec, name))\n return None\n\n try:\n c = SkyCoord(ra_hms, sign + dec_dms, unit=(\"hour\", \"deg\"))\n except ValueError as e:\n log.error(\"Cannot parse RA/Dec {},{}: {}\".format(ra_hms, sign + dec_dms, e))\n return None\n return c",
"def parse_speed(as_str: str) -> float:\n return float(as_str.rstrip(\"x\"))",
"def from_string(cls, value='0+0j', context=None):\r\n value = value.strip()\r\n match = cls.imag_regex.match(value)\r\n if match:\r\n return cls(0, match[1], context)\r\n match = cls.cplx_regex.match(value)\r\n if match:\r\n return cls(match[1], match[2], context)\r\n raise ValueError('CDecimal.from_string argument is a malformed string')",
"def from_string(string):\n return Output('', magic=string)",
"def fromStr(cls, s):\n assert isinstance(s, str), 'incorrect type of arg s: should be type str, is type {}'.format(type(s))\n s = [ int(n) for n in s.split('.') ]\n return cls(*s)",
"def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)",
"def __coordinate_system_from_str__(value_str):\n if value_str is not None:\n if value_str == \"IJK\": return GeometryTopologyData.IJK\n elif value_str == \"RAS\": return GeometryTopologyData.RAS\n elif value_str == \"LPS\": return GeometryTopologyData.LPS\n else: return GeometryTopologyData.UNKNOWN\n else:\n return GeometryTopologyData.UNKNOWN",
"def from_str(cls, s: str):\n instr, outstr = s.split('→')\n return ExperimentSetting(in_state=TensorProductState.from_str(instr),\n observable=PauliTerm.from_compact_str(outstr))",
"def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (self.yaw,) = _struct_f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def parse(s: str) -> StateFormula:\n tree = PCTL_PARSER.parse(s.replace(\" \", \"\"))\n return PCTLTransformer.transform(tree)",
"def from_string(name: str) -> Algorithm:\n if name == \"caesar\":\n return Algorithm.caesar\n elif name == \"substitution\":\n return Algorithm.substitution\n elif name == \"transposition\":\n return Algorithm.transposition\n elif name == \"affine\":\n return Algorithm.affine\n elif name == \"vigenere\":\n return Algorithm.vigenere",
"def _parse_ra_dec(coord_str):\n if isinstance(coord_str, str):\n coord1 = coord_str.split()\n else:\n # This exception should never be raised from SkyCoord\n raise TypeError(\"coord_str must be a single str\")\n\n if len(coord1) == 6:\n coord = (\" \".join(coord1[:3]), \" \".join(coord1[3:]))\n elif len(coord1) > 2:\n coord = PLUS_MINUS_RE.split(coord_str)\n coord = (coord[0], \" \".join(coord[1:]))\n elif len(coord1) == 1:\n match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)\n if match_j:\n coord = match_j.groups()\n if len(coord[0].split(\".\")[0]) == 7:\n coord = (\n f\"{coord[0][0:3]} {coord[0][3:5]} {coord[0][5:]}\",\n f\"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}\",\n )\n else:\n coord = (\n f\"{coord[0][0:2]} {coord[0][2:4]} {coord[0][4:]}\",\n f\"{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}\",\n )\n else:\n coord = PLUS_MINUS_RE.split(coord_str)\n coord = (coord[0], \" \".join(coord[1:]))\n else:\n coord = coord1\n\n return coord",
"def from_str(s: str) -> \"Lineage\":\n match = LINEAGE_REGEX.search(s)\n if not match:\n raise InvalidLineageString(\n f\"Lineage string {s} is not in the expected format.\"\n )\n major = match.group(\"major\")\n minor = match.group(\"minor\") or None\n return Lineage(major=major, minor=minor)",
"def stringToMod(string):\n string = string.strip() \n i = 0\n symbol = \"\"\n # read the symbol\n while i < len(string) and string[i] != \"(\":\n symbol = symbol + string[i]\n i = i + 1\n # if parameters are present, get them\n if i< len(string) and string[i] == \"(\": \n i = i + 1 # skip the opening bracket\n params = string[i:(len(string) - 1)].split(\",\")\n for i in range(0,len(params)):\n params[i] = float(params[i].strip())\n return(Module(symbol,params))\n else:\n return(Module(symbol,[]))",
"def parse(s):\n return s",
"def deserialize(self, str):\n try:\n if self.c0 is None:\n self.c0 = bh_motion.msg.Vector3()\n if self.c1 is None:\n self.c1 = bh_motion.msg.Vector3()\n if self.c2 is None:\n self.c2 = bh_motion.msg.Vector3()\n end = 0\n _x = self\n start = end\n end += 36\n (_x.c0.x, _x.c0.y, _x.c0.z, _x.c1.x, _x.c1.y, _x.c1.z, _x.c2.x, _x.c2.y, _x.c2.z,) = _struct_9f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def from_string(s):\n pair, exchange = s.split('@')\n base = pair[:3]\n quote = pair[3:]\n if base not in Instrument.KNOWN_CURRENCiES:\n raise ValueError('Unknown base currency: {}'.format(base))\n if quote not in Instrument.KNOWN_CURRENCiES:\n raise ValueError('Unknown quote currency: {}'.format(quote))\n\n return Instrument(base_currency=base,\n quote_currency=quote,\n exchange_id=ExchangeID[exchange])",
"def from_string(representation):\r\n auto = Automaton()\r\n lines = [line.strip() for line in representation.split('\\n')]\r\n auto.transitions = [Transition(*line.split()) for line in lines[:-1]]\r\n auto.final_states = lines[-1].split()[1:]\r\n auto.start_state = lines[-1].split()[0]\r\n\r\n return auto",
"def toLit(string):\r\n\r\n nowLit = ast.literal_eval(string)\r\n return nowLit",
"def toLit(string):\r\n\r\n nowLit = ast.literal_eval(string)\r\n return nowLit",
"def parse(string):\n posslash = string.find('/')\n if posslash < 0:\n return Rational(int(string), 1)\n else:\n strs = string.split('/')\n return Rational(int(strs[0].strip()), int(strs[1].strip()))",
"def fromstring(cls, string):\n string = cls.normalize_puzzle_string(string)\n size = int(sqrt(len(string)))\n square_size = int(sqrt(size))\n if size**2 != len(string) or square_size**2 != size:\n raise ValueError(\"Invalid input string length: %d\" % len(string))\n # TODO: remove this constraint for larger puzzles:\n if square_size != 3:\n raise ValueError(\"Code currently only supports 9x9 puzzles\")\n\n self = cls()\n # Fill in the cells at the places that are specified in the string\n for coords, char in zip(self.cells(), string):\n if char != '.':\n self.assign_value(coords, int(char))\n\n return self",
"def fromisoformat(cls, time_string):\n if not isinstance(time_string, str):\n raise TypeError(\"fromisoformat: argument must be str\")\n\n try:\n return cls(*_parse_isoformat_time(time_string))\n except Exception:\n raise ValueError(f\"Invalid isoformat string\")"
] | [
"0.5704035",
"0.5645704",
"0.56413406",
"0.558188",
"0.5521622",
"0.5513611",
"0.5464635",
"0.54529065",
"0.5443319",
"0.5428483",
"0.5418516",
"0.53966254",
"0.53935385",
"0.53825116",
"0.5334332",
"0.5319566",
"0.52879316",
"0.5284919",
"0.5277784",
"0.5237857",
"0.52367216",
"0.52273875",
"0.5219361",
"0.5210007",
"0.52000386",
"0.5181321",
"0.5181321",
"0.51730293",
"0.5158482",
"0.5152681"
] | 0.67074496 | 0 |
We use cli_url to set the CLI URL and reflect it in cli_area so it can be read from JS. | def _get_cli_area(self):
for rec in self:
rec.cli_area = rec.cli_url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_cli_area(self):\n pass",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def _cli():\n pass"
] | [
"0.67812735",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6515039",
"0.6471206"
] | 0.68657756 | 0 |
Asks the user to guess numbers and turns the string into a list | def user_guess():
return list(input("What is your guess?")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_input():\n guess_num = int(input(\"please type four-digit: \"))\n guess_list = list(str(guess_num))\n return guess_list",
"def convert_str_input_into_list_of_int(guess_input):\n guess = list(guess_input)\n guess = [int(x) for x in guess]\n return guess",
"def get_user_list(question):\n return [int(s) for s in input(question).split()]",
"def make_list():\n num_list = []\n tries = 3\n for i in range(tries):\n number = get_input()\n try:\n number = int(number)\n except ValueError:\n print(\"Numbers Only.\")\n else:\n num_list[len(num_list):] = [number]\n return num_list",
"def list_input() -> [int]:\n lst = [int(x) for x in input(\"Introduceti numerele listei separate cu un spatiu: \").split(\" \")]\n\n return lst",
"def from_input_to_list(inputted_string):\n\n created_list = [int(i) for i in inputted_string]\n\n return created_list",
"def challengeInput(self):\r\n firstLine = input().split(' ')\r\n secondLine = input().split(' ')\r\n numbers = []\r\n for s in firstLine:\r\n numbers.append(float(s) )\r\n for s in secondLine:\r\n numbers.append(float(s) )\r\n return numbers",
"def user_picks():\r\n print (\"Enter the second to last posted Fantasy 5 lotto numbers from 1 to 42:\")\r\n ui = []\r\n while len(ui) < 5:\r\n print (len(ui) + 1,)\r\n try:\r\n i = int(input(\"--> \" ))\r\n # check if i is unique and has a value from 1 to 42\r\n # and is an integer, otherwise don't append\r\n if (i not in ui) and (1 <= i <= 42): \r\n ui.append(i)\r\n except:\r\n print (\"Enter an integer number!\")\r\n return ui",
"def get_input():\n input_str = input(\"Enter elements to be sorted: \")\n try:\n elements = [int(e) for e in input_str.split()] #make a list of integers from input string\n except ValueError:\n print(\"Please enter a list of integers only, seperated by a space!!\")\n return elements",
"def prompt_for_numbers():\n\n numbers = []\n print(\"Enter a series of numbers, with -1 to quit\")\n\n num = 0\n\n while num != -1:\n num = int(input())\n\n if num != -1:\n numbers.append(num)\n\n return numbers",
"def create_game_from_string(self, numbers_string:str):\n game = []\n number_chars = numbers_string.split()\n for char in number_chars:\n game.append(int(char))\n\n if self.verify_valid_game(game):\n return game",
"def get_int_list_input(prompt, invalid_prompt):\n\n input_list = []\n is_input_valid = False\n\n while not is_input_valid:\n is_input_valid = True\n input_text = input(prompt)\n\n #Empty input is valid too\n if len(input_text) == 0:\n break\n\n try:\n for txt in input_text.split(\",\"):\n input_list.append(int(txt))\n except ValueError:\n input_list = []\n is_input_valid = False\n\n if invalid_prompt != None:\n print(invalid_prompt.format(input_text))\n else:\n break\n\n return (is_input_valid, input_list)",
"def get_numbers():\n print('Hello, you are in Fermat\\'s Last Theorem checker')\n print('Type:')\n a = int(input('a='))\n b = int(input('b='))\n c = int(input('c='))\n n = int(input('n='))\n return (a, b, c, n)",
"def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number",
"def game():\n\n secret_number = random.randint(1, 10)\n\n guess_list = []\n\n while len(guess_list) < 5:\n guess = input(\"Guess a number between 1 and 10: \")\n\n try:\n guess = int(guess)\n except ValueError:\n print(\"{} is not a number!\".format(guess))\n else:\n if guess == secret_number:\n print(\"You guessed it! My number was {}\".format(secret_number))\n play_again()\n break\n elif guess > secret_number:\n print(\"{} is too high!\".format(guess))\n else:\n print(\"{} is too low!\".format(guess))\n\n guess_list.append(guess)\n else:\n print(\"You ran out of guesses!\")\n play_again()",
"def populate_array_input():\n arr = []\n while True:\n val = input(\"Enter a number, type a non-number to quit: \")\n try:\n val = int(val)\n except ValueError:\n break\n arr.append(val)\n return arr",
"def clues_generator(code, userGuess):\n if userGuess == code:\n return \"Code Cracked!\"\n\n clues = []\n\n # Compare guess to code\n for ind, num in enumerate(userGuess):\n if num == code[ind]:\n clues.append(\"Match\")\n elif num in code:\n clues.append(\"Close\")\n if clues == []:\n return [\"Nope\"]\n else:\n return clues",
"def parse_input(string):\n return [int(vote) for vote in string.split()]",
"def userinput(prompttext=\"\", times=1):\n\n # If times is 1\n if times == 1:\n # Return the result\n return input(str(prompttext))\n\n # Create new empty list\n inputlist = []\n\n # For each time in range\n for _ in range(times):\n # Append the result of another input request\n inputlist.append(input(str(prompttext)))\n\n # Return the final result\n return inputlist",
"def guess_number(min_guess_range, max_guess_range):\n\tprint(f'guess the number between {min_guess_range} and {max_guess_range}!')\n\treturn check_input(min_guess_range, max_guess_range)",
"def create_input_list(prompt):\n list_countries = input(prompt).split(\", \")\n list_countries = [x.lower() for x in list_countries] \n return list_countries",
"def get_numbers(string:str, type_=\"int\") -> list:\n \n num_list = []\n for word in string.split():\n if type_ == \"int\":\n try:\n num_list.append(int(word))\n except:\n pass\n elif type_ == \"float\":\n if isfloat(word):\n num_list.append(float(word))\n return num_list",
"def guessnum2(num):\n import random\n guess_list = [i for i in range(1, 101)] # generates a list of integers from 1-100\n tries = 0\n guess = random.choice(guess_list) # randomly selects a number from guess_list\n while guess != num:\n guess_list.remove(guess) # removes the randomly guessed number from the list\n guess = random.choice(guess_list) # makes another random guess\n tries += 1\n print(f'I got it right in {tries} tries.')",
"def initialize_suggestion_list(suggestion_num):\n y = 0\n suggestion_list = []\n while y < suggestion_num:\n suggestion_list.append(\"\")\n y += 1\n return suggestion_list",
"def create_list():\n input_list = []\n input_from_user = input()\n while input_from_user != STOPPER_STRING:\n input_list.append(input_from_user)\n input_from_user = input()\n return input_list",
"def input_grid(grid: List[str] = None) -> List[str]:\n if grid is None:\n print(\"Please enter grid as strings, divided by commas\")\n in_grid = input(\"Grid: \")\n processed_grid = in_grid.replace(' ', '').split(',')\n return processed_grid\n else:\n return grid",
"def prepare_numbers(numbers):\n \n numb = []\n for item in numbers:\n numb.append(int(item))\n return numb",
"async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))",
"def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. '\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()",
"def populate_lists_loop() -> list:\r\n n = int(input(\"Enter the amount of desired lists: \"))\r\n total_ls = []\r\n for _ in range(n):\r\n ls = input(\r\n f\"Enter element for the list number {_+1} each seperated with comma: \"\r\n )\r\n ls = list(map(int, ls.split(\",\")))\r\n total_ls.append(ls)\r\n return total_ls"
] | [
"0.7755962",
"0.69793016",
"0.6891124",
"0.6676904",
"0.63334835",
"0.63176185",
"0.62182146",
"0.60630065",
"0.60263",
"0.5925718",
"0.58965445",
"0.5883831",
"0.58714354",
"0.58387905",
"0.5829174",
"0.5744655",
"0.5730842",
"0.5711055",
"0.5682094",
"0.5649758",
"0.56178695",
"0.55888826",
"0.5573967",
"0.5571039",
"0.5545069",
"0.5541653",
"0.5463427",
"0.54502076",
"0.5428847",
"0.5426261"
] | 0.73835796 | 1 |
It takes the code generated by the machine and the user's guess, then compares the numbers in a loop and creates a list of clues according to the matching parameters | def clues_generator(code, userGuess):
if userGuess == code:
return "Code Cracked!"
clues = []
# Compare guess to code
for ind, num in enumerate(userGuess):
if num == code[ind]:
clues.append("Match")
elif num in code:
clues.append("Close")
if clues == []:
return ["Nope"]
else:
return clues | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval_guess(self, Guess):\n\n\t\t# pulls comparison from win check and assigns peg responses \n\n\t\t# returns a list to be in hint_response\n\n\t\t# displays as part of big display in view.\n\n\t\t\"\"\"Borrow the logic from win_check to implement eval_guess. Use variables right and wrong to \n\t\tevaluate. Right = Black peg. Wrong = no peg. \n\n\t\tWhite will be generated from a third loop to compare the entire list\"\"\"\n\n\n\t\tpass",
"def AIguessing(lijst):\n\n global Code\n global allcombos\n\n\n AIguess = choice(lijst)\n\n print(f\"The original code was {Code}\")\n print(f\"my guess this time is {AIguess}, how did I do?\")\n while not feedbackgiven:\n correct = int(input(\"Write down how many colors are in the right spot: \"))\n semicorrect = int(input(\"Write down how many colors are correct but not in the right spot: \"))\n\n feedback = correct + semicorrect\n if feedback <= 4:\n return NewFeedbackSystem(AIguess, correct, semicorrect, lijst)\n else:\n print(\"please use numbers 1-4 where the total <= 4\")\n continue",
"def NewFeedbackSystem(guess, correct, semicorrect, lijst):\n\n global allcombos\n global usedcombos\n global all_right\n\n\n feedback = correct + semicorrect\n\n usedcombos.append(guess)\n\n if not allright: #needs an extra way to AT LEAST get the same feedback as previous one\n\n if feedback == 4: #takes all letters in the code and checks for possible new combinations, adds them to the list\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n results = permutations(f\"{A}{B}{C}{D}\", 4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n all_right = True\n return AIguessing(newcombos)\n\n elif feedback == 3: #takes all letters in the code and checks for possible new combinations with >= 3 from previous code, adds them to the list\n results = permutations(guess, 3)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif feedback == 2:\n #takes all letters in the code and checks for possible new combinations with >= 2 from previous code, adds them to the list\n results = permutations(guess, 2)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif feedback == 1:\n #takes all letters in the code and checks for possible new combinations with >= 1 from previous code, adds them to the list\n results = combinations(guess, 1)\n newresult = list(dict.fromkeys(resulttolist(results)))\n\n\n return compareWithAll(newresult, lijst)\n\n else:\n #takes all letters in the code and checks for possible new combinations WITHOUT these letters, adds them to the list\n newletterlist = [item for item in letters if item not in guess] #creates a new list with letters that weren't used\n newletters = \"\".join(newletterlist)\n\n results = product(newletters, repeat=4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)\n\n else: #if all letters were guessed correctly\n\n if correct == 4:\n return (\"Well played Human, but I win this time\")\n\n elif correct == 2: #in a 2,2 case, checks which combinations are possible while keeping 2 on the same spot each time\n\n results = permutations(guess, 2)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif correct == 1: #in a 1,3 case, creates a list with still possible combinations (since there'll be only 8, it's hardcoded in here)\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n newcombos = [f\"{A}{C}{D}{B}\", f\"{A}{D}{B}{C}\", f\"{C}{B}{D}{A}\", f\"{D}{B}{A}{C}\", f\"{B}{D}{C}{A}\", f\"{D}{A}{C}{B}\", f\"{B}{C}{A}{D}\", f\"{C}{A}{B}{D}\"]\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)\n\n else:\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n results = permutations(f\"{A}{B}{C}{D}\", 4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)",
"def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")",
"def display_platform(wrong_guesses):\n zero_incorrect = \"\"\"\n _________\n | |\n |\n |\n |\n |\n |\n |\n _________|_______\n \"\"\"\n\n one_incorrect = \"\"\"\n _________\n | |\n O |\n |\n |\n |\n |\n |\n _________|_______\n \"\"\"\n\n two_incorrect = \"\"\"\n _________\n | |\n O |\n | |\n | |\n |\n |\n |\n _________|_______\n \"\"\"\n\n three_incorrect = \"\"\"\n _________\n | |\n O |\n --| |\n | |\n |\n |\n |\n _________|_______\n \"\"\"\n\n four_incorrect = \"\"\"\n _________\n | |\n O |\n --|-- |\n | |\n |\n |\n |\n _________|_______\n \"\"\"\n\n five_incorrect = \"\"\"\n _________\n | |\n O |\n --|-- |\n | |\n / |\n |\n |\n _________|_______\n \"\"\"\n\n six_incorrect = \"\"\"\n _________\n | |\n O |\n --|-- |\n | |\n / \\ |\n |\n |\n _________|_______\n \"\"\"\n\n if num_wrong_attempts == 0:\n print(zero_incorrect)\n if num_wrong_attempts == 1:\n print(one_incorrect) \n if num_wrong_attempts == 2: \n print(two_incorrect) \n if num_wrong_attempts == 3: \n print(three_incorrect)\n if num_wrong_attempts == 4: \n print(four_incorrect)\n if num_wrong_attempts == 5: \n print(five_incorrect) \n if num_wrong_attempts == 6: \n print(six_incorrect)",
"def initiate_codes(lottery_file):\n # load the lottery data\n lottery = {}\n with open(lottery_file) as lf:\n head = lf.readline()\n prev = None\n for line in lf:\n info = line.strip().split('|')\n issue = info[0]\n nums = map(int, info[1:])\n lottery[issue] = {\"numbers\": nums, \"previous\":prev, \"issue\": issue}\n prev = issue\n\n # get the missing info for 20150901001\n issues = sorted(lottery.keys())\n lot_miss_info = {}\n for issue in issues[100:]:\n lot_miss_info[issue] = {}\n # 0: ten thousand, 1: thousand, 2: hundred, 3: ten, 4: unit\n for i in range(5):\n lot_miss_info[issue][i] = {}\n for dig in range(10):\n lot_miss_info[issue][i][dig] = 0\n mis_count = 0\n # trace back and get the previous appearence\n cur = issue\n while True:\n lot = lottery[cur]\n if lot[\"numbers\"][i] == dig:\n break\n else:\n mis_count += 1\n cur = lot[\"previous\"]\n lot_miss_info[issue][i][dig] = mis_count\n\n # compute the codes information\n codes = {}\n for issue in issues[100:]:\n # currently we only consider unit(4) and ten(3) digit codes\n # we have defined 7 codes\n # luo_ma: 当前中奖数字\n # leng_1_ma: 当前期中最大间隔的数字\n # leng_2_ma: 当前期中第二大间隔的数字\n # sha_ma: 十位(落码-1), 个位(落码*3+3)\n # chuan_1: 落码-1\n # chuan_2: 落码+1\n # 隔码: 上一期的落码\n codes[issue] = {}\n for dig in range(3, 5):\n code = compute_code(issue, dig, lottery, lot_miss_info)\n codes[issue][dig] = code\n\n # compute the match information\n matched = {} # 只匹配落/杀/冷12码\n full_matched = {}# 匹配所有6码\n match_keys = [\"luo_ma\", \"leng_1_ma\", \"leng_2_ma\", \"sha_ma\"]\n \n full_match_keys = match_keys + [\"chuan_1\", \"chuan_2\", \"ge_ma\"]\n for issue in issues[101:]:\n prev_id = lottery[issue][\"previous\"]\n numbers = lottery[issue][\"numbers\"]\n prev_code = codes[prev_id]\n flag, full_flag = update_match(lottery[issue], prev_code)\n matched[issue] = flag\n full_matched[issue] = full_flag\n\n # compute the l4z1hbz\n l4z1hbz_seq = {}\n for issue in issues[108:]:\n l4z1hbz_seq[issue] = compute_l4z1hbz(issue, matched, lottery)\n\n return lottery, lot_miss_info, codes, matched, full_matched, l4z1hbz_seq",
"def apply_guess(self, guess, current):\n\n k = 0\n guesss = str(guess)\n number = str(self.code)\n revealed = [\"*\", \"*\", \"*\", \"*\"]\n\n for i in number:\n l = 0\n for j in guesss:\n if j == i and k == l:\n revealed[l] = \"X\"\n elif j == i and k != l:\n revealed[l] = \"O\"\n \n \n l += 1\n \n k +=1\n \n m = \"\"\n\n for i in revealed:\n m += i\n self.applied_guess[current] = m\n\n gu =[]\n gr = 0\n g = int(guess % 10)\n gr = guess - g\n gu.insert(0 , g)\n g = int((gr % 100) / 10)\n gr = gr - (g * 10)\n gu.insert(0 , g)\n g = int((gr % 1000) / 100)\n gr = gr - (g * 100)\n gu.insert(0 , g)\n g = int((gr % 10000) / 1000)\n gr = gr - (g * 1000)\n gu.insert(0 , g)\n m = \"\"\n for v in gu:\n m += f\"{v}\"\n\n self.user_guess[current] = m",
"def master_mind(code = Code(), display=False, silent=False):\n\tfrom itertools import permutations\n\tif not silent:\n\t\tprint(f\"code is {code}\")\n\n\tturn = 0\n\tcompleted = False\n\tused_colors = []\n\tcertain = [None,None,None,None,None]\n\timpossible = []\n\ttrace_attemps = {}\n\tto_pos_test = None\n\tbits = 0\n\twhile not completed:\n\t\t# 1st phase\n\t\t# determine the used colors and get some information on their positions\n\n\t\t#import pdb; pdb.set_trace()\n\n\t\tfor color in range(9):\n\t\t\tif to_pos_test is not None:\n\t\t\t\t# determining the next possible position to do the position test on\n\t\t\t\t# get all non-determined positions\n\t\t\t\t# remove the one where the color cannot be\n\t\t\t\t# get the first one or 0\n\t\t\t\tpossible = [k for k,v in enumerate(certain) if v is None]\n\t\t\t\tfor to_remove in [k for nope in impossible for k,v in enumerate(nope) if v == to_pos_test]:\n\t\t\t\t\tif to_remove in possible:\n\t\t\t\t\t\tpossible.remove(to_remove)\n\t\t\t\tposition = next(iter(possible), 0)\n\t\t\t\tattempt = [color for i in range(position)] + [to_pos_test] + [color for i in range(4-position)]\n\t\t\t\tif display:\n\t\t\t\t\t\tprint(attempt)\n\t\t\t\t\t\tprint(f'testing {color} and {to_pos_test} at {position}')\n\t\t\t\tscore = code.score(attempt.copy())\n\t\t\t\tif display:\n\t\t\t\t\tprint(f\"adding {attempt} to trace_attemps\")\n\t\t\t\ttrace_attemps[\",\".join([str(color) for color in attempt])]=score\n\t\t\t\tnext_color = color if score[0]+score[1] > len(used_colors) else to_pos_test\n\n\t\t\t\tif display:\n\t\t\t\t\tprint(score)\n\t\t\t\tif score[1] > 0: # 1 answer bit can be white (2 in some rare cases)\n\t\t\t\t\tused_colors+= [color for i in range(score[0] + score[1]-1)]\n\t\t\t\t\timpossible.append(\n\t\t\t\t\t\t[None for i in range(position)] + [to_pos_test] + [None for i in range(4-position)]\n\t\t\t\t\t)\n\t\t\t\t\tif display:\n\t\t\t\t\t\tprint(f'color {to_pos_test} cannot be at position {position}')\n\t\t\t\telse:\n\t\t\t\t\tused_colors+= [color for i in range(score[0] - 1)]\n\t\t\t\t\tcertain[position] = to_pos_test\n\t\t\t\t\tif display:\n\t\t\t\t\t\tprint(f'color {to_pos_test} is certain to be at position {position}')\n\t\t\t\tto_pos_test = next_color\n\n\t\t\telse:\n\t\t\t\tif display:\n\t\t\t\t\t\tprint(f'testing {color}')\n\t\t\t\tattempt = [color,color,color,color,color]\n\t\t\t\tif display:\n\t\t\t\t\tprint(attempt)\n\t\t\t\tscore = code.score(attempt.copy())\n\t\t\t\tif display:\n\t\t\t\t\tprint(f\"adding {attempt} to trace_attemps\")\n\t\t\t\ttrace_attemps[\",\".join([str(color) for color in attempt])]=score\n\t\t\t\tif display:\n\t\t\t\t\tprint(score)\n\t\t\t\tused_colors+= [color for i in range(score[0])]\n\t\t\t\tif score[0] > 0:\n\t\t\t\t\tto_pos_test = color\n\t\t\tif len(used_colors) == 5:\n\t\t\t\tbreak\n\t\t\tturn += 1\n\n\t\tif display:\n\t\t\tprint(f\"colors found are {used_colors}\")\n\t\t\tprint(f'certain : {certain}')\n\t\t\tprint(f'impossible:')\n\t\t\tfor nope in impossible:\n\t\t\t\tprint(nope)\n\t\t\tprint(f'trace_attemps:')\n\t\t\tfor trace in trace_attemps:\n\t\t\t\tprint(trace)\n\n\t\t# 2nd phase\n\t\tfor attempt in list(permutations(used_colors)):\n\t\t\tattempt = list(attempt)\n\t\t\tcoherent = True\n\t\t\tfor trace in trace_attemps:\n\t\t\t\tif display:\n\t\t\t\t\tprint(f\"checkibg coherence with {trace}\")\n\t\t\t\tif Code([int(color) for color in trace.split(',')]).score(attempt.copy()) != trace_attemps[trace]:\n\t\t\t\t\tcoherent = False\n\n\t\t\tif (\n\t\t\t\tcoherent \n\t\t\t\tand attempt == [certain[i] or 
attempt[i] for i in range(5)] # must comply certain\n\t\t\t\tand not any([attempt[i] == nope[i] for nope in impossible for i in range(5)]) # must be completly different than impossible\n\t\t\t):\n\t\t\t\tif display:\n\t\t\t\t\tprint(f\"attempt {attempt}\")\n\t\t\t\tscore = code.score(attempt.copy())\n\t\t\t\ttrace_attemps[\",\".join([str(color) for color in attempt])]=score\n\t\t\t\tcompleted = score == (5,0)\n\t\t\t\tif completed:\n\t\t\t\t\tanswer = attempt\n\t\t\t\t\tbreak\n\t\t\t\tturn +=1\n\tif not silent:\n\t\tprint(f\"the code was {answer}\")\n\t\tprint(f\"completed in {turn} turn(s)\")\n\telse:\n\t\treturn turn",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def rpsls(player_guess):\n\n\n # convert name to player_number using name_to_number\n \n player_number = name_to_number(player_guess)\n \n # compute random guess for comp_number using random.randrange()\n \n comp_number = random.randrange(0, 5)\n \n # compute difference of player_number and comp_number modulo five\n \n difference = (player_number - comp_number) % 5\n \n # use if/elif/else to determine winner (but don't forget that players can tie !)\n \n if difference == 1 or difference == 2:\n result = \"Player wins\"\n elif difference == 3 or difference == 4:\n result = \"Computer wins\"\n else:\n result = \"Player and computer tie!\"\n \n # convert comp_number to name using number_to_name\n \n comp_guess = number_to_name(comp_number)\n \n # print results\n \n print \"Player chooses\", player_guess\n print \"Computer chooses\", comp_guess\n print result\n print",
"def results_of_guess(self):\r\n print(self.best_guess)\r\n print(self.chosen_letter)\r\n \r\n #self.best_guess = input(\"Enter word with correct letters and stars \" + \"as blank spaces.\")\r\n wrong_words = set()\r\n if self.chosen_letter in self.best_guess: # in case of success\r\n print(\"hit\")\r\n list_of_indices = [i for i, value in enumerate(self.best_guess) \r\n if value == self.chosen_letter]\r\n for word in self.valid_words:\r\n for index in list_of_indices:\r\n if word[index] != self.chosen_letter:\r\n wrong_words.add(word)\r\n elif word.count(self.chosen_letter) > len(list_of_indices):\r\n wrong_words.add(word)\r\n \r\n else: # in case of failure\r\n print(\"miss\")\r\n for word in self.valid_words:\r\n if self.chosen_letter in word:\r\n wrong_words.add(word)\r\n self.valid_words = self.valid_words.difference(wrong_words)",
"def advancedGuessingGame():\n\n print(\"\\nWelcome to the guessing game!\")\n print(\"A number between _ and _ ?\")\n\n lowerBound = not_number_rejector(\"Enter Lower Bound: \")\n\n higher_number = False # we need to set an upper and lowerbound for game\n\n while not higher_number:\n upperBound = not_number_rejector(\"Enter Upper Bound: \")\n if upperBound > lowerBound:\n higher_number = True\n else:\n print(\"The upperbound is lower than you lowerbound: TRY AGAIN\")\n\n # above code ensures upper > lower, see stubbon_asker in EX1\n\n print(\"OK then, guess a number between {} and {} ?\".format(lowerBound, upperBound))\n lowerBound = int(lowerBound) # ensures integer is give (Not a letter)\n upperBound = int(lowerBound)\n\n actualNumber = random.randint(lowerBound, upperBound)\n\n guessed = False\n\n while not guessed:\n guessedNumber = not_number_rejector(\"Make a guess: \")\n print(\"You guessed {},\".format(guessedNumber),)\n if guessedNumber == actualNumber:\n print(\"HOW DID YOU GET THAT! It was {}\".format(actualNumber))\n guessed = True\n elif guessedNumber > upperBound:\n print(\"This is higher than the upperbound! Try again!\")\n elif guessedNumber < lowerBound:\n print(\"This is lower than the lowerbound! Try again!\")\n elif guessedNumber < actualNumber:\n print(\"{} is too small, try again\".format(actualNumber))\n else:\n print(\"{} is too big, try again \".format(actualNumber))\n return \"You got it!\"\n # the tests are looking for the exact string \"You got it!\". Don't modify that!",
"def lotto_number_picker():\r\n will_win = False\r\n main_numbers = []\r\n star_numbers = []\r\n\r\n while will_win is False:\r\n will_win = bool(random.Random(int(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))).getrandbits(1))\r\n if will_win is True:\r\n while len(main_numbers) <= 4:\r\n main_number = random.Random(int(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))).randrange(1, 51)\r\n if main_number in main_numbers:\r\n pass\r\n else:\r\n main_numbers.append(main_number)\r\n while len(star_numbers) <= 1:\r\n star_number = random.Random(int(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))).randrange(1, 13)\r\n if star_number in star_numbers:\r\n pass\r\n else:\r\n star_numbers.append(star_number)\r\n\r\n print(\"Lotto Numbers:\")\r\n for main_number_p in sorted(main_numbers):\r\n print('\\t{}'.format(main_number_p))\r\n print(\"Lucky Star Numbers:\")\r\n for star_number_p in sorted(star_numbers):\r\n print('\\t{}'.format(star_number_p))\r\n print(\"\"\"\r\nIf you win more than a tenner using this you owe me a pint or...\r\n\r\n\r\n -- __\r\n ~ (@) ~~~---_\r\n { `-_~,,,,,,)\r\n { (_ ',\r\n ~ . = _',\r\n ~ '. =-'\r\n ~ :\r\n . -~ ('');\r\n '. --~ \\ \\ ;\r\n '.-_ -~ \\ \\; _-=,.\r\n -~- _ -~ { '---- _'-=,.\r\n ~- _~- _ _ -~ ~---------=,.`\r\n ~- ~~-----~~~~~~ .+++~~~~~~~~-__ /\r\n ~- __ { - + } /\r\n ~- ______{_ _ -=\\ / /_ ~\r\n : ~--~ // / ..-\r\n : / / // / ((\r\n : / / { `-------,. ))\r\n : / ''=--------. }o\r\n .=._________,' ) ))\r\n ) _________ -'' ~~\r\n / / _ _\r\n (_.-.'O'-'.\r\n\r\n Raptors!!!\r\n \"\"\")",
"def guess_a_number():\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function\n random_number=randint(0,100)\n \n '''versuche=0\n max_versuche=5\n guess=-1\n test= False\n while guess != random_number:\n while test == False:\n guess= input('Gib eine Zahl zwischen 0 und 100 ein: ')\n try:\n guess= int(guess)\n test=True\n except ValueError:\n print('Try Again')\n \n if guess == random_number:\n print('Du hast die Zahl erraten!')\n elif guess > random_number:\n print('Die Zahl ist zu gross')\n versuche=versuche+1\n else:\n print('Die Zahl ist zu klein')\n versuche=versuche+1'''",
"def run_game():\n\n #global correct\n correct = False\n\n code = create_code()\n show_instructions()\n\n turns = 0\n while not correct and turns < 12:\n #print(code)\n correct_digits_and_position = take_turn(code)\n turns += 1\n #print(correct_digits_and_position[0])\n correct = check_correctness(turns, correct_digits_and_position[0])\n #print(correct)\n\n show_code(code)",
"def start_game(self):\n self.code = code.get_random_num()\n self.Player1 = self.get_player(1)\n self.Player2 = self.get_player(2)\n attempt = self.Player1.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n check.check(num_guessed_list, right_answer_list)\n attempt = self.Player2.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n output = check.check(num_guessed_list, right_answer_list)\n play = end_game.end_game(output)\n if play == True:\n self.keep_playing()",
"def compareWithAll(lijst, previouslist, feedback = 0):\n\n global usedcombos\n\n results = []\n\n\n if feedback == 2: #to make sure there's a 2 letter combination with gaps\n for i in previouslist:\n for letter1, letter2 in lijst:\n if letter1 in i and letter2 in i:\n results.append(i)\n\n elif feedback == 3: #to make sure there's a 3 letter combination with gaps\n for i in previouslist:\n for letter1, letter2, letter3 in lijst:\n if letter1 in i and letter2 in i and letter3 in i:\n results.append(i)\n else:\n for i in previouslist:\n\n for j in range(len(lijst)):\n\n if lijst[j] in i:\n\n results.append(i)\n\n results = [item for item in results if item not in usedcombos]\n results = list(dict.fromkeys(results))\n\n print(f\"It seems I only {len(results)} options left!\")\n\n return AIguessing(results)",
"def guess(mqtt_client, number_to_guess_entry):\n # TODO: 5. Uncomment the line of code below to make guesses with EV3.\n mqtt_client.send_message(\"guess\", [int(number_to_guess_entry.get())])\n number_to_guess_entry.delete(0, 'end')\n # Note: You can play the game with only TO DO 5 complete, but it will be easier to solve if you do TO DO 6 as well.",
"def part2():\n commands = ['A,A,B,C,B,A,C,B,C,A',\n 'L,6,R,12,L,6,L,8,L,8',\n 'L,6,R,12,R,8,L,8',\n 'L,4,L,4,L,6',\n 'n']\n\n cpu = IntCodeProcessor(path='day17input.txt', overrides = [2])\n result = cpu.execute_program(input_channel = compile_commands(commands))\n\n if result[-2:] == [10,10]:\n print_map(result)\n else:\n print(f'Part 2 answer: {result[-1]}')",
"def game_code(user_input, secret_word, my_letters, guess_count):\n#if str.isalpha(myinput1) == True and myinput1 not in my_letters and guess_count > 0:\n if user_input in secret_word and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Good guess: ' + mytempstr1)\n return 0\n elif user_input in ['a','e','i','o','u'] and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! That letter is not in my word: ' + mytempstr1)\n return 1\n elif len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! That letter is not in my word: ' + mytempstr1)\n return 2",
"def create_guess_code(self, pegs):\n\t\tselected_pegs = random.sample(pegs, 4)\n\t\t\n\t\treturn selected_pegs",
"def _create_hint(self, code, guess): \n hint = \"\"\n for index, letter in enumerate(guess):\n if code[index] == letter:\n hint += \"x\"\n elif letter in code:\n hint += \"o\"\n else:\n hint += \"*\"\n return hint",
"def compare_cards(self, guess):\n \n \"\"\"\n Compares cards to determine higher_lower, \n compares result with guess\n Args: \n self: : An instance of Dealer.\n self.card_1: int\n self.card_2: int\n guess: bool\n \"\"\"\n card_str_1 = self.get_card_str(self.card_1)\n card_str_2 = self.get_card_str(self.card_2)\n if guess: \n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score += 100\n if not guess:\n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score += 100\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score -= 75",
"def main():\n args = get_args()\n guess = args.guess.upper()\n choice = args.choice\n \n dict = {1: 'Arcosanti', 2: 'Intercept', 3: 'Positions', 4: 'Collected', 5: 'Gemstones', 6: 'Cellulose', 7: 'Mastodons', 8: 'Meteorite', 9: 'Phoenixes', 10: 'Fireflies'} \n \n if choice > 10 or choice < 1: \n die('The choice number must be between 1 and 10') \n base = dict[choice].upper()\n if len(guess) < len(base) or len(guess) > len(base):\n die('The guess must be the same length as the word: {}'.format(len(base)))\n \n for i,j in zip(guess, base): \n if i == j:\n print('{}'.format(i),end='') \n else:\n print('_', end='')\n print('\\n')",
"def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result",
"def run(self):\n try:\n if self.guess:\n self.from_all()\n return\n\n if self.unix:\n result, indiv_output, combined_output, reason = self.from_unix_sec()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.umil:\n result, indiv_output, combined_output, reason = self.from_unix_milli()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.wh:\n result, indiv_output, combined_output, reason = self.from_win_64_hex()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.whle:\n result, indiv_output, combined_output, reason = self.from_win_64_hexle()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.chrome:\n result, indiv_output, combined_output, reason = self.from_chrome()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.active:\n result, indiv_output, combined_output, reason = self.from_ad()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.uhbe:\n result, indiv_output, combined_output, reason = self.from_unix_hex_32be()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.uhle:\n result, indiv_output, combined_output, reason = self.from_unix_hex_32le()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.cookie:\n result, indiv_output, combined_output, reason = self.from_cookie()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.oleb:\n result, indiv_output, combined_output, reason = self.from_ole_be()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.olel:\n result, indiv_output, combined_output, reason = self.from_ole_le()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.mac:\n result, indiv_output, combined_output, reason = self.from_mac()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hfsdec:\n result, indiv_output, combined_output, reason = self.from_hfs_dec()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hfsbe:\n result, indiv_output, combined_output, reason = self.from_hfs_be()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hfsle:\n result, indiv_output, combined_output, reason = self.from_hfs_le()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.fat:\n result, indiv_output, combined_output, reason = self.from_fat()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.msdos:\n result, indiv_output, combined_output, reason = self.from_msdos()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.systime:\n result, indiv_output, combined_output, reason = self.from_systime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ft:\n result, indiv_output, combined_output, reason = self.from_filetime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hotmail:\n result, indiv_output, combined_output, reason = self.from_hotmail()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.pr:\n result, indiv_output, combined_output, reason = self.from_prtime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.auto:\n result, indiv_output, combined_output, reason = self.from_ole_auto()\n if indiv_output is 
False:\n print(reason)\n else:\n print(indiv_output)\n if self.ms1904:\n result, indiv_output, combined_output, reason = self.from_ms1904()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ios:\n result, indiv_output, combined_output, reason = self.from_ios_time()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.sym:\n result, indiv_output, combined_output, reason = self.from_sym_time()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.gps:\n result, indiv_output, combined_output, reason = self.from_gps_time()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.eitime:\n result, indiv_output, combined_output, reason = self.from_eitime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.bplist:\n result, indiv_output, combined_output, reason = self.from_bplist()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.gsm:\n result, indiv_output, combined_output, reason = self.from_gsm()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.vm:\n result, indiv_output, combined_output, reason = self.from_vm()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.tiktok:\n result, indiv_output, combined_output, reason = self.from_tiktok()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.twitter:\n result, indiv_output, combined_output, reason = self.from_twitter()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.discord:\n result, indiv_output, combined_output, reason = self.from_discord()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ksuid:\n result, indiv_output, combined_output, reason = self.from_ksuid()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.mastodon:\n result, indiv_output, combined_output, reason = self.from_mastodon()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.meta:\n result, indiv_output, combined_output, reason = self.from_metasploit()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.sony:\n result, indiv_output, combined_output, reason = self.from_sony()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.uu:\n result, indiv_output, combined_output, reason = self.from_uuid()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.timestamp:\n self.to_timestamps()\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))",
"def user_guess():\n return list(input(\"What is your guess?\"))",
"def evaluate_my_number(guess, random_number):",
"def _score_code(self, code):\n # Get list of 2-tuples, each containing an input sequence and an output\n # sequence.\n io_seqs = self.task.make_io_set()\n terminal_reward = 0.0\n results = []\n reason = 'correct'\n for input_seq, output_seq in io_seqs:\n eval_result = bf.evaluate(\n code, input_buffer=input_seq, timeout=0.1,\n max_steps=self.max_execution_steps,\n base=self.task.base,\n require_correct_syntax=self.require_correct_syntax)\n result, success = eval_result.output, eval_result.success\n if not success:\n # Code execution timed out.\n terminal_reward = self.failure_reward\n results = []\n reason = eval_result.failure_reason\n break\n else:\n terminal_reward += self.reward_fn(result, output_seq, self.task.base)\n if result == output_seq:\n terminal_reward += self.correct_bonus # Bonus for correct answer.\n\n # Only add additional reward for shorter code. Subtracting reward\n # interferes with the main objective. Only optimize for length once\n # any solution is found.\n if self.min_code_length == self.max_code_length:\n terminal_reward += self.code_length_bonus\n else:\n terminal_reward += self.code_length_bonus * clipped_linear(\n x=len(code), x0=self.min_code_length, y0=1.0,\n slope=-self.time_penalty, y_range=(0.0, 1.0))\n\n # reason remains 'correct' if it is already\n elif reason == 'correct':\n reason = 'wrong'\n results.append(result)\n\n # Return list of rewards, one for each char in the code. All are 0 except\n # for the terminal reward.\n terminal_reward /= self.best_reward\n return misc.RewardInfo(\n episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],\n input_case=misc.IOTuple(i for i, o in io_seqs),\n correct_output=misc.IOTuple(o for i, o in io_seqs),\n code_output=misc.IOTuple(results),\n input_type=self.input_type,\n output_type=self.output_type,\n reason=reason)",
"def main():\n # init variables\n lower_bound = 1\n higher_bound = 10\n guess = generate_guess(1, 10)\n while True:\n try:\n secret = input(\"What should the computer guess? Enter a number between 1 and 10: \")\n except ValueError:\n print(\"{} isn't a number!\".format(secret))\n while True:\n if int(guess) == int(secret):\n print(\"I guessed {}! Your number was {}! I win!\".format(guess, secret))\n play_again = input(\"Do you want to play again? (Y/n)\")\n if play_again != \"Y\":\n print(\"Thanks for playing!\")\n exit()\n else:\n main()\n elif int(guess) != int(secret):\n high_or_low = input(\"I guessed {}. Was it high or low? (H/L)\".format(guess))\n print(\"G: {}, HB: {}, LB: {}\".format(guess, higher_bound, lower_bound))\n if high_or_low == \"H\":\n higher_bound = guess - 1\n guess = generate_guess(lower_bound, higher_bound)\n elif high_or_low == \"L\":\n lower_bound = guess + 1\n guess = generate_guess(lower_bound, higher_bound)\n else:\n print(\"Please try again: \\n\")"
] | [
"0.65523076",
"0.6467425",
"0.6455887",
"0.63705254",
"0.6136182",
"0.5947095",
"0.5909345",
"0.58723474",
"0.5860619",
"0.58369917",
"0.58310723",
"0.5819333",
"0.5809662",
"0.5804019",
"0.5748134",
"0.5729847",
"0.5718463",
"0.57156974",
"0.5695532",
"0.56886834",
"0.56766325",
"0.5675861",
"0.5651795",
"0.56499445",
"0.56497496",
"0.5628808",
"0.56097126",
"0.56087303",
"0.5583353",
"0.5578426"
] | 0.75308436 | 0 |
Get compute plugin disabled status | def nfvi_compute_plugin_disabled():
return (_compute_plugin is None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disabled_plugins(self):\n return self._disabled_plugins",
"def getDisabledPlugin(self, *args):\n return _libsbml.SBase_getDisabledPlugin(self, *args)",
"def get_disabled(self):\n return self._disabled",
"def get_disabled(self):\n return self._disabled",
"def getNumDisabledPlugins(self):\n return _libsbml.SBase_getNumDisabledPlugins(self)",
"def get_disabled_switch(self):\n return self.disabled",
"def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")",
"def check_disabled(self):\n return None",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def is_disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"is_disabled\")",
"def disable_status_check(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disable_status_check\")",
"def disabled(self):\n return self._disabled",
"def disabled(self):\n return self._disabled",
"def disable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -d\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf disabled\", \"changes\": True}\n else:\n # If pf was already disabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf not enabled\":\n ret = {\"comment\": \"pf already disabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not disable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret",
"def is_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def disabled(name):\n return not enabled(name)",
"def get_status(self):\n return super(Cabling, self).get_status()",
"def getStatus(self):\n return self.enabled",
"def nfvi_notify_compute_host_disabled(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('notify_host_disabled',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def get_enabled_plugins(self):\n return [p for p in self.get_all_plugins if p[\"plugin_state\"] == \"enabled\"]",
"def get_disabled(self, channel):\n return self.disabled_extensions.get(channel, set())",
"def _isdisable(self):\n return self.dp.state()==PyTango.DevState.DISABLE",
"def get_status():\n return ('off', 'off')",
"def disabled(self):\n return QgsProject.instance().readListEntry(\"Identify\", \"disabledLayers\", \"None\")[0]",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def disabled_by_microsoft(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"disabled_by_microsoft\")",
"def test_get_current_component_status_DISABLED(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_DISABLED, self.u.get_current_component_status('a'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()",
"def get_state_versionlock(module, repo_cfg):\n state = os.path.exists(repo_cfg)\n if state:\n config_file = open(repo_cfg,\"r\")\n for line in config_file.readlines():\n if \"enabled\" in line:\n enabled_state = line.split()[2]\n if enabled_state != '1' and enabled_state != 'True':\n module.warn(\"Warn: Plugin is installed but disabled (\"+enabled_state+\") in \" + repo_cfg)\n config_file.close\n return state"
] | [
"0.7047027",
"0.66642225",
"0.6457143",
"0.6457143",
"0.6397048",
"0.63932973",
"0.63549125",
"0.6290539",
"0.62651056",
"0.62651056",
"0.61825705",
"0.615268",
"0.6097714",
"0.6097714",
"0.6058149",
"0.6056175",
"0.60538864",
"0.6039746",
"0.60189015",
"0.6017175",
"0.60059464",
"0.5978308",
"0.595891",
"0.5956188",
"0.59512323",
"0.59231573",
"0.59231573",
"0.5904356",
"0.5833084",
"0.5818849"
] | 0.7179284 | 0 |
Get a list of host aggregates | def nfvi_get_host_aggregates(callback):
cmd_id = _compute_plugin.invoke_plugin('get_host_aggregates',
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_host_aggregates(self):\n path = '/os-aggregates'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack host aggregates: %s' % truncate(res))\n return res[0]['aggregates']",
"def get_host_stats(self, refresh=False):",
"def aggregators(self, **kwargs):\n return aggregators.aggregators(self._host, self._session, **kwargs)",
"def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]",
"def get_aggregations(self):\n return []",
"def get_all_hosts(self, view='summary'):\n return self._get(endpoint='{}/hosts'.format(self.api_version),\n params=dict(view=view)).json()",
"def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']",
"async def address_aggregate(request):\n\n address = request.match_info['address']\n\n keys = request.query.get('keys', None)\n if keys is not None:\n keys = keys.split(',')\n\n limit = request.query.get('limit', '1000')\n limit = int(limit)\n\n aggregates = await get_computed_address_aggregates(address_list=[address],\n key_list=keys,\n limit=limit)\n\n if not aggregates.get(address):\n return web.HTTPNotFound(text=\"No aggregate found for this address\")\n\n output = {\n 'address': address,\n 'data': aggregates.get(address, {})\n }\n return web.json_response(output)",
"def all_hosts(self):\n ...",
"def hostgroup_list(self):\n return self.ezx.get_hostgroup_list()",
"def get_all_asgs(cluster_tag):\n return get_asgs(cluster_tag, [])",
"def list_nodes(hosts):\n if hosts=='hosts':\n query = \"SELECT DISTINCT host FROM metrics_index\";\n reply=[]\n elif hosts=='all':\n query = \"SELECT * FROM metrics_index\";\n reply={}\n\n rows = session.execute(query);\n for r in rows:\n if hosts=='hosts':\n if str(r.host) not in reply:\n reply.append(str(r.host))\n elif hosts=='all':\n if str(r.host) not in reply:\n reply[str(r.host)]=[]\n if str(r.metric) not in reply[str(r.host)]:\n reply[str(r.host)].append(str(r.metric))\n return json.dumps(reply)",
"def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass",
"def aggregate(project, start, end,\n access='all-access', agent='all-agents', granularity='daily'):\n args = AG_ARGS.format(project=project,\n start=start,\n end=end,\n access=access,\n agent=agent,\n granularity=granularity)\n return __api__(AG_ENDPOINT, args)",
"def all_hosts(self):\n if not 'scan' in list(self._scan_result.keys()):\n return []\n listh = list(self._scan_result['scan'].keys())\n listh.sort()\n return listh",
"def get_aggs(es, query, args):\n return es.search(index=args.index,\n body=query,\n timeout=float(args.timeout),\n filter_path=['aggregations'])",
"def get_results_from_aggregation_sources(self, context):",
"def index(self, req):\n LOG.info(\"List all the nova-compute hosts in the system\")\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n services = dbapi.service_get_all_compute_sorted(ctxt)\n # services looks like (Service(object), Decimal('0'))\n # must convert from Decimal('0') to int() because no JSON repr\n hosts = [{'name':srv[0].host,\n 'instanceCount':int(srv[1])}\n for srv in services]\n return {'hosts': hosts}",
"def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))",
"def aggregate(self):\n aggregations_params = self.pop_aggregations_params()\n if self.view._auth_enabled:\n self.check_aggregations_privacy(aggregations_params)\n self.stub_wrappers()\n\n params = self._query_params.copy()\n params['_aggregations_params'] = aggregations_params\n\n return ACLFilterES(self.view.Model.__name__).aggregate(\n request=self.view.request, **params)",
"def getAllHosts(cluster):\n nics = []\n hosts = rhevGet(\"/api/hosts\")\n doc = libxml2.parseDoc(hosts)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/hosts/host[cluster[@id='\" + getClusterData(cluster ,\"id\") + \"']]\")\n for i in res:\n #hrefs.append(i.prop(\"href\"))\n nic = rhevGet(i.prop(\"href\")+\"/nics\")\n nicdoc = libxml2.parseDoc(nic)\n ctxt = nicdoc.xpathNewContext()\n res = ctxt.xpathEval(\"/host_nics/host_nic/name[text() = '%s']/parent::*\" %rhev_settings.NIC)\n for i in res:\n nics.append(i.prop(\"href\"))\n return nics",
"def aggregate(self, agpath):\n return data.Aggregate(self, agpath)",
"def get_emperor_nodes_stats(self, system):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity)\n hosts = runner.get_inventory(group=system)\n self.json = []\n resp = []\n for host in hosts:\n res = self.__get_stats(host)\n vassals = res.pop(u'vassals')\n res.pop(u'blacklist')\n try:\n temp = [u'%s [%s]' % (v[u'id']\\\n .replace(u'/etc/uwsgi/vassals/', u'')\\\n .replace(u'.ini', u'')\\\n , v[u'pid']) for v in vassals]\n res[u'vassals'] = u', '.join(temp)\n except:\n res[u'vassals'] = []\n res[u'host'] = host\n resp.append(res)\n self.result(resp, headers=[u'host', u'pid', u'version', u'uid', u'gid', \n u'throttle_level', u'emperor_tyrant', \n u'vassals'])",
"def get_host_list():\n gparr = GpArray.initFromCatalog(dbconn.DbURL(port = MASTER_PORT), utility = True)\n segs = gparr.getDbList()\n\n master = None\n standby_host = None\n segment_host_list = []\n\n for seg in segs:\n if seg.isSegmentStandby(current_role=True):\n standby_host = seg.getSegmentHostName()\n elif not seg.isSegmentMaster(current_role=True):\n segment_host_list.append(seg.getSegmentHostName())\n elif seg.isSegmentMaster(current_role=True):\n master = seg.getSegmentHostName()\n\n #Deduplicate the hosts so that we\n #dont install multiple times on the same host\n segment_host_list = list(set(segment_host_list))\n if master in segment_host_list:\n segment_host_list.remove(master)\n\n return (standby_host, segment_host_list)",
"def get_effective_hostgroups(self):\n # TODO: This function is incomplete and untested\n # TODO: Need error handling when object defines hostgroups but hostgroup does not exist\n result = []\n hostgroup_list = []\n # Case 1 and Case 2:\n tmp = self._get_effective_attribute('hostgroups')\n for i in tmp.split(','):\n if i == '': continue\n i = Hostgroup.objects.get_by_shortname(i)\n if not i in result: result.append(i)\n '''\n # Case 1\n if self.has_key('hostgroups'):\n grp = self['hostgroups']\n grp = grp.split(',')\n for i in grp:\n i = i.strip('+')\n i = Hostgroup.objects.get_by_shortname(i)\n if not i in result: result.append(i)\n # Case 2:\n if not self.has_key('hostgroups') or self['hostgroups'].startswith('+'):\n parents = self.get_effective_parents()\n for parent in parents:\n parent_results += parent.get_effective_hostgroups()\n '''\n # Case 3:\n if self.has_key('host_name'):\n # We will use hostgroup_list in case 4 and 5 as well\n hostgroup_list = Hostgroup.objects.filter(members__has_field=self['host_name'])\n for hg in hostgroup_list:\n if hg not in result:\n result.append( hg )\n # Case 4: \n for hg in hostgroup_list:\n if not hg.has_key('hostgroup_name'): continue\n grp = Hostgroup.objects.filter(hostgroup_members__has_field=hg['hostgroup_name'])\n for i in grp:\n if i not in result:\n result.append(i )\n # Case 5:\n for hg in hostgroup_list:\n if not hg.has_key('hostgroup_name'): continue\n grp = Hostgroup.objects.filter(use__has_field=hg['hostgroup_name'])\n for i in grp:\n if i not in result:\n result.append(i )\n \n return result",
"def get_host_stats(self, refresh=False):\n stats = []\n for nodename in self._drv_nodes:\n host_status = self.host_status_base.copy()\n host_status['hypervisor_hostname'] = nodename\n host_status['host_hostname'] = nodename\n host_status['host_name_label'] = nodename\n host_status['hypervisor_type'] = self.name\n host_status['vcpus'] = drv_conf.max_vcpus\n host_status['memory_mb'] = drv_conf.max_memory_mb\n host_status['local_gb'] = drv_conf.max_disk_gb\n stats.append(host_status)\n if len(stats) == 0:\n raise exception.NovaException(\"Azure Driver has no node\")\n elif len(stats) == 1:\n return stats[0]\n else:\n return stats",
"def getHosts(self):\n raise \"not implemented\"",
"def _aggregate_networks(self, hosts):\n nts = {}\n for host in hosts:\n # skip hosts which have low-level network names defined\n # this can be extended to pick network type based on the network name\n names = host.get(\"networks\")\n if names:\n continue\n nt = host.get(\"network\")\n if not self._is_network_type(nt):\n continue\n\n count = nts.get(nt, 0)\n count += 1\n nts[nt] = count\n return nts",
"def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)",
"def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret"
] | [
"0.82682306",
"0.65386385",
"0.650028",
"0.62470096",
"0.6212668",
"0.6188747",
"0.61886394",
"0.6129932",
"0.6109547",
"0.6059295",
"0.58552474",
"0.5842673",
"0.5836488",
"0.5821537",
"0.5802988",
"0.5714081",
"0.57066965",
"0.56985897",
"0.56599617",
"0.5650841",
"0.56503606",
"0.5615336",
"0.5613876",
"0.5594336",
"0.55912876",
"0.55652714",
"0.5562712",
"0.5532463",
"0.55303824",
"0.5507335"
] | 0.7245975 | 1 |
Delete an instance type | def nfvi_delete_instance_type(instance_type_uuid, callback):
cmd_id = _compute_plugin.invoke_plugin('delete_instance_type',
instance_type_uuid,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(cls, type_obj):\n DB.session.delete(type_obj)\n DB.session.commit()",
"def test_instance_type_create_then_delete(self):\n name = 'Small Flavor'\n flavorid = 'flavor1'\n\n original_list = instance_types.get_all_types()\n\n # create new type and make sure values stick\n inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)\n inst_type_id = inst_type['id']\n self.assertEqual(inst_type['flavorid'], flavorid)\n self.assertEqual(inst_type['name'], name)\n self.assertEqual(inst_type['memory_mb'], 256)\n self.assertEqual(inst_type['vcpus'], 1)\n self.assertEqual(inst_type['root_gb'], 120)\n self.assertEqual(inst_type['ephemeral_gb'], 100)\n self.assertEqual(inst_type['swap'], 0)\n self.assertEqual(inst_type['rxtx_factor'], 1)\n\n # make sure new type shows up in list\n new_list = instance_types.get_all_types()\n self.assertNotEqual(len(original_list), len(new_list),\n 'instance type was not created')\n\n instance_types.destroy(name)\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, inst_type_id)\n\n # deleted instance should not be in list anymoer\n new_list = instance_types.get_all_types()\n self.assertEqual(original_list, new_list)",
"def help_destroy(self):\n print(\"delete an instance based on the class name and id\")",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def do_destroy(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n else:\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n else:\n del storage.all()[obj]\n storage.save()",
"def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []",
"def do_destroy(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n del dict_objs[key]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def do_destroy(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n del objects[args]\n storage.save()",
"def delete_model(self, request, instance):\n pass",
"def delete(self):\n type_model = request.json\n\n type_model = namedtuple(\"Type\", type_model.keys())(*type_model.values())\n repository = TypeRepository(\n FLASK_APP.config[\"DBUSER\"],\n FLASK_APP.config[\"DBPASS\"],\n FLASK_APP.config[\"DBHOST\"],\n FLASK_APP.config[\"DBPORT\"],\n FLASK_APP.config[\"DBNAME\"])\n\n try:\n status = repository.delete(type_model)\n if status:\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Informative',\n 'Type deleted sucessfuly',\n 'delete()',\n str(status),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=models.Type.Type(),\n message=\"Type deleted sucessfuly.\",\n status=204), 200\n except Exception as err:\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Error',\n 'Internal server error',\n 'delete()',\n str(err),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=err,\n message=\"Internal server error: \"+str(err),\n status=500)",
"def do_destroy(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n elif len(args) < 2:\n print(\"** class name missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n del object_dict[full_key]\n storage.save()\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")",
"def delete(self, type=None, name=None, identity=None):\n if name and identity:\n name = None # Only specify one\n request = self.request(operation='DELETE', type=type, name=name,\n identity=identity)\n self.call(request, expect=error.NO_CONTENT)",
"def delete_instance(self, instance: int) -> int:\n return self.get_func(\"DC_DeleteInstance\", [POINTER(c_int)], c_int)(instance)",
"def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}",
"def delete(self, key, key_type=None):\n pass",
"def __delete__(self, obj):\n self._instances.pop(obj, None)",
"def delete_instance(cls, args, config):\n if len(args) == 0:\n print \"Usage: molns instance delete INSTANCE_ID\"\n return\n try:\n instance_id = int(args[0])\n except ValueError:\n print \"instance ID must be a integer\"\n return\n instance = config.get_instance_by_id(instance_id)\n if instance is None:\n print \"instance not found\"\n else:\n config.delete_instance(instance)\n print \"instance {0} deleted\".format(instance_id)",
"def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")",
"def remove_type(self, name):\n del self.types[name]",
"def __delete__(self, instance):\n self._lib_vscf_ecc.vscf_ecc_delete(self.ctx)",
"def do_destroy(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n dicti.pop(\"{}.{}\".format(args[0], args[1]))\n storage.save()\n else:\n print(\"** no instance found **\")",
"def delete(self):\n request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name)\n # We expect a `google.protobuf.empty_pb2.Empty`\n self._client._instance_stub.DeleteInstance(\n request_pb, self._client.timeout_seconds)",
"def destroy(self, class_name, inst_id, stored_objects):\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n print(\"** no instance found **\")\n else:\n del stored_objects[instance]",
"def delete_parametertype(request, parametertype, **_kwargs):\n pass",
"def perform_destroy(self, instance):\n pass",
"def __delete__(self, instance):\n self.session.close()",
"def do_destroy(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' + args[1]\n if key in all_objs:\n all_objs.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")",
"def delete_instance(InstanceId=None, DeleteElasticIp=None, DeleteVolumes=None):\n pass",
"def do_destroy(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n if len(tokens) < 1:\n print(\"** class name missing **\")\n else:\n objects = models.storage.all()\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n elif len(tokens) < 2:\n print(\"** instance id missing **\")\n elif \".\".join(tokens[:2]) not in objects:\n print(\"** no instance found **\")\n else:\n del objects[\".\".join(tokens[:2])]\n models.storage.save()",
"def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n\n count_deleted = self.perform_destroy(instance)[0]\n if count_deleted == 0:\n _logger.warning('failed to delete instance id: %s of type: %s',\n instance.id, instance.__class__)\n\n return delete_response(request, count_deleted)"
] | [
"0.75105196",
"0.7219865",
"0.71700513",
"0.68534404",
"0.6839456",
"0.6828863",
"0.6794721",
"0.67646134",
"0.6655979",
"0.66524136",
"0.66384214",
"0.6603012",
"0.65674657",
"0.6551319",
"0.6535387",
"0.6509424",
"0.6474037",
"0.64697915",
"0.6459448",
"0.6426711",
"0.64048105",
"0.63975644",
"0.63944125",
"0.6393483",
"0.63888013",
"0.63692397",
"0.635653",
"0.63525707",
"0.6341442",
"0.63302195"
] | 0.738218 | 1 |
Cold migrate confirm an instance | def nfvi_cold_migrate_confirm_instance(instance_uuid, callback, context=None):
if context is None:
cmd_id = _compute_plugin.invoke_plugin_expediate(
'cold_migrate_confirm_instance', instance_uuid, context,
callback=callback)
else:
cmd_id = _compute_plugin.invoke_plugin(
'cold_migrate_confirm_instance', instance_uuid, context,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def confirm_migration(self, migration, instance, network_info):\n raise NotImplementedError()",
"def migrate(self):\n\tpass",
"def migrate():\n if apply_migrations():\n click.echo(OK)\n else:\n sys.exit(1)",
"def migrate(cr, version):\n pass",
"def post_migrations(self):",
"def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False",
"def confirm_migrate_pickle(before, after):\n assert False",
"def migrate(cls)->None:\n pass",
"def migrate_cancel(self):\n\t\treturn Job(SDK.PrlVm_MigrateCancel(self.handle)[0])",
"def migration():",
"def _cleanup_incomplete_migrations(self, context):\n LOG.debug('Cleaning up deleted instances with incomplete migration ')\n migration_filters = {'host': CONF.host,\n 'status': 'error'}\n migrations = objects.MigrationList.get_by_filters(context,\n migration_filters)\n\n if not migrations:\n return\n\n inst_uuid_from_migrations = set([migration.instance_uuid for migration\n in migrations])\n\n inst_filters = {'deleted': True, 'soft_deleted': False,\n 'uuid': inst_uuid_from_migrations}\n attrs = ['info_cache', 'security_groups', 'system_metadata']\n with utils.temporary_mutation(context, read_deleted='yes'):\n instances = objects.InstanceList.get_by_filters(\n context, inst_filters, expected_attrs=attrs, use_slave=True)\n\n for instance in instances:\n if instance.host != CONF.host:\n for migration in migrations:\n if instance.uuid == migration.instance_uuid:\n # Delete instance files if not cleanup properly either\n # from the source or destination cloud nodes when\n # the instance is deleted during resizing.\n self.driver.delete_instance_files(instance)\n try:\n migration.status = 'failed'\n with migration.obj_as_admin():\n migration.save()\n except exception.MigrationNotFound:\n LOG.warning(_LW(\"Migration %s is not found.\"),\n migration.id, context=context,\n instance=instance)\n break",
"def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")",
"def nfvi_cold_migrate_instance(instance_uuid, callback, to_host_name=None,\n context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n return cmd_id",
"def confirm(self):\n self.automatically_detected=False\n self.save()",
"def nfvi_cold_migrate_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('cold_migrate_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def test_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the cold migration which will fail due to NoValidHost.\n self.api.post_server_action(server['id'], {'migrate': None},\n check_response_status=[202])\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n # Note that we get InstanceNotFound rather than NoValidHost because\n # the NoValidHost handler in ComputeTaskManager._cold_migrate calls\n # _set_vm_state_and_notify which raises InstanceNotFound and masks\n # the NoValidHost error.\n self._assert_resize_migrate_action_fail(\n server, instance_actions.MIGRATE, 'InstanceNotFound')\n self._assert_no_allocations(server)",
"def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)",
"def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)",
"def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)",
"def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)",
"def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')",
"def test_live_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the live migration which will fail due to NoValidHost.\n body = {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}\n self.api.post_server_action(server['id'], body)\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.LIVE_MIGRATION,\n 'conductor_live_migrate_instance')\n self._assert_no_allocations(server)",
"def model_pre_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = True",
"def migrate_fake():\n run('source /home/indabom/web/bin/activate && /home/indabom/web/site/manage.py migrate --fake')",
"def tearDownClass(cls):\n management.call_command(\"migrate\")",
"def confirmation_failed(self):",
"def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')",
"def migratedb_command():\n db = get_db()\n # This migration detects whether it needs to run before making changes.\n db.migrate_add_user_is_enabled()",
"def run_migration_checks():\n check_model_state()\n check_migration_state()",
"def check_vm_host_after_migration(positive):\n flow_msg = \"with\" if positive else \"without\"\n testflow.step(\"Migrate VM %s\", conf.VM_NAME[0])\n assert ll_vms.migrateVm(positive=True, vm=conf.VM_NAME[0])\n testflow.step(\n \"Check if VM %s migrated on the host %s second VM %s\",\n conf.VM_NAME[0], flow_msg, conf.VM_NAME[1]\n )\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n ) == positive"
] | [
"0.72407913",
"0.6868304",
"0.6802639",
"0.6643721",
"0.64271414",
"0.6412695",
"0.63651156",
"0.6314516",
"0.63011813",
"0.61909187",
"0.6099909",
"0.60377604",
"0.6023261",
"0.59750897",
"0.5949194",
"0.59245265",
"0.5899963",
"0.5899963",
"0.58728546",
"0.58728546",
"0.58620274",
"0.5822578",
"0.5792325",
"0.57745606",
"0.5764113",
"0.5743028",
"0.57251126",
"0.57240003",
"0.5718887",
"0.571151"
] | 0.7104281 | 1 |
Cold migrate revert an instance | def nfvi_cold_migrate_revert_instance(instance_uuid, callback, context=None):
cmd_id = _compute_plugin.invoke_plugin('cold_migrate_revert_instance',
instance_uuid, context,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def migrate(cr, version):\n pass",
"def migrate(self):\n\tpass",
"def post_revert(self):",
"def pre_revert(self):",
"def migration():",
"def test_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the cold migration which will fail due to NoValidHost.\n self.api.post_server_action(server['id'], {'migrate': None},\n check_response_status=[202])\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n # Note that we get InstanceNotFound rather than NoValidHost because\n # the NoValidHost handler in ComputeTaskManager._cold_migrate calls\n # _set_vm_state_and_notify which raises InstanceNotFound and masks\n # the NoValidHost error.\n self._assert_resize_migrate_action_fail(\n server, instance_actions.MIGRATE, 'InstanceNotFound')\n self._assert_no_allocations(server)",
"def revert(self, *args, **kwargs):",
"def downgrade():\n\n op.drop_column('shares', 'revert_to_snapshot_support')",
"def upgrade():\n # commands auto generated by Alembic - please adjust! ###\n op.drop_table('review')\n # end Alembic commands ###",
"def rollback(migrator, database, fake=False, **kwargs):\n\n migrator.remove_model('tea_teas_types')\n migrator.remove_model('tea_types')\n migrator.remove_model('tea_lists_items')\n migrator.remove_model('tea_lists')\n migrator.remove_model('tea_teas')\n migrator.remove_model('tea_vendors')",
"def upgrade():\n try:\n op.drop_table(\"ggrc_gdrive_integration_alembic_version\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in a new DB with no trace of the removed chain\n pass\n else:\n raise\n\n # The following duplicates a part of a gdrive-related migration,\n # since a bunch of old migrations in ggrc refer to meetings table.\n # This part is relevant only for db_reset (new databases), so we\n # shouldn't recreate this table in downgrade.\n try:\n op.drop_table(\"meetings\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in an old DB where meetings has been dropped in the removed chain\n pass\n else:\n raise",
"def migrate(cls)->None:\n pass",
"def revert_fe_transfer_changes():\n # Cyclic Import\n from backend.models import FETransfer\n\n end_date = datetime.datetime.now()\n end_date = end_date.replace(second=0, microsecond=0)\n start_date = end_date - datetime.timedelta(1)\n\n all_instances = FETransfer.objects.filter(revert_flag=False,\n updated_at__range=(start_date, end_date))\n for instance in all_instances:\n instance.revert_changes()",
"def rollback(migrator, database, fake=False, **kwargs):\n pass",
"def rollback(self):\n pass",
"def rollback(self):\r\n self.db.rollback()",
"def revert(self):\n self.instance.save()\n return self.instance",
"def rollback(self):\n self.db.rollback()",
"def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False",
"def post_migrations(self):",
"def migrate_cancel(self):\n\t\treturn Job(SDK.PrlVm_MigrateCancel(self.handle)[0])",
"def downgrade():\n # # commands auto generated by Alembic - please adjust! ###\n op.drop_table('downstream_map')\n # # end Alembic commands ###",
"def downgrade(revision, sql):\n alembic_command.downgrade(alembic_config, revision, sql=sql)",
"def reset_db_danger():\n from flask.ext.migrate import init, migrate\n # Remove the migration folder if exist\n if os.path.exists('migrations'):\n shutil.rmtree('migrations')\n\n # Remove the sqlite database files if exist\n for fl in glob.glob('*.sqlite'):\n os.remove(fl)\n\n # Reset Migration Database\n init()\n\n # migrate database to latest revision\n migrate(message='init')",
"def downgrade():\n op.execute(\"\"\"\n CREATE TABLE ggrc_gdrive_integration_alembic_version (\n version_num varchar(32) NOT NULL\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8\n \"\"\")\n op.execute(\"\"\"\n INSERT INTO ggrc_gdrive_integration_alembic_version (version_num)\n VALUES ('3f64d03c6c01')\n \"\"\")",
"def reset(self) -> None:\n call_command('migrate', verbosity=0, database=self._database)",
"def downgrade():\n op.drop_table(\"task_instance_note\")\n op.drop_table(\"dag_run_note\")",
"def nfvi_resize_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('resize_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def tearDownClass(cls):\n management.call_command(\"migrate\")",
"def test_live_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the live migration which will fail due to NoValidHost.\n body = {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}\n self.api.post_server_action(server['id'], body)\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.LIVE_MIGRATION,\n 'conductor_live_migrate_instance')\n self._assert_no_allocations(server)"
] | [
"0.69084084",
"0.6837074",
"0.66857207",
"0.66517055",
"0.66482615",
"0.6580794",
"0.6542509",
"0.65224993",
"0.6518319",
"0.65061986",
"0.6489541",
"0.64496523",
"0.6444745",
"0.6442864",
"0.6369054",
"0.6349502",
"0.6275725",
"0.62747025",
"0.6249206",
"0.62250805",
"0.6209609",
"0.6194863",
"0.61852497",
"0.6168309",
"0.61588657",
"0.6152546",
"0.6152467",
"0.6101748",
"0.6099513",
"0.6098158"
] | 0.739316 | 0 |
Reject an action against an instance | def nfvi_reject_instance_action(instance_uuid, message, context):
cmd_id = _compute_plugin.invoke_plugin('reject_instance_action',
instance_uuid, message, context)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reject(self):\n pass",
"def reject(self, responder):\n self._apply_decision(self.Status.REJECTED, responder)",
"def on_reject(self):\n self.state = REJECTED\n self._reject()",
"def reject(self, message):\n boto_connection = connection.get_connection()\n boto_connection.reject_assignment(self.assignment_id, message)",
"def test_rejected(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"fr\"})\n actions = list(actions)\n eq_(len(actions), 1)\n eq_(actions[0][1], Action.REJECTED)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n fr 0003\")\n eq_(so.locale.code, \"fr\")\n eq_(so.action_set.count(), 2)",
"def rescue(self, instance):\n pass",
"def cant(user, action):\n\n return not can(user, action)",
"def reject(self):\n\n self.date_of_submission = None\n api.content.transition(obj=self,\n transition='proposal-transition-reject')",
"def reject(self, feedback=None):\n self.hit.generate_connection()\n self.hit.connection.reject_assignment(self.mturk_id, feedback=feedback)\n self.update()",
"def test_invalid_action(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Ask for permission.\n state_changer = request.state_changer\n self.assertFalse(state_changer.can_perform(context, a.COMPLETE))\n\n # Beg for forgiveness.\n err = fysom.FysomError\n self.assertRaises(err, state_changer.perform, context, a.COMPLETE, None)",
"def on_buttonBox_rejected(self):\n self.reject()",
"def reject(self):\n print \"This form has been rejected. Current state:\", self.state",
"def serverReject(self):\n self.handshake_deferred.errback(ConnectionDeny(code=403, reason=\"Access denied\"))\n self.cleanup()\n logger.debug(\"WebSocket %s rejected by application\", self.reply_channel)\n self.factory.log_action(\"websocket\", \"rejected\", {\n \"path\": self.request.path,\n \"client\": \"%s:%s\" % tuple(self.client_addr) if self.client_addr else None,\n })",
"def test_revoke_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)",
"def unrescue(self, instance):\n pass",
"def denyMethod(self, verb, resource):\n self._addMethod(\"Deny\", verb, resource, [])",
"def cancel_instance(self, instance_id):\r\n return self.guest.deleteObject(id=instance_id)",
"def unaccept_offer(self, pname, matchid):\n msg = '%s declined the match' % (pname)\n self._rem_offer(matchid, msg)\n msg = '%s canceled the game' % (pname)\n self._rem_game(matchid, msg)",
"def _reject(self, reason):\n log.error('Rejected: %s' % reason)\n\n self._remove_changes()\n self._remove_files()\n\n if self.user is not None:\n email = Email('importer_reject_maintainer')\n package = self.changes.get('Source', '')\n\n self.send_email(email, [self.user.email], package=package, message=reason)\n sys.exit(1)",
"def Reject(self, request, global_params=None):\n config = self.GetMethodConfig('Reject')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def reject(self):\n self.skype.conn(\"PUT\", \"{0}/users/{1}/invites/8:{2}/decline\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.userId),\n auth=SkypeConnection.Auth.SkypeToken)",
"def test_disallow_betrayal(self):\n s1 = self.battle.create_skirmish(self.alice, 1)\n with self.assertRaises(db.TeamException):\n s1.react(self.bob, 1, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)",
"def action_cancel(self):\n self.state = 'canceled'",
"def canceled(self):\n self.reject()",
"def effectOnMiss(self, user, target):\n return self.stopCharge(user)",
"def effectOnMiss(self, user, target):\n return self.stopCharge(user)",
"def reject_waiting_call(self) -> None:",
"def reject(self, request, queryset):\n\n rejected_count = self.reject_stories(queryset)\n self.message_user_results(request, rejected_count, 0, \"rejected\")",
"def reject(self):\r\n QtGui.QDialog.reject(self)",
"def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)"
] | [
"0.7002768",
"0.64793044",
"0.6412309",
"0.6220728",
"0.6187695",
"0.6176384",
"0.6163549",
"0.6141075",
"0.61121845",
"0.6057996",
"0.603861",
"0.598698",
"0.5980348",
"0.5910807",
"0.5905218",
"0.5896408",
"0.5850485",
"0.584049",
"0.58146614",
"0.5792201",
"0.5752381",
"0.57500666",
"0.5734637",
"0.57229143",
"0.5707225",
"0.5707225",
"0.56880903",
"0.56741357",
"0.56712496",
"0.5663944"
] | 0.702213 | 0 |
Register for instance action change notifications | def nfvi_register_instance_action_change_callback(callback):
_compute_plugin.invoke_plugin('register_instance_action_change_callback',
callback=callback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nfvi_register_instance_action_callback(callback):\n _compute_plugin.invoke_plugin('register_instance_action_callback',\n callback=callback)",
"def on_register(cls):",
"def notifyObservers(self):",
"def _instance_changed_handler ( self, name, old, new ):\n arg_lists = self._get_instance_handlers( name )\n\n if old is not None:\n for args in arg_lists:\n old.on_trait_change( remove = True, *args )\n\n if new is not None:\n for args in arg_lists:\n new.on_trait_change( *args )",
"def register_instance(self, instance):\n self.instance = instance",
"def on_notify(self, name):\r\n pass",
"def register_observers_with_gamestate(self):",
"def auto_discover():\n auto_registration(\"actions\")",
"def on_watch(self, payload):\n pass",
"def registerAction(self, actionId, action): #$NON-NLS-1$\r",
"def nfvi_register_instance_state_change_callback(callback):\n _compute_plugin.invoke_plugin('register_instance_state_change_callback',\n callback=callback)",
"def observe_model_modification(\n sender: type, instance: Model, created: bool = False, **kwargs\n):\n if isinstance(instance, Observable):\n if created:\n handle_permission_change(instance)\n elif not getattr(instance, suppress_notifications_attribute, False):\n Observer.observe_instance_changes(instance, ChangeType.UPDATE)\n Observer.observe_instance_container(instance, ChangeType.UPDATE)",
"def notify_subscribers(self, instance, domain, state=None):\n if not self.notifier:\n return\n\n if not state:\n state = instance.state\n\n tups = domain.get_subscribers()\n for subscriber_name, subscriber_op in tups:\n properties = {'hostname': instance.public_ip}\n content = {'node_id': instance.instance_id, 'state': state,\n 'domain_id': domain.domain_id,\n 'properties': properties}\n self.notifier.notify_by_name(subscriber_name, subscriber_op, content)",
"def publish_action(self, action):\n raise NotImplementedError",
"def onRegister(self):\n pass",
"def onRegister(self):\n pass",
"def start_notify(self, on_change):\n raise NotImplementedError",
"def events(self):",
"def action_defined(sender, instance, created, raw, using, **kwargs):\n if created:\n raw_hook_event.send(\n sender=None,\n event_name=\"action_defined\",\n instance=instance,\n payload=ActionSerializer(instance).data,\n user=instance.team,\n )",
"def notify_observers(self, new_gamestate) -> None:",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def watch(self):",
"def notify(self):\n if self.has_changed:\n event = self.event_factory(self.resource, registry=self.registry, schema=self.schema,\n changed=self.changed)\n self.registry.notify(event)",
"def __init__(self, instance, created, signal_type):\n\n self.instance = instance\n self.created = created\n self.signal_type = signal_type",
"def receiveAction(self, action):\n self.action = action",
"def on_update(self, **kwargs):\n self.get_client()\n self.update(self, kwargs['sender'], kwargs['instance'])",
"def add_action(self, action):\n self.action = action"
] | [
"0.6161295",
"0.6089405",
"0.59604",
"0.59021795",
"0.58711416",
"0.58598304",
"0.5859007",
"0.58397067",
"0.576684",
"0.56879747",
"0.5651403",
"0.5616433",
"0.5599115",
"0.5545698",
"0.55440086",
"0.55440086",
"0.55310184",
"0.552071",
"0.5515812",
"0.55045426",
"0.5493684",
"0.5493684",
"0.5493684",
"0.5493684",
"0.5487216",
"0.5472829",
"0.5435524",
"0.541051",
"0.54091513",
"0.5407344"
] | 0.6844933 | 0 |
Register for instance action callback | def nfvi_register_instance_action_callback(callback):
_compute_plugin.invoke_plugin('register_instance_action_callback',
callback=callback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nfvi_register_instance_action_change_callback(callback):\n _compute_plugin.invoke_plugin('register_instance_action_change_callback',\n callback=callback)",
"def on_register(cls):",
"def register(self, callback):\n self.callback = callback",
"def add_callback(callback, control_instance):\n pass",
"def register_instance(self, instance):\n self.instance = instance",
"def register_callback(self):\n raise Exception('not implemented')",
"def __init__(self, callback):\n # Log the <instance>.__init__ message\n listeners_logger.log_info(\n '{0}.__init__<{1}>'.format(self.name, callback))\n\n # Is the callback callable?\n if not callable(callback):\n\n # Raise an error\n raise TypeError(\n \"'\" + type(callback).__name__ + \"' object is not callable.\")\n\n # Log the registering message\n listeners_logger.log_info(\n '{0}.__init__ - Registering'.format(self.name))\n\n # Store the callback\n self.callback = callback\n\n # Register the listener\n self.manager.register_listener(self.callback)",
"def onRegister(self):\n pass",
"def onRegister(self):\n pass",
"def registerAction(self, actionId, action): #$NON-NLS-1$\r",
"def register_callback(self, callback):\n self.callbacks.add(callback)",
"def __init__(self, callback):\n self.callback = callback",
"def __init__(self, callback):\n self.callback = callback",
"def callback_connect(self):\n pass",
"def callback_connect(self):\n pass",
"def callback_connect(self):\n pass",
"def register_callback(cls, backend, callback):\n cls._callbacks[backend][cls] = callback",
"def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)",
"def __init__(self, function, instance):\r\n self.instance = instance\r\n self.function = function",
"def register_active_event(t, callback, args, action_runner, plugin, msg_obj, mutex=None):\n def func(func_args):\n action = callback(*func_args)\n if action:\n action_runner(action=action, plugin=plugin, msg_obj=msg_obj)\n register_event(t, func, args, mutex=mutex)",
"def add_action(self, action):\n self.action = action",
"def add_action(self, action):\n self.action = action",
"def register( key, obj ):\n global callbacks\n callbacks[ key ] = obj",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def register_on_close(self, action):\n self._action_on_close = action",
"def register_callback(self, func):\n self.callback = func",
"def call_action(self, action):\n pass"
] | [
"0.7074271",
"0.69212306",
"0.6914295",
"0.6678117",
"0.65792686",
"0.6406543",
"0.6396015",
"0.6325683",
"0.6325683",
"0.62563324",
"0.6227942",
"0.62053627",
"0.62053627",
"0.61932707",
"0.61932707",
"0.61932707",
"0.61629814",
"0.61220556",
"0.6121512",
"0.6076169",
"0.60618335",
"0.60618335",
"0.6058937",
"0.6012843",
"0.6012843",
"0.6012843",
"0.6012843",
"0.6003036",
"0.5980284",
"0.5911491"
] | 0.7680887 | 0 |
Notify compute host is enabled | def nfvi_notify_compute_host_enabled(host_uuid, host_name, host_personality,
callback):
cmd_id = _compute_plugin.invoke_plugin('notify_host_enabled',
host_uuid, host_name,
host_personality,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nfvi_notify_compute_host_disabled(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('notify_host_disabled',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def nfvi_enable_compute_host_services(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('enable_host_services',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)",
"def on_enable(self) -> None:\n if self._send_notification_func:\n self._send_notification_func()\n self._send_notification_func = None",
"def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))",
"def scp_enable(task):\n cmd = \"ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n c_print(f\"*** {task.host}: SCP has been enabled ***\")",
"def ceph_enabled(self):",
"def enabled_on_host() -> Callable[[Loader.ModInfo], bool]:\n def filter_fn(mod_info: Loader.ModInfo) -> bool:\n if len(mod_info.mod_def.hostnames) == 0:\n return True\n\n if HOSTNAME not in mod_info.mod_def.hostnames:\n _LOG.info(\n f\"{mod_info.mod_def.name} is not enabled for host {HOSTNAME}\"\n )\n return False\n return True\n\n return filter_fn",
"def test_reports_enabled_hosts_as_up(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()",
"def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def enableNotify(self):\n while True:\n self._delegate.notificationsRecvd = 0\n self._peripheral.writeCharacteristic(self._readChar.valHandle + 1, b\"\\x01\\x00\")\n self.ping()\n self._peripheral.waitForNotifications(3.0)\n if self.getNotificationsReceived() > 0:\n break\n logging.getLogger(\"anki.overdrive\").error(\"ERROR - Set notify failed - cannot communicate with car!\")",
"def test_doesnt_report_disabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def enable(self):\n self._logger.debug(\"%s: request to enable monitoring\",\n self.ping_address)\n if self._monitor_enabled:\n return\n self._is_stale = False\n self._enable_monitor()",
"def nfvi_disable_compute_host_services(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('disable_host_services',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})",
"def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n status = True\n Logger.info(\"[FSCD Testing] userver power status {}\".format(status))\n return status",
"def check_host(host):\n try:\n request = requests.get(host[0], timeout=3)\n host[1] = bool(re.search(host[1], request.text))\n except Exception:\n host[1] = False\n if host[1] is False:\n os.system(CONFIG['mail_command'].format(\n 'CRITICAL: {} is critical'.format(host[0])))\n\n return host",
"def test_reports_enabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()",
"def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)",
"def set_online(rg = None):\n if isinstance(rg, int):\n rg = [rg]\n\n if rg is None:\n _cpu.enable_all_cpu()\n print(\"All CPUs enabled.\")\n else:\n for core in rg:\n try:\n _cpu.enable_cpu(core)\n if _verbose:\n print(f\"CPU {core} set online.\")\n except:\n print(f\"ERROR: An exception occurred. Check if CPU {core} exists.\")",
"async def execute_host(self):\n return True",
"def node_host_status(self, node):\n if node.is_online() or node.is_unreachable():\n return self.HOST_MONITORED\n else:\n return self.HOST_UNMONITORED",
"async def _cmdf_pmenable(self, substr, msg, privilege_level):\n enabled_str = None\n if utils.str_says_true(substr) or (len(substr) == 0):\n self._pm_msg_isenabled = True\n enabled_str = \"enabled.\"\n else:\n self._pm_msg_isenabled = False\n enabled_str = \"disabled.\"\n self._save_settings()\n\n buf = \"PM greetings is now \" + enabled_str\n await self._client.send_msg(msg, buf)\n return",
"async def power_on(self):\n ...",
"def on_enable(self) -> None:\n if self.app.state == self.app.States.clean:\n self._start_notification_cycle()",
"def enable():\n request = dict(id='gbn')\n _gbn_enable(request)",
"def power_on(self, sync=True, wait_for_guest_ready=True):\n self.vmomi_object.PowerOn()\n if sync: self._wait_for_power_on(wait_for_guest_ready)",
"def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)"
] | [
"0.6910246",
"0.63405925",
"0.6329132",
"0.6329132",
"0.6197454",
"0.6078591",
"0.5980807",
"0.5833863",
"0.5802485",
"0.57806736",
"0.5662218",
"0.5642115",
"0.558865",
"0.5550963",
"0.5544851",
"0.55175227",
"0.5497251",
"0.54892796",
"0.5441781",
"0.5439356",
"0.5438972",
"0.5424828",
"0.541771",
"0.54041463",
"0.5402884",
"0.53993165",
"0.53798443",
"0.5379302",
"0.53699285",
"0.5349018"
] | 0.7718271 | 0 |
Notify compute host is disabled | def nfvi_notify_compute_host_disabled(host_uuid, host_name, host_personality,
callback):
cmd_id = _compute_plugin.invoke_plugin('notify_host_disabled',
host_uuid, host_name,
host_personality,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nfvi_notify_compute_host_enabled(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('notify_host_enabled',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def nfvi_disable_compute_host_services(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('disable_host_services',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def test_doesnt_report_disabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)",
"def check_if_host_disabled(self, host, binary='nova-compute'):\n _service_id = self._find_service_id_by_host(host, binary)\n if _service_id:\n _ser = self._client.services.find(id=_service_id)\n if _ser.status == u'enabled':\n return False\n elif _ser.status == u'disabled':\n return True\n else:\n return False",
"def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)",
"def scp_disable(task):\n cmd = \"no ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n task.run(task=netmiko_save_config)\n c_print(f\"*** {task.host}: SCP has been disabled ***\")",
"def _vm_disable_host(zx, vm, log=None):\n log = log or zx.log\n hostid = zx.get_hostid(vm, log=log)\n\n if not hostid:\n log(ERROR, 'Zabbix host for VM %s does not exist!', vm)\n return False\n\n log(WARNING, 'Setting Zabbix host ID \"%s\" status to unmonitored for VM %s', hostid, vm)\n\n if zx.update_host(hostid, log=log, status=zx.HOST_UNMONITORED):\n log(INFO, 'Updated Zabbix host ID \"%s\" status to unmonitored', hostid)\n zx.save_host_info(vm, log=log)\n return True\n else:\n log(ERROR, 'Could not update Zabbix host ID \"%s\" status to unmonitored', hostid)\n return False",
"def wait_all_ports_admin_disabled(self):\n pass",
"def on_disable(self) -> None:\n self._cancel_notification_cycle()",
"def notEnabledDummy(self, ev):\n pass",
"def disable():\n request = dict(id='gbn')\n _gbn_disable(request)",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})",
"def disable(self):\n self._disable_monitor()\n self._pinger.stop()",
"def on_disable(self) -> None:\n self._cancel_automation()",
"def on_disable(self) -> None:\n self._on_stop_cycle({})",
"def test_offline_nodes_disabled(self):\n with Nodes()as n:\n for node in n.nodes_offline:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node disabled')",
"def disable_discovery(self):",
"def disabledPeriodic(self):\n self.putData()",
"def disable(self) -> None:",
"def disable_irq() -> int:",
"def disable_scheduling(topo, hypervisors, change_number='Unknown',\n task_number='Unknown', reason=None,\n when_disabled=du.get_now()):\n succeeded = []\n failed = []\n error = None\n\n # Sanity checks\n if not hypervisors:\n return succeeded, failed, \\\n 'Attempted disable hypervisors for scheduling without ' \\\n 'providing a collection of hypervisors to match. Skipping.'\n if not topo:\n return succeeded, failed, \\\n 'Attempted disable hypervisor for scheduling without ' \\\n 'providing a valid cloud topo. Skipping.'\n\n # Get a list of all compute services for this cloud\n config = topo.render('os_client_config')\n cloud_config = config.get_one_cloud(cloud=topo.cloud.canonical_name)\n cloud = shade.OpenStackCloud(cloud_config=cloud_config)\n nova_client = cloud.nova_client\n services = nova_client.services.list(binary=\"nova-compute\")\n\n # Find common list between services and provided hvs (not already disabled)\n hv_set = set(hypervisors)\n matched_services = []\n for service in services:\n if service.host in hv_set and service.status == 'enabled':\n matched_services.append(service)\n if not matched_services:\n return succeeded, failed, \\\n 'Did not find any enabled HVs to disable. skipping.'\n\n # Disable the services.\n reason = \"Disabled on %s via automation for change (%s), ctask(%s) \" \\\n \"for reason: %s\" % (when_disabled.strftime('%Y-%m-%d'),\n change_number, task_number,\n reason or 'No reason provided')\n for service in matched_services:\n try:\n nova_client.services.disable_log_reason(\n service.host, 'nova-compute', reason=reason)\n succeeded.append(service.host)\n except Exception as ex:\n LOG.exception('Exception while disabling host scheduling')\n failed.append(service.host)\n error = ex.message\n\n return succeeded, failed, error",
"def disable(self):\n self.error_code = 'DISABLED'\n self.running = False",
"def set_offline(rg = None):\n if isinstance(rg, int):\n rg = [rg]\n elif rg is None:\n rg = _cpu.get_online_cpus()\n\n for core in rg:\n try:\n _cpu.disable_cpu(core)\n if _verbose:\n print(f\"CPU {core} set offline.\")\n except:\n print(f\"ERROR: An exception occurred. Check if CPU {core} exists.\")",
"def disable(self):",
"def on_enable(self) -> None:\n if self._send_notification_func:\n self._send_notification_func()\n self._send_notification_func = None",
"def syslog_remote_disable(handle, name):\n\n mo = handle.query_dn(\"sys/svc-ext/syslog/client-\" + name)\n if mo:\n mo.admin_state = \"disabled\"\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n else:\n raise ValueError(\"Syslog Mo is not available.\")"
] | [
"0.6590806",
"0.65881276",
"0.6332107",
"0.6331109",
"0.6220316",
"0.620537",
"0.6117578",
"0.6104185",
"0.6052198",
"0.6050748",
"0.59577185",
"0.58573514",
"0.5832847",
"0.5814101",
"0.5814101",
"0.5747102",
"0.5736996",
"0.57186717",
"0.5702843",
"0.57011414",
"0.5687968",
"0.56633186",
"0.5635666",
"0.5625707",
"0.5617012",
"0.5607785",
"0.5596604",
"0.55958205",
"0.5590055",
"0.5576635"
] | 0.7907137 | 0 |
Disable compute host services | def nfvi_disable_compute_host_services(host_uuid, host_name, host_personality,
callback):
cmd_id = _compute_plugin.invoke_plugin('disable_host_services',
host_uuid, host_name,
host_personality,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nfvi_notify_compute_host_disabled(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('notify_host_disabled',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def check_if_host_disabled(self, host, binary='nova-compute'):\n _service_id = self._find_service_id_by_host(host, binary)\n if _service_id:\n _ser = self._client.services.find(id=_service_id)\n if _ser.status == u'enabled':\n return False\n elif _ser.status == u'disabled':\n return True\n else:\n return False",
"def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def test_doesnt_report_disabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def disable_scheduling(topo, hypervisors, change_number='Unknown',\n task_number='Unknown', reason=None,\n when_disabled=du.get_now()):\n succeeded = []\n failed = []\n error = None\n\n # Sanity checks\n if not hypervisors:\n return succeeded, failed, \\\n 'Attempted disable hypervisors for scheduling without ' \\\n 'providing a collection of hypervisors to match. Skipping.'\n if not topo:\n return succeeded, failed, \\\n 'Attempted disable hypervisor for scheduling without ' \\\n 'providing a valid cloud topo. Skipping.'\n\n # Get a list of all compute services for this cloud\n config = topo.render('os_client_config')\n cloud_config = config.get_one_cloud(cloud=topo.cloud.canonical_name)\n cloud = shade.OpenStackCloud(cloud_config=cloud_config)\n nova_client = cloud.nova_client\n services = nova_client.services.list(binary=\"nova-compute\")\n\n # Find common list between services and provided hvs (not already disabled)\n hv_set = set(hypervisors)\n matched_services = []\n for service in services:\n if service.host in hv_set and service.status == 'enabled':\n matched_services.append(service)\n if not matched_services:\n return succeeded, failed, \\\n 'Did not find any enabled HVs to disable. skipping.'\n\n # Disable the services.\n reason = \"Disabled on %s via automation for change (%s), ctask(%s) \" \\\n \"for reason: %s\" % (when_disabled.strftime('%Y-%m-%d'),\n change_number, task_number,\n reason or 'No reason provided')\n for service in matched_services:\n try:\n nova_client.services.disable_log_reason(\n service.host, 'nova-compute', reason=reason)\n succeeded.append(service.host)\n except Exception as ex:\n LOG.exception('Exception while disabling host scheduling')\n failed.append(service.host)\n error = ex.message\n\n return succeeded, failed, error",
"def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)",
"def disable(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Disabling `{service_name}`\")\n \n run_command(f\"sudo systemctl disable {service_name}\")",
"def disable_discovery(self):",
"def _vm_disable_host(zx, vm, log=None):\n log = log or zx.log\n hostid = zx.get_hostid(vm, log=log)\n\n if not hostid:\n log(ERROR, 'Zabbix host for VM %s does not exist!', vm)\n return False\n\n log(WARNING, 'Setting Zabbix host ID \"%s\" status to unmonitored for VM %s', hostid, vm)\n\n if zx.update_host(hostid, log=log, status=zx.HOST_UNMONITORED):\n log(INFO, 'Updated Zabbix host ID \"%s\" status to unmonitored', hostid)\n zx.save_host_info(vm, log=log)\n return True\n else:\n log(ERROR, 'Could not update Zabbix host ID \"%s\" status to unmonitored', hostid)\n return False",
"def test_no_adapter_opts_ignore_service_type(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Not in the list of requested service_types.\",\n # 'orchestration' absent from this list\n service_types=['compute'],\n )",
"def disabled(config):\n disable(config)\n reload_service('apache2')",
"def test_reports_enabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()",
"def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})",
"def nfvi_enable_compute_host_services(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('enable_host_services',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def scp_disable(task):\n cmd = \"no ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n task.run(task=netmiko_save_config)\n c_print(f\"*** {task.host}: SCP has been disabled ***\")",
"def disable():\n request = dict(id='gbn')\n _gbn_disable(request)",
"def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])",
"def remove_compute(compute_targets):\n for name, ct in compute_targets.items():\n compute_targets[name].delete()",
"def disable_server(self, server):\n log.info(\"Disabling %s in netscaler\", server)\n return self.post(\"server?action=disable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def allOff():\n # Get/set special slice IDs\n root_xid = bwlimit.get_xid(\"root\")\n default_xid = bwlimit.get_xid(\"default\")\n kernelhtbs = gethtbs(root_xid, default_xid)\n if len(kernelhtbs):\n logger.log(\"bwmon: Disabling all running HTBs.\")\n for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)",
"def set_all_ports_admin_disabled(self):\n pass",
"def disable_everything(self):\n zhinst.utils.disable_everything(self.daq, self.device_id)\n self.log.info(\"Disabled everything.\")",
"def disable_service(service_name):\n run_program(['sc', 'config', service_name, 'start=', 'disabled'])\n\n # Verify service was disabled\n start_type = get_service_start_type(service_name)\n if not start_type.lower().startswith('disabled'):\n raise GenericError('Failed to disable service {}'.format(service_name))",
"def set_offline(rg = None):\n if isinstance(rg, int):\n rg = [rg]\n elif rg is None:\n rg = _cpu.get_online_cpus()\n\n for core in rg:\n try:\n _cpu.disable_cpu(core)\n if _verbose:\n print(f\"CPU {core} set offline.\")\n except:\n print(f\"ERROR: An exception occurred. Check if CPU {core} exists.\")",
"def test_reports_enabled_hosts_as_up(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()",
"def vm_disable(self, vm, task_log=LOG):\n result = []\n izx_log = self.izx.get_log_fun(task_log)\n ezx_log = self.ezx.get_log_fun(task_log)\n\n if vm.is_zabbix_sync_active():\n result.append(self._vm_disable_host(self.izx, vm, log=izx_log))\n else:\n izx_log(INFO, 'Internal zabbix synchronization disabled for VM %s', vm)\n result.append(None)\n\n if vm.is_external_zabbix_sync_active():\n result.append(self._vm_disable_host(self.ezx, vm, log=ezx_log))\n else:\n ezx_log(INFO, 'External zabbix synchronization disabled for VM %s', vm)\n result.append(None)\n\n return result",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)",
"def cleanup_infrastructure_compute(config, context):\n log.info(\"### Cleaning up infrastructure ###\")\n admin = context.getAdministrationService()\n for datacenter in admin.listDatacenters():\n cleanup_infrastructure_storage(config, datacenter)\n cleanup_infrastructure_network(config, datacenter)\n # This will remove the datacenter and all hypervisors\n # (if they don't contain deplopyed VMs)\n log.info(\"Removing datacenter %s...\" % datacenter.getName())\n datacenter.delete()",
"def disable(self):\n self._disable_monitor()\n self._pinger.stop()",
"def disable_service(self, **kwargs):\n put_body = json.dumps(kwargs)\n resp, body = self.put('os-services/disable', put_body)\n body = json.loads(body)\n self.validate_response(schema.disable_service, resp, body)\n return rest_client.ResponseBody(resp, body)"
] | [
"0.7023965",
"0.6589337",
"0.65053415",
"0.6484101",
"0.61595577",
"0.6139749",
"0.5918607",
"0.5901828",
"0.58210856",
"0.5821036",
"0.5794583",
"0.57928306",
"0.57894987",
"0.5775211",
"0.5763872",
"0.573645",
"0.5679011",
"0.5662737",
"0.56420565",
"0.56060183",
"0.5554399",
"0.55434185",
"0.5541433",
"0.5482608",
"0.5472026",
"0.54501224",
"0.5423751",
"0.5411556",
"0.5410073",
"0.53958195"
] | 0.79389197 | 0 |
Enable compute host services | def nfvi_enable_compute_host_services(host_uuid, host_name, host_personality,
callback):
cmd_id = _compute_plugin.invoke_plugin('enable_host_services',
host_uuid, host_name,
host_personality,
callback=callback)
return cmd_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nfvi_notify_compute_host_enabled(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('notify_host_enabled',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))",
"def enable_hyperv() -> None:\n try:\n subprocess.check_call(\n [\n 'DISM',\n '/Online',\n '/Enable-Feature',\n '/All',\n '/NoRestart',\n '/FeatureName:Microsoft-Hyper-V',\n ]\n )\n except subprocess.CalledProcessError as e:\n if e.returncode == 3010:\n pass # This is fine, because Windows.\n else:\n raise",
"def enable_service(service_name, start_type='auto'):\n run_program(['sc', 'config', service_name, 'start=', start_type])",
"def deploy_common_services():\n put('./minion/*', '/etc/systemd/system', use_sudo=True)\n sudo('source /etc/environment')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/flannel.service')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/kubelet.service')\n\n sudo('systemctl enable /etc/systemd/system/flannel.service')\n sudo('systemctl enable /etc/systemd/system/docker.service')\n sudo('systemctl enable /etc/systemd/system/kube-proxy.service')\n sudo('systemctl enable /etc/systemd/system/kubelet.service')\n\n sudo('systemctl daemon-reload')\n\n sudo('systemctl start flannel')\n sudo('systemctl start docker')\n sudo('systemctl start kube-proxy')\n sudo('systemctl start kubelet')",
"def _create_compute_service(self, **kwargs):\n\n dic = {'binary': 'nova-compute', 'topic': 'compute',\n 'report_count': 0, 'availability_zone': 'dummyzone'}\n dic['host'] = kwargs.get('host', 'dummy')\n s_ref = db.service_create(self.context, dic)\n if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():\n t = utils.utcnow() - datetime.timedelta(0)\n dic['created_at'] = kwargs.get('created_at', t)\n dic['updated_at'] = kwargs.get('updated_at', t)\n db.service_update(self.context, s_ref['id'], dic)\n\n dic = {'service_id': s_ref['id'],\n 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,\n 'vcpus_used': 16, 'local_gb_used': 10,\n 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,\n 'cpu_info': ''}\n dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)\n dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')\n dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)\n db.compute_node_create(self.context, dic)\n return db.service_get(self.context, s_ref['id'])",
"def setupCompute():\n #Update /etc/hosts with mongo-server and monitoring-server\n sudo(\"pip2 install chariot-runtime\")\n #update configuration file located in /etc/chariot/chariot.conf\n run(\"cd /etc/init.d && sudo update-rc.d chariot-nm defaults 99\")\n run(\"cd /etc/init.d && sudo update-rc.d chariot-dm defaults 99\")\n print(\"\\n after reboot check the MongoDB server for the presence of ConfigSpace database and Nodes collection. This collection should have a document each for every compute node.\")\n sudo(\"reboot\")",
"def enable(service_name: str, print_action: bool = True):\n \n if print_action:\n print_log_status(3, f\"Enabling `{service_name}`\")\n \n run_command(f\"sudo systemctl enable {service_name}\")",
"def nfvi_disable_compute_host_services(host_uuid, host_name, host_personality,\n callback):\n cmd_id = _compute_plugin.invoke_plugin('disable_host_services',\n host_uuid, host_name,\n host_personality,\n callback=callback)\n return cmd_id",
"def test_reports_enabled_hosts_as_up(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()",
"def enable_ad():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADEnable 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Enabling Active Directory failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADType 2\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Setting of standard schema for AD failed \")",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)",
"def enable_scheduling(topo, change_number=None):\n succeeded = []\n failed = []\n error = None\n if not topo:\n return succeeded, failed, \\\n 'Attempted re-enable hypervisors for scheduling without ' \\\n 'providing a valid topology. Bailing out. Fix it!'\n\n # Get a list of all compute services for this cloud\n config = topo.render('os_client_config')\n cloud_config = config.get_one_cloud(cloud=topo.cloud.canonical_name)\n cloud = shade.OpenStackCloud(cloud_config=cloud_config)\n nova_client = cloud.nova_client\n services = nova_client.services.list(binary=\"nova-compute\")\n\n # Find all disabled hosts that have our change_number text\n matched_services = []\n for service in services:\n if service.status == 'disabled' and service.disabled_reason and \\\n change_number in service.disabled_reason:\n matched_services.append(service)\n if not matched_services:\n return succeeded, failed, \\\n 'Did not find any disabled HVs to re-enable. skipping.'\n\n # Re-enable the services.\n for service in matched_services:\n try:\n nova_client.services.enable(service.host, 'nova-compute')\n succeeded.append(service.host)\n except Exception as ex:\n error = ex.message\n failed.append(service.host)\n\n return succeeded, failed, error",
"def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()",
"def run_services():\n for service in (\"minvd\", \"httpd\", \"ntpd\"):\n sudo(\"service %s start\" % service)\n sudo(\"chkconfig %s on\" % service)",
"def test_reports_enabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()",
"def enable_network_management(self):\n self._request({\"enable-network-management\": True})",
"def enable_service(self, **kwargs):\n put_body = json.dumps(kwargs)\n resp, body = self.put('os-services/enable', put_body)\n body = json.loads(body)\n self.validate_response(schema.enable_service, resp, body)\n return rest_client.ResponseBody(resp, body)",
"def service():\n conf = template('remote/addok.service', **config)\n put(conf, '/etc/systemd/system/addok.service')\n systemctl('enable addok.service')",
"def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})",
"def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)",
"def enable_server(self, server):\n log.info(\"Enabling %s in netscaler\", server)\n return self.post(\"server?action=enable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def keystonehost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['keystone']\n env.exists = exists",
"def enable_service(self, service):\n svc = self.service_path % service\n ret = self.rclient.put(svc)\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_(\"Cannot enable %s service.\") % service)\n raise exception.ShareBackendException(msg=exception_msg)",
"def scp_enable(task):\n cmd = \"ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n c_print(f\"*** {task.host}: SCP has been enabled ***\")",
"def horizonhost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['horizon']\n env.exists = exists",
"def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})",
"def test_doesnt_report_disabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()"
] | [
"0.63588804",
"0.63349974",
"0.6177922",
"0.6154969",
"0.61493015",
"0.606561",
"0.6054695",
"0.6049442",
"0.60288525",
"0.6004999",
"0.59542507",
"0.58375365",
"0.58375365",
"0.5803038",
"0.57903194",
"0.5772306",
"0.56935096",
"0.567679",
"0.5676517",
"0.56693524",
"0.56370294",
"0.5602592",
"0.5580821",
"0.55650777",
"0.5561792",
"0.5523349",
"0.5505773",
"0.5490049",
"0.5472197",
"0.5435728"
] | 0.78876954 | 0 |
Initialize the NFVI compute package | def nfvi_compute_initialize(config, pool):
global _compute_plugin
if _compute_plugin is None:
_compute_plugin = NFVIComputePlugin(config['namespace'], pool)
if _compute_plugin.ready_to_initialize(config['config_file']):
_compute_plugin.initialize(config['config_file'])
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()",
"def main():\n run_nutanix_vm_creation_module()",
"def __init__(self, nvim):\n self.nvim = nvim",
"def __init__(self,compute_core):\n self.compute_core = compute_core",
"def __init__(self, *args):\n _snap.PNEANetV_swiginit(self, _snap.new_PNEANetV(*args))",
"def __init__(self, UI):\n super(COTDeployESXi, self).__init__(UI)\n self.datastore = None\n \"\"\"ESXi datastore to deploy to.\"\"\"\n self.host = None\n \"\"\"vSphere host to deploy to - set implicitly by self.locator.\"\"\"\n self.server = None\n \"\"\"vCenter server or vSphere host - set implicitly by self.locator.\"\"\"\n self._locator = None\n self._ovftool_args = []\n\n self.ovftool = OVFTool()",
"def initialise(self):\n self.sc.init.exec_action(self.variables)",
"def __init__(self):\n self.svclassifier = SVC(kernel='linear')",
"def init():\n rino.initialize.initialize()",
"def __init__(self, *args, **kwargs):\n self.license_file = None\n self.license_env_var = None\n\n self.home_subdir = os.path.join(os.getenv('HOME'), 'intel')\n common_tmp_dir = os.path.dirname(tempfile.gettempdir()) # common tmp directory, same across nodes\n self.home_subdir_local = os.path.join(common_tmp_dir, os.getenv('USER'), 'easybuild_intel')\n\n super(IntelBase, self).__init__(*args, **kwargs)\n\n # prepare (local) 'intel' home subdir\n self.setup_local_home_subdir()\n self.clean_home_subdir()",
"def __init__(self):\n self._lib_vscf_ecc = VscfEcc()\n self._c_impl = None\n self._ctx = None\n self.ctx = self._lib_vscf_ecc.vscf_ecc_new()",
"def initialize(self):\n \n casalog.origin(\"ParallelDataHelper\")\n\n # self._arg is populated inside ParallelTaskHelper._init_()\n self._arg['vis'] = os.path.abspath(self._arg['vis'])\n # MPI setting\n if self._mpi_cluster:\n self._cluster.start_services()\n \n if (self._arg['outputvis'] != \"\"):\n self._arg['outputvis'] = os.path.abspath(self._arg['outputvis']) \n\n outputPath, self.outputBase = os.path.split(self._arg['outputvis'])\n try:\n if self.outputBase[-1] == '.':\n self.outputBase = self.outputBase[:self.outputBase.rindex('.')]\n except ValueError:\n # outputBase must not have a trailing .\n pass\n\n if self.outputBase == '.' or self.outputBase == './':\n raise ValueError, 'Error dealing with outputvis'\n \n # The subMS are first saved inside a temporary directory\n self.dataDir = outputPath + '/' + self.outputBase+'.data'\n if os.path.exists(self.dataDir): \n shutil.rmtree(self.dataDir)\n\n os.mkdir(self.dataDir)",
"def __init__(self, ni, nj, nk):\n \n self.ni = ni\n self.nj = nj\n self.nk = nk\n \n self.nn = np.zeros(3)\n \n self.nn[0] = self.ni\n self.nn[1] = self.nj\n self.nn[2] = self.nk\n \n self.x0 = np.zeros(3)\n self.dh = np.zeros(3)\n self.xm = np.zeros(3)\n self.xc = np.zeros(3)\n \n self.EPS_0 = 8.85418782e-12\n self.QE = 1.602176565e-19;\n self.AMU = 1.660538921e-27\n self.ME = 9.10938215e-31;\n self.K = 1.380648e-23;\n self.EvToK = self.QE/self.K;\n \n self.phi = np.zeros((self.ni, self.nj, self.nk))\n self.phi_new = np.zeros((self.ni, self.nj, self.nk))\n self.R = np.zeros((self.ni, self.nj, self.nk))\n self.rho = np.zeros((self.ni, self.nj, self.nk))\n self.node_vol = np.zeros((self.ni, self.nj, self.nk))\n self.ef = np.zeros((self.ni, self.nj, self.nk, 3))",
"def init(self, attrs):\n\n self.name = attrs[\"name\"]\n self.file_path = attrs.get(\"file\", \"pod.yaml\")\n\n self.nodes, self.nfvi_host, self.host_mgmt = \\\n self.helper.parse_pod_file(self.file_path, 'OvsDpdk')\n\n self.attrs = attrs\n self.vm_flavor = attrs.get('flavor', {})\n self.servers = attrs.get('servers', {})\n self.vm_deploy = attrs.get(\"vm_deploy\", True)\n self.ovs_properties = attrs.get('ovs_properties', {})\n # add optional static network definition\n self.networks = attrs.get(\"networks\", {})\n\n LOG.debug(\"Nodes: %r\", self.nodes)\n LOG.debug(\"NFVi Node: %r\", self.nfvi_host)\n LOG.debug(\"Networks: %r\", self.networks)",
"def initialize_project():\n # Initialize work flow\n wk_flow = vdapi.VDriveAPI('VULCAN')\n archive_root = '/SNS/VULCAN'\n if os.path.exists(archive_root) is False:\n archive_root = None\n wk_flow.set_data_root_directory(archive_root)\n wk_flow.set_working_directory('~/Temp/VDriveTest/')\n\n # Set to my_data\n my_data.set(wk_flow)\n\n return",
"def __init__(self, compute, project, zone=None, region=None):\n pass",
"def _initialize_fembv_varx(X, n_components, init='random',\n u=None, memory=0, random_state=None):\n if init is None:\n init = 'random'\n\n if init == 'random':\n return _initialize_fembv_varx_random(\n X, n_components, u=u, memory=memory, random_state=random_state)\n else:\n raise ValueError(\n 'Invalid init parameter: got %r instead of one of %r' %\n (init, FEMBV_VARX_INITIALIZATION_METHODS))",
"def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.svm\n self.model = sklearn.svm.NuSVR",
"def nfvi_compute_finalize():\n if _compute_plugin is not None:\n _compute_plugin.finalize()",
"def __init__(self):\r\n\r\n self.Helpers = Helpers(\"Movidius\")\r\n self.confs = self.Helpers.confs\r\n\r\n self.classes = []\r\n self.ncsGraph = None\r\n self.ncsDevice = None\r\n self.reqsize = None\r\n\r\n self.mean = 128\r\n self.std = 1 / 128\r\n\r\n #mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)\r\n\r\n self.Helpers.logger.info(\"Movidius class initialization complete.\")",
"def __init__(self):\n self.update_os_packages()\n self.upgrade_os_packages()",
"def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')",
"def __initialize__(self):\n self.initialized = True\n if self.ids_to_consider is None:\n self.V_tot = np.sum(self.V[self.p_ids-1])\n else:\n self.V_tot = np.sum(self.V[self.ids_to_consider-1])\n return",
"def __init__(self):\n \n self.load_PSF_data()",
"def __init__ (self) :\n self.loadCSPAD2x2CalibParsDefault()",
"def _init_gpf(self):\n raise NotImplementedError('Abstract Method')",
"def __init__(self, osi, fy, e0, a, n):\n self.osi = osi\n self.fy = float(fy)\n self.e0 = float(e0)\n self.a = float(a)\n self.n = float(n)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.fy, self.e0, self.a, self.n]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)",
"def __init__( self ):\n self.NQ = 16\n self.Nbranches = 3\n self.NatomsUC = 1\n self.dim = 3\n self.QVectors = np.zeros( ( self.NQ , 3 ) )\n self.MakeQVectors()\n self.EigenVectors = np.zeros( [ self.NQ , \n self.Nbranches ,\n self.NatomsUC , \n self.dim ] )\n self.MakeEigenVectors()",
"def init():\n safe_call(backend.get().af_init())",
"def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)"
] | [
"0.6496471",
"0.6278192",
"0.6062997",
"0.59552294",
"0.59278333",
"0.5924738",
"0.5892015",
"0.57887715",
"0.5772586",
"0.5722492",
"0.57131547",
"0.5701967",
"0.56974494",
"0.5684051",
"0.56797975",
"0.5674536",
"0.56544673",
"0.56248695",
"0.56169355",
"0.5615452",
"0.5615084",
"0.5609633",
"0.5572741",
"0.5559083",
"0.55503684",
"0.554845",
"0.5535275",
"0.55209714",
"0.5513001",
"0.55114007"
] | 0.7209175 | 0 |
Finalize the NFVI compute package | def nfvi_compute_finalize():
if _compute_plugin is not None:
_compute_plugin.finalize() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def finalize(self):\n self.classifier.finalize()",
"def finalize(self):\n pass\n #fftw2py.clean_fftw_solver(self.output_field.dimension)",
"def finalize(self):\n self.report('Finalizing optimization procedure.')\n with self.optimizer() as opt:\n optimal_process_output = opt.result_value\n optimal_process_output.store()\n self.out('optimal_process_output', optimal_process_output)\n result_index = opt.result_index\n optimal_process = self.ctx[self.eval_key(result_index)]\n self.out('optimal_process_uuid', Str(optimal_process.uuid).store())",
"def finalize():",
"def finalize():",
"def finalize():",
"def finalize(self):\n print('Cleaning up...')",
"def finalize(self):\n\t\tself.logger.info(\"Please wait while finalizing the operation.. Thank you\")\n\t\tself.save_checkpoint()\n\t\tself.summary_writer.export_scalars_to_json(\"{}all_scalars.json\".format(self.config.summary_dir))\n\t\tself.summary_writer.close()\n\t\tself.data_loader.finalize()\n\t\tif self.config.output_model == True:\n\t\t\ttry:\n\t\t\t\tself.logger.info('Saving model for external usage.')\n\t\t\t\tself.load_checkpoint('model_best.pth.tar')\n\t\t\t\ttraced = torch.jit.trace(self.model,self.data_loader.train_loader.dataset[:2][0].float().to(self.device))\n\t\t\t\ttraced.save(self.config.output_model_path)\n\t\t\texcept IOError:\n\t\t\t\tself.logger.info('Output model path not found.')",
"def finalize(self):",
"def finalize(self):",
"def Finalize():\n pass",
"def finalize(self):\r\n self.outfile_param.close()\r\n self.outfile_sim.close()",
"def Finalize(self):\n return _gmat_py.ICRFFile_Finalize(self)",
"def finalize(self):\n self._iou_ap.compute_ap_curve()\n self._pixel_ap.compute_ap_curve()\n self._azimuth_ap.compute_ap_curve()\n self._polar_ap.compute_ap_curve()",
"def finalize(self):\n if self.rank > 0:\n return\n\n self._check_all_files_are_saved()\n self._collect_embedding_id_offset()\n\n self._build_ivf()\n self._update_metadata()",
"def finalize(self, *args, **kwargs):\n self._add_all_odes_to_interface()\n self._transform_derivatives_on_rhs()\n super(InterfaceGenerator, self).finalize(*args, **kwargs)",
"def _finalize(self):\n if self.vcount > 1:\n # skewness = g1 = sqrt(n) M3/(M2^(3/2)) # zero \n # kurtosis = g2 = n M4/M2^2 - 3 # zero for normal\n # sk = (M3/nf)/(sigma**3)\n # ku = (M4/nf)/sigma**4 - 3\n n = self.vcount\n nf = float(n)\n mu2 = self.vm2/nf\n self.vvar = self.vm2/(nf-1)\n try:\n self.vskewness = self.vm3/nf/(mu2**1.5)\n self.vkurtosis = self.vm4/nf/(mu2**2)\n except:\n self.vskewness = 0\n self.vkurtosis = 0\n elif self.vcount == 1:\n self.vvar = 0\n self.vskewness = 0\n self.vkurtosis = 0\n self.dirty = False",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\r\n pass",
"def finalize():\n pass",
"def finalize():\n pass",
"def finalize(self) -> None:\n pass",
"def finalize() -> None:\n collective.finalize()",
"def finalize():\n global interpreter\n del interpreter\n blotish._cleanup()\n\n # Set the progress printing state to whatever it was before\n import paraview.servermanager\n global wasProgressPrinting\n paraview.servermanager.SetProgressPrintingEnabled(wasProgressPrinting)",
"def finalize(self): # real signature unknown; restored from __doc__\n pass"
] | [
"0.6502741",
"0.650001",
"0.62286294",
"0.6211019",
"0.6211019",
"0.6211019",
"0.61800075",
"0.6171059",
"0.6159695",
"0.6159695",
"0.61318797",
"0.6069689",
"0.6061937",
"0.60552156",
"0.6054228",
"0.60331243",
"0.60229737",
"0.60056174",
"0.60056174",
"0.60056174",
"0.60056174",
"0.60056174",
"0.60056174",
"0.5980043",
"0.59737647",
"0.59737647",
"0.597142",
"0.5953966",
"0.5942039",
"0.5934185"
] | 0.8275187 | 0 |
If we have multiple projects, this will loop through the projects to find the one with the given story. Returns None if not found. | def find_project_for_story(story_id):
for project in Project.all():
story = project.load_story(story_id)
if story is not None:
return project
#Not found
print "No project found for story: #{}".format(story_id)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_project_for_story(story_id):\r\n\r\n for project in Project.all():\r\n story = project.load_story(story_id)\r\n if story is not None:\r\n return project\r\n\r\n #Not found\r\n print \"No project found for story: #{}\".format(story_id)\r\n return None",
"def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None",
"def find_project(hw, r):\n\n # Search in the current directory for a CMakeLists.txt file that\n # contains something like the given project.\n cmd = \"find . -name CMakeLists.txt -exec grep -Hi {0} {{}} \\; | grep -i project\".format(hw)\n p = Popen(cmd, shell=True, stdout=PIPE)\n out = p.stdout.read()\n p.stdout.close()\n p.wait()\n\n # Transform the output into something readable.\n for i in out:\n found = i.split(':')\n \n # Scrub the path name\n path = os.path.dirname(found[0])[2:]\n if not path:\n path = \"top-level directory\"\n else:\n path = \"directory '{0}'\".format(path)\n r.note(\" possible candidate in the {0}\".format(path))",
"def load_story(self, story_id):\r\n story_url = \"https://www.pivotaltracker.com/services/v3/projects/{}/stories/{}\".format(self.project_id, story_id)\r\n\r\n resposne = _perform_pivotal_get(story_url)\r\n # print resposne.text\r\n if resposne.status_code == 404:\r\n # Not Found\r\n return None\r\n else:\r\n #Found, parsing story\r\n root = ET.fromstring(resposne.text)\r\n return Story.from_node(root)",
"def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)",
"def find_project(self, value, key=\"name\"):\n if not value:\n return\n if key.lower() not in (\"name\", \"id\"):\n raise ValueError()\n\n if key == \"name\" and not getattr(self, \"projects\", None):\n self.get_projects()\n elif key == \"id\" and not getattr(self, \"projects\", None):\n return self.get_project(value)\n\n try:\n if key.lower() == \"name\":\n return self.projects[self._project_indices_by_name[value]]\n elif key.lower() == \"id\":\n return self.projects[self._project_indices_by_id[value]]\n except KeyError:\n self.logger.debug(\"Project {}: {} not found\".format(key, value))",
"def get_story(self, project, filter):\n resource = \"projects/{0:d}/stories\".format(project.id)\n params = {\n \"fields\": Story.FIELDS,\n \"filter\": \"type:feature,chore,bug {0}\".format(filter),\n \"limit\": 1\n }\n stories = self._request(\"get\", resource, params=params)\n\n if len(stories) == 1:\n ret_val = Story(stories[0])\n else:\n ret_val = None\n\n return ret_val",
"def get_project(self, id):\n for project in self.projects:\n if project.id == int(id):\n ret_val = project\n break\n else:\n ret_val = None\n\n return ret_val",
"def load_story(self, story_id):\n story_url = \"https://www.pivotaltracker.com/services/v5/projects/{}/stories/{}\".format(self.project_id, story_id)\n\n\n try:\n response = _perform_pivotal_get(story_url)\n return Story.from_json(response)\n except requests.HTTPError as e:\n if e.response.status_code == 404:\n # Not Found\n return None\n raise",
"def getProjectByName(self, name):\n\n for project in self.__projects:\n if project.getName() == name:\n return project\n\n return None",
"def get_project(self, name=None):\n if not name:\n if not self.select_project:\n log.error(\"no default project name specified\")\n return\n name = self.select_project\n\n if name in self.projects:\n return self.projects[name]\n\n log.debug( \"project {} not found in {} projects \".format(name, len(self.projects)) )\n return None",
"def find_best_match(worktree, token):\n possibilities = [x.src for x in worktree.projects]\n matches = difflib.get_close_matches(token, possibilities, cutoff=0)\n if matches:\n closest_src = matches[0]\n return worktree.get_project(closest_src).path\n return None",
"def game_from_team_name(games, team_name, verbose=False):\n for game in games:\n teams = teams_from_game(game)\n if team_name in teams:\n if verbose:\n print(f\"found {team_name} game\")\n return game\n if verbose:\n print(f\"{team_name} game NOT found\")\n return None",
"def get(self, story_id):",
"def get(self, name):\n try:\n return self.projects[name]\n except KeyError:\n print(\"No project called %s was found\" %name)",
"def searchClientProject(self, name):\n for client in self.getClients():\n try:\n for project in self.getClientProjects(client['id']):\n if project['name'] == name:\n return project\n except Exception:\n continue\n\n print('Could not find client by the name')\n return None",
"def get_story_by_name(self, story_name):\n return Story.get_by_name(story_name)",
"def _get_target_version_by_name(jira, conf, name):\n versions = jira.project_versions(conf.JIRA['project'])\n for version in versions:\n if getattr(version, 'name') == name:\n return version\n\n return None",
"def get_project_data(project_id):\n projects = wf.cached_data('projects', None, max_age=0)\n\n # Loop through projects and return project with a match\n for project in projects:\n if int(project['id']) == int(project_id):\n return project",
"def fetch_project(search_info):\n search = search_collection.find_one({\"_id\": SEARCH_ID})\n user = user_collection.find_one({\"_id\": search_info[\"USER_ID\"]})\n user_bookmarks = user[\"bookmarks\"]\n user_contributions = user[\"contributions\"]\n user_outgoing = user[\"outgoing\"]\n try:\n project_id_list = search[search_info[\"search_query\"]]\n except KeyError:\n project_id_list = None\n except AttributeError:\n project_id_list = None\n if project_id_list != None:\n projects_list = list()\n for id in project_id_list:\n project = project_collection.find_one({\"_id\": id})\n if project == None:\n continue\n if user_bookmarks == None:\n project[\"bookmark\"] = False\n else:\n project[\"bookmark\"] = True if id in user_bookmarks else False\n if user_outgoing == None:\n project[\"contribution\"] = False\n\n else:\n project[\"contribution\"] = True if id in user_outgoing else False\n projects_list.append(project)\n return projects_list\n else:\n return []",
"def get_project_id_by_name(self, name):\n\n for project in self.api.state['projects']:\n if project['name'] == name:\n return project['id']\n\n return None",
"def _get_story(self, article):\n # Get similar Articles by tag, sorted by most similar descending.\n similar = article.tags.similar_objects()\n similar = list(filter(\n lambda item: self._filter_similar_articles(\n article=item, compare=article\n ),\n similar\n ))\n if similar:\n return similar[0].story\n\n # If no similar articles were found, create a new story.\n return Story.objects.create()",
"def get_or_create_project(group, project_label):\n\n print(f\"Looking for prject.label {project_label}\")\n projects = group.projects.find(f\"label={project_label}\")\n if len(projects) > 0:\n print(f\"Found it.\")\n project = projects[0]\n print(f\"project.label {project.label}\")\n print(f\"project.id {project.id}\")\n else:\n print(\"Project not found - Creating it.\")\n project = group.add_project(label=f\"{project_label}\")\n print(f\"project.label {project.label}\")\n print(f\"project.id {project.id}\")\n return project",
"def get_project(self, i):\r\n return self.__projects[i]",
"def test_find_project(self):\n result = Project.objects.find(\n ['test'], project_type=PROJECT_TYPE_PROJECT\n )\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.project)",
"def parse_one_project(self, args, project_arg):\n project = self.linguist_worktree.get_linguist_project(project_arg, raises=True)\n return [project]",
"def project_or_fund(self):\n if self.category == Account.PROJECT:\n return self.project_set.first()\n else:\n return self.campaign_set.first()",
"def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError",
"def process_project(self, project_name):\n self.logging.debug('Retrieving project %s..', project_name)\n\n try:\n project = self.get_lp_client().projects[project_name]\n except KeyError:\n self.logging.error(\n \"Project %s wasn't found. Skipped..\",\n project_name\n )\n else:\n if project:\n self.logging.debug(\n 'Retrieving active milestone %s..',\n self.get_new_milestone_name()\n )\n\n new_milestone = project.getMilestone(\n name=self.get_new_milestone_name()\n )\n self.get_stats()[project.name] = {}\n\n for old_milestone_name in self.get_old_milestone_names():\n if self.is_limit_achived():\n break\n\n self.process_milestone_on_project(\n project, old_milestone_name, new_milestone\n )\n\n else:\n self.logging.debug(\n \"Project %s wasn't found. Skipped..\",\n project_name\n )",
"def get_project_or_study(obj_type, obj_id):\n \n response = None\n\n try:\n if obj_type not in set([\"projects\", \"studies\"]):\n raise Exception(\"Invalid object type specified\")\n\n files_d = {}\n files_d.update(file_dict[obj_type][\"valid\"])\n files_d.update(file_dict[obj_type][\"invalid\"])\n\n if obj_id in files_d.keys():\n json_file = data_dir + files_d[obj_id]\n if os.path.exists(json_file):\n response = get_response(open(json_file, \"r\").read())\n else:\n response = get_response(not_found_json, status=404)\n else:\n if obj_id == \"NA\": # endpoint not implemented simulation,\n # return 501 instead of 404\n response = get_response(not_found_json, status=501)\n else: \n response = get_response(not_found_json, status=404)\n\n except Exception as e:\n response_body = '''{\"message\": \"invalid resource '%s'\"}''' % obj_type\n response = get_response(response_body, status=400)\n \n return response"
] | [
"0.84241813",
"0.62560344",
"0.61722744",
"0.60507",
"0.59317017",
"0.5916581",
"0.59009284",
"0.587145",
"0.5809766",
"0.57527995",
"0.56877387",
"0.56099844",
"0.56028783",
"0.56005305",
"0.5553567",
"0.5518918",
"0.5457603",
"0.53568715",
"0.53509325",
"0.53243184",
"0.5304747",
"0.5280034",
"0.5274395",
"0.52623177",
"0.52496094",
"0.52383953",
"0.5219083",
"0.52172464",
"0.5187457",
"0.5175368"
] | 0.84125274 | 1 |
Returns the first label, if any, from labels. Used for grouping. | def first_label(self):
if self.labels:
return self.labels[0]
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_label(self):\r\n return self.labels.split(',')[0]",
"def label(tree):\n return tree[0]",
"def getLabel(labels):\r\n elems = {}\r\n for l in labels:\r\n if l not in elems.keys():\r\n elems[l] = 1\r\n else:\r\n elems[l] += 1\r\n counts = sorted(elems.values(), reverse=True)\r\n if len(counts) > 1 and counts[0] == counts[1]:\r\n return choice(list(elems.keys()))\r\n return sorted(elems, key=elems.get, reverse=True)[0]",
"def _find_first(self, ast, label):\n res = self._find_all(ast, label, max_results=1)\n if len(res):\n return res[0]\n return None",
"def user_labels_first(*args):\n return _ida_hexrays.user_labels_first(*args)",
"def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label",
"def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]",
"def fromLabel(name):\n return Data.labels.index(name)",
"def _get_label(obj):\n # NOTE: BarContainer and StemContainer are instances of tuple\n while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:\n obj = obj[-1]\n label = getattr(obj, 'get_label', lambda: None)()\n return label if label and label[:1] != '_' else None",
"def find_label_by_id(self, _id):\n search = True\n i = 0\n while search:\n if i == len(self.labels):\n break;\n\n if self.labels[i].id == _id:\n return self.labels[i]\n search = False\n #print self.labels[i].id\n i += 1\n if search:\n return None",
"def get_labels(self):\n return self.labels[1:]",
"def auto_label_text_chunk(text, labels):\n\n for label in labels:\n regex_str = \"|\".join(labels[label])\n r = re.compile(regex_str, flags=re.I)\n if re.search(r, text) is not None:\n return label\n\n return \"None\"",
"def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n return self.labels[index]",
"def get_label_id(self) -> Optional[str]:\n\n return None if (self.elements is None or len(self.elements) == 0) else self.elements[0].label_id",
"def extract_labels_single_format(featureset):\n def extract(label): return 1 if label == [1,0] else 0\n y = lambda label: extract(label), featureset['Labels']\n return y",
"def get_label(urs):\n return assign_term(urs)[1]",
"def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n #return self.labels[index, :, :]\n return self.labels[index]",
"def get_label(self, indices=None):\n if indices is None:\n indices = list(range(0, self.get_sample_size()))\n elif isinstance(indices, collections.abc.Iterable):\n indices = sorted(list(set(indices)))\n else:\n indices = [indices]\n\n if len(indices) == 0:\n return []\n partitions = self.get_partitions(self.persistence)\n labels = self.X.shape[0] * [None]\n for label, partition_indices in partitions.items():\n for idx in np.intersect1d(partition_indices, indices):\n labels[idx] = label\n\n labels = np.array(labels)\n if len(indices) == 1:\n return labels[indices][0]\n return labels[indices]",
"def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None",
"def get_labels(self):\r\n return None",
"def first_field_by_label(self, label: str, case_sensitive=True):\n fields = self.fields_by_label(label, case_sensitive=case_sensitive)\n f = fields[0]\n return f",
"def _get_label(self):\n return self.label",
"def get_labels():\n return if_found(dao.get_labels())",
"def get_label(id):\n return if_found(dao.get_label(id))",
"def get_label(column):\n\n for key, label in column_to_label.items():\n if key in column:\n return label",
"def _get_labels(touches):\n \n out = touches.copy(deep=True)\n # pandas df.min() ignores NaN values\n first_touch = touches[['stop_loss', 'take_profit']].min(axis=1)\n for loc, t in first_touch.items():\n if pd.isnull(t):\n out.loc[loc, 'label'] = 0\n elif t == touches.loc[loc, 'stop_loss']:\n out.loc[loc, 'label'] = -1\n else:\n out.loc[loc, 'label'] = 1\n return out",
"def choose_one(self,labels) :\n # Exercise 1: Reduce k until you find a unique winner\n counts = Counter(labels)\n winner, winner_count = counts.most_common(1)[0]\n num_winners = len([count for count in counts.values() if count == winner_count])\n # replace&complete: The winning label has to be found and returned\n if num_winners == 1:\n return winner\n else:\n return self.choose_one(labels[:-1])",
"def _findLabel(self, label):\n if self.label == label:\n return self\n else:\n for i in range(self.nChildren()):\n found = self.children[i]._findLabel(label)\n if found:\n return found\n return None",
"def get_label(self, key):\n return self.labels.get(key, None)",
"def fromIndex(index):\n return Data.labels[index]"
] | [
"0.8280834",
"0.68157196",
"0.6787128",
"0.67655355",
"0.67253506",
"0.6636867",
"0.6554082",
"0.65352005",
"0.65143234",
"0.6482284",
"0.64466333",
"0.64403844",
"0.64376044",
"0.64165425",
"0.6404469",
"0.6386726",
"0.6379",
"0.6357332",
"0.6351127",
"0.6299424",
"0.6292083",
"0.62911296",
"0.62755877",
"0.62585104",
"0.62415445",
"0.62302303",
"0.6224106",
"0.6199329",
"0.6184269",
"0.61482066"
] | 0.851758 | 0 |
Given a filter string, returns a list of stories matching that filter. If none match, returns an empty list. | def get_stories(self, filter_string):
story_filter = quote(filter_string, safe='')
stories_url = "https://www.pivotaltracker.com/services/v5/projects/{}/stories?filter={}".format(self.project_id, story_filter)
response = _perform_pivotal_get(stories_url)
return [Story.from_json(story_node) for story_node in response] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetStories(self, filt=None):\n stories = self._ApiQueryStories(filt)\n parsed = xml.dom.minidom.parseString(stories)\n els = parsed.getElementsByTagName('story')\n lst = []\n for el in els:\n lst.append(Story.FromXml(el.toxml()))\n return lst",
"def get_stories(self, filter_string):\r\n\r\n story_filter = quote(filter_string, safe='')\r\n stories_url = \"https://www.pivotaltracker.com/services/v3/projects/{}/stories?filter={}\".format(self.project_id, story_filter)\r\n\r\n response = _perform_pivotal_get(stories_url)\r\n stories_root = ET.fromstring(response.text)\r\n\r\n return [Story.from_node(story_node) for story_node in stories_root]",
"def filter_stories(stories, triggerlist):\n # TODO: Problem 10\n filteredStories = []\n## print 'stories:', stories\n## print 'triggerlist:', triggerlist\n \n for story in stories:\n for trig in triggerlist:\n if trig.evaluate(story):\n filteredStories.append(story)\n break #added without testing not sure if works but should optimize\n return filteredStories",
"def get_story(self, project, filter):\n resource = \"projects/{0:d}/stories\".format(project.id)\n params = {\n \"fields\": Story.FIELDS,\n \"filter\": \"type:feature,chore,bug {0}\".format(filter),\n \"limit\": 1\n }\n stories = self._request(\"get\", resource, params=params)\n\n if len(stories) == 1:\n ret_val = Story(stories[0])\n else:\n ret_val = None\n\n return ret_val",
"def filter(self, filters):",
"def filterByReview(minReviewNum, stories):\n\tsortedstories = filter(lambda s: s[\"reviews\"] >= minReviewNum, stories)\n\treturn [x for x in sortedstories]",
"def get_stories(self, query):\n url = self.base_url + 'search?query={query}'\n\n req = requests.get(headers=self.headers, url=url.format(query=query))\n\n return req.json()['stories']['stories']",
"def search(self, filter):\n return [note for note in self.notes if note.match(filter)]",
"def filter_stories(stories,trigger_dict_eng,trigger_dict_span,num_line):\n# trig_story = []\n temp_stories = stories[:num_line]\n for index, story in enumerate(temp_stories):\n print('\\n'+str(index),end=' ')\n #print(story.get_gevent_id(),story.get_dateAdded())\n story.clean_text()\n if story.get_text() == None:\n pass\n else:\n try:\n detect(story.text)\n if detect(story.text) == 'en':\n story.set_language('en')\n #print(type(story.get_language()))\n print(story.get_language())\n for key,trig in trigger_dict_eng.items():\n try:\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) else ctime()#print('False',end=' ')\n except AttributeError:\n pass\n if detect(story.text) == 'es':\n story.set_language('es')\n #print(type(story.get_language()))\n print(story.get_language()) \n for key,trig in trigger_dict_span.items():\n try:\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) else ctime()#print('False',end=' ')\n except AttributeError:\n pass\n except:\n print('error')\n print(story.get_taxonomy())",
"def _ApplyTestFilter(testfilter, bot_spec):\n if testfilter:\n return [(botname, set(testfilter) | (tests & set(['compile'])))\n for botname, tests in bot_spec]\n else:\n return bot_spec",
"def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y",
"def filter(predicate): #pylint: disable=redefined-builtin\n from xpedite.analytics.timelineFilter import TimelineFilter\n profiles = FilteredProfiles(TimelineFilter(predicate).apply(globalProfile()))\n return profiles",
"def list_of_stories():\n return render_template(\"list_of_stories.html\", stories = stories.values())",
"def ls(filter=None):",
"def get_wiki_lines(wt, predicate=None):\n return [line for line in wt.contents.split('\\n') if not callable(predicate) or predicate(line)]",
"def test_explore_get_list_only_published(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n \n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft')\n resp = self.api_client.get('/api/0.1/stories/explore/')\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 1)\n self.assertEqual(self.deserialize(resp)['objects'][0]['story_id'], story1.story_id)",
"def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')",
"def test_get_list_published_only(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n language=\"en\", author=self.user)\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft',\n language=\"en\", author=self.user)\n uri = '/api/0.1/stories/'\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 1)\n story_ids = [story['story_id'] for story in self.deserialize(resp)['objects']]\n self.assertNotIn(story2.story_id, story_ids)",
"def accept_story(self, story):\n\n queryset = Story.objects.filter(pk=story.pk)\n return self.accept_stories(queryset)",
"def filter(full_poi_list, type_of_poi):\n pois = []\n if type_of_poi == \"all\":\n for i in full_poi_list:\n entry = i[0]\n pois.append(entry)\n if type_of_poi == \"gym\":\n for i in full_poi_list:\n if i[1] == 2:\n entry = i[0]\n pois.append(entry)\n return pois",
"def search(request):\n if 'q' in request.GET:\n term = request.GET['q']\n story_list = Story.objects.filter(Q(title__contains=term)|Q(markdown_content__contains=term))\n heading = \"Search results\"\n return render_to_response(\"cms/story_list.html\",locals())",
"def search(self, filtro):\n return [nota for nota in self.notas if nota.match(filtro)]",
"def get_filter_word_list(self):\n self.filter_words = self.read_word_file(self.filter_word_file)",
"def get_stories(self, story_type=''):\n all_rows = self._get_zipped_rows(self._get_soup(page=story_type))\n return self._build_story(all_rows)",
"def search(self, filter: str = None) -> dict:\n r = requests.get(self.url, headers=self.headers)\n\n if filter:\n data = r.json()\n return filter_list(data=data, filter_by=filter)\n\n return r.json()",
"def test_filter_list_of_dictionary_submission():\n submission_list = get_list_of_submission_dictionaries(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX)\n print(len(submission_list))\n\n filtered_list = filter_list_of_dictionary_submission(submission_list, MIN_COMMENTS)\n print(len(filtered_list))",
"def filters():\n states = list(storage.all('State').values())\n states.sort(key=lambda state: state.name)\n cities = list(storage.all('City').values())\n cities.sort(key=lambda city: city.name)\n amenities = list(storage.all('Amenity').values())\n amenities.sort(key=lambda amenity: amenity.name)\n\n return render_template('10-hbnb_filters.html', states=states,\n cities=cities, amenities=amenities)",
"def extract_words_from_text_with_filters(text: str, char_filters: Union[str, List[str]]) -> List[str]:\n translate_dict = dict((c, \" \") for c in char_filters)\n new_text = Str.get_string_from_translate_dict(text.lower(), translate_dict)\n return [word for word in new_text.split(\" \") if len(word) > 0]",
"def test_get_work_list_type_filter(self):\n # Login as simple user\n self.authenticate(self.user)\n\n # Get works list for type wt1\n # should return only work1 and work2\n response = self.client.get(self.url, {\"type\": \"wt1\"})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)\n self.assertEqual(len(response.data[\"results\"]), 2)\n\n # works are sorted by name\n self.check_work_json(response.data[\"results\"][0], self.work1)\n self.check_work_json(response.data[\"results\"][1], self.work2)\n\n # Get works list for type wt2\n # should return only work3\n response = self.client.get(self.url, {\"type\": \"wt2\"})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 1)\n self.assertEqual(len(response.data[\"results\"]), 1)\n\n # works are sorted by name\n self.check_work_json(response.data[\"results\"][0], self.work3)",
"def test_filter_6(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'location-includes': 'Spadina'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Kinder', 'Ken', 'Alan']\n self.assertEqual(actual, expected)"
] | [
"0.69686466",
"0.68707925",
"0.6780581",
"0.6072194",
"0.57923883",
"0.55801576",
"0.5537283",
"0.5517888",
"0.55005556",
"0.5456136",
"0.53563553",
"0.53528243",
"0.5330737",
"0.5328852",
"0.5321616",
"0.5312549",
"0.5293955",
"0.5254234",
"0.52464867",
"0.5211708",
"0.52068645",
"0.516673",
"0.516228",
"0.51530266",
"0.51436627",
"0.5136828",
"0.51254976",
"0.50862",
"0.50802946",
"0.5072578"
] | 0.68931544 | 1 |
Parses an int from an ElementTree node; if not found, returns None. | def _parse_int(node, key):
element = node.get(key)
if element is not None:
return int(element)
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_int(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n return int(element.text)\r\n else:\r\n return None",
"def parseint(el):\n return parse(el, int)",
"def convertStringToInt(xmlNode):\n try:\n val = int(xmlNode.text)\n return val\n except (ValueError,TypeError):\n raise IOError('Integer value is required for content of node %s, but got %s' %(node.tag, node.text))",
"def parse_int(value):\n try:\n return int(value)\n except (ValueError, TypeError):\n return None",
"def get_int_attribute(element, attr, default = 0):\n result = element.getAttribute(attr)\n if result == \"\":\n return default\n return int(result)",
"def parse_int(self, selector):\n return int(re.sub('[^0-9]', '', self.parse_string(selector)))",
"def get_xml_node_value (root, name):\n node = root.find(name)\n\n if not node:\n return None\n elif len(node.text) < 1:\n return None\n else:\n return node.text.strip()",
"def parse(value):\n return int(value)",
"def getvalueofnode(node):\r\n if node is not None:\r\n return node.text\r\n else:\r\n None",
"def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value",
"def getvalueofnode(node):\n return node.text if node is not None else None",
"def getvalueofnode(node):\n return node.text if node is not None else None",
"def support(node):\n try:\n return float(node.name.split(':')[0])\n except (ValueError, AttributeError):\n return None",
"def getvalueofnode(node):\r\n return node.text if node is not None else None",
"def try_parse_int(value):\n try:\n return int(value)\n except:\n return 0",
"def xmlparsing(xmlpath, valuetoget):\n try:\n _Handle = ET.parse(xmlpath)\n rootelement = _Handle.getroot()\n number = rootelement[valuetoget].text\n return number\n #return _Handle\n except Exception as er:\n print \"Not able to Read /usr/local/McAfee/ProductConfig.xml file\"",
"def extract_int(text):\n m = re.search(r\"\\d+\", text)\n if m is not None:\n return m.group(0)",
"def parse_int(sint):\n if sint == '':\n return None\n else:\n return int(sint)",
"def get_intval(record, field_name):\n val = recordval(record, field_name)\n if val != \"\" and not re.match('[0-9]+', val):\n parser_error(\"bad value in \"+field_name+\": '\"+val+\"'-- try a number\")\n return val",
"def xpathCastNodeToNumber(self):\n ret = libxml2mod.xmlXPathCastNodeToNumber(self._o)\n return ret",
"def _parse(self, the_id: typing.Union[int, str]) -> int:\n return int(the_id)",
"def find_numeric(text):\n text_digits = \\\n re.sub('[^\\d]', '', text)\n if not text_digits:\n return None\n try:\n return int(text_digits)\n except ValueError:\n return None",
"def tryCastToInt(number):\n try:\n return int(number)\n except:\n print(\"Error! Impossible to parse this variable\")\n return 0",
"def _getint(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> int:\n try:\n return parser.getint(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read int value in config file for key '{key}' and string '{parser.get(section, key)}'. Must be an integer\"\n ) from err",
"def intparse(text):\n return int(text, 0)",
"def _try_int(self, string):\n value = re.sub(r\"[^0-9]+\", '', string)\n try:\n value = int(value)\n except ValueError:\n value = None\n return value",
"def try_int(value: Any) -> Optional[float]:\n try:\n return int(value)\n except (TypeError, ValueError):\n return None",
"def visit_Numeric(self, node):\n return node.value",
"def get_num(self, data):\n data = NUM_PATTERN.findall(data)\n if data:\n return int(data[0])\n return 0",
"def convert_to_int(number):\n try:\n return int(number)\n except:\n return None"
] | [
"0.8366567",
"0.68826133",
"0.6643716",
"0.6231448",
"0.6134712",
"0.60349727",
"0.588818",
"0.5871328",
"0.58434844",
"0.58222824",
"0.579166",
"0.579166",
"0.57692295",
"0.57690537",
"0.576285",
"0.5756573",
"0.57532364",
"0.5742738",
"0.57313293",
"0.56075567",
"0.55872804",
"0.55650395",
"0.5553056",
"0.55472",
"0.5517547",
"0.55105925",
"0.55058396",
"0.5499412",
"0.54927677",
"0.5473401"
] | 0.8116926 | 1 |
Parses a boolean from an ElementTree node; if not found, returns None. | def _parse_boolean(node, key):
element = node.get(key)
if element is not None:
return bool(element)
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_boolean(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n if element.text == 'true':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return None",
"def _get_bool(element, name, context, default=None):\n\n value = element.get(name)\n try:\n value = int(value)\n except:\n value = default\n\n _assert(value is not None,\n \"Missing or invalid boolean value of '{0}.{1}'.\".format(context,\n name))\n\n return bool(value)",
"def parsebool(el):\n txt = text(el)\n up = txt.upper()\n if up == \"OUI\":\n return True\n if up == \"NON\":\n return False\n\n return bool(parseint(el))",
"def unbool(element, true=object(), false=object()):\r\n\r\n if element is True:\r\n return true\r\n elif element is False:\r\n return false\r\n return element",
"def get_xml_bool_attribute(elem, attribute, default=None):\n value = elem.get(attribute, default)\n if value is None:\n raise XMLSchemaKeyError(attribute)\n elif value in ('true', '1') or value is True:\n return True\n elif value in ('false', '0') or value is False:\n return False\n else:\n raise XMLSchemaTypeError(\"an XML boolean value is required for attribute %r\" % attribute)",
"def f_boolean(node, pos, size, context, v):\n if xpath.tools.nodesetp(v):\n return len(v) > 0\n elif xpath.tools.numberp(v):\n if v == 0 or v != v:\n return False\n return True\n elif xpath.tools.stringp(v):\n return v != ''\n\n return v",
"def htmlIsBooleanAttr(name):\n ret = libxml2mod.htmlIsBooleanAttr(name)\n return ret",
"def getBool(string):\n return (True)",
"def set_boolean(dict, name, elem):\n node = elem.find(name)\n if node is not None:\n if node.text.lower() == 'yes':\n dict[name] = True\n elif node.text.lower() == 'no':\n dict[name] = False",
"def add_bool_as_scalar(self, node):\n if node.value == 'true' or node.value == 'false' :\n return self.construct_yaml_bool(node)\n return self.construct_scalar(node)",
"def xpathBooleanFunction(self, nargs):\n libxml2mod.xmlXPathBooleanFunction(self._o, nargs)",
"def _bool(value):\n if isinstance(value, bool):\n return value;\n \n if isinstance(value, str):\n value = value.strip()\n \n if value.lower() in _BOOLEAN_STATES:\n return _BOOLEAN_STATES[value.lower()]\n return None",
"def get_boolean_attribute_value(attrs, attr_name):\n return 1 if attrs.get(attr_name, 0) in [\"True\", \"1\"] else 0",
"def convertStringToBool(nodeText):\n stringsThatMeanTrue = list(['yes','y','true','t','on'])\n val = False\n if nodeText.lower() in stringsThatMeanTrue:\n val = True\n return val",
"def visit_true(self) -> T:",
"def get_bool(self, name, default=False):\n return self.get_as(self.parse_bool, name, default, value_type=bool)",
"def evalBoolean(tree):\n # check if children the children is a \"or\" or a \"and\" tokken\n if (tree.children[0].data == \"or\"):\n return evalBoolean(tree.children[0].children[0]) or evalBoolean(tree.children[0].children[1])\n if (tree.children[0].data) == \"and\":\n return evalBoolean(tree.children[0].children[0]) and evalBoolean(tree.children[0].children[1])\n \n # set var1\n if(tree.children[0].data == \"integer\"):\n var1 = evalInteger(tree.children[0])\n elif(tree.children[0].data == \"variable\"):\n var1 = getValue(tree.children[0].children[0].value)\n\n # set var2\n if(tree.children[2].data == \"integer\"):\n var2 = evalInteger(tree.children[2])\n elif(tree.children[2].data == \"variable\"):\n var2 = getValue(tree.children[2].children[0].value)\n\n if(tree.children[1].children[0].data == \"greater\"):\n return var1 > var2\n if(tree.children[1].children[0].data == \"less\"):\n return var1 < var2\n if(tree.children[1].children[0].data == \"equals\"):\n return var1 == var2\n if(tree.children[1].children[0].data == \"nequal\"):\n return var1 != var2\n\n print(\"ERROR : UNEXPECTED TOKKEN\")\n return False",
"def read_bool(self):\n return bool(self.read_and_unpack('l')[0])",
"def getbool(self, section, option, default=None):\r\n return self.get(section, option, type=bool, default=default)",
"def bool_attr(attr):\n if attr.lower() == \"true\":\n val = True\n elif attr.lower() == \"false\":\n val = False\n else:\n raise EzXMLError(\"Must be \"\\\n \"'true' or 'false'. Not %s\" % (attr))\n return val",
"def arg_to_boolean(arg: str) -> Optional[bool]:\n return argToBoolean(arg) if arg else None",
"def get_bool(section, option, default=False):\n\tres = get(section, option, default)\n\n\tif res == default:\n\t\treturn default\n\n\tif res.lower() == \"true\" or res == \"1\":\n\t\treturn True\n\n\treturn default",
"def get_attr_bool(self, name, default=False):\n v = self.get_attr(name)\n if v is None:\n return default\n if v.lower() in [\"t\", \"true\", \"y\", \"yes\", \"1\"]:\n return True\n else:\n return False",
"def true(self):\n val = self.read(4)\n if val != b'true':\n self.on_parser_error(\"true token expected\")\n return True",
"def get_tag_value_or_none(node, element_name):\n tag_value = node.tags.get(element_name, 'n/a')\n\n if 'n/a' == tag_value:\n return None\n\n return tag_value",
"def parse_bool(value):\n if value in (\"true\", \"True\", \"yes\", \"1\", \"on\"):\n return True\n if value in (\"false\", \"False\", \"None\", \"no\", \"0\", \"off\"):\n return False\n return bool(int(value))",
"def boolean(val):\n\tif val == \"True\" or val == \"1\":\n\t\treturn True\n\telse:\n\t\treturn False",
"def process_bool_arg(arg):\n if isinstance(arg, bool):\n return arg\n elif isinstance(arg, basestring):\n if arg.lower() in [\"true\", \"1\"]:\n return True\n elif arg.lower() in [\"false\", \"0\"]:\n return False",
"def false(self):\n val = self.read(5)\n if val != b'false':\n self.on_parser_error(\"false token expected\")\n return False",
"def _parse_bool(line):\n return line in ('true', 'True', '1')"
] | [
"0.83501446",
"0.6932032",
"0.692632",
"0.65882355",
"0.65580535",
"0.6457279",
"0.63471276",
"0.6239328",
"0.60651743",
"0.59987414",
"0.59204286",
"0.58954495",
"0.5884764",
"0.5840993",
"0.5837576",
"0.58079356",
"0.5725734",
"0.57157356",
"0.5690965",
"0.56830674",
"0.56765",
"0.5671768",
"0.56560826",
"0.56476563",
"0.5638766",
"0.5615574",
"0.56130064",
"0.55993897",
"0.55794847",
"0.5574653"
] | 0.83073354 | 1 |
Checks if value is a dict | def is_dict(value):
return isinstance(value, dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_dict(val):\n\n return isinstance(val, dict)",
"def _is_dict(item):\n return isinstance(item, dict)",
"def isDict(data):\n\ttry:\n\t\tfrom types import DictType\n\t\tif type(data) == DictType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type({}):\n\t\t\treturn True\n\treturn False",
"def is_dictionary(obj):\n return type(obj) is dict",
"def is_dict(obj):\n return type(obj) == type({})",
"def isdict(val: Any) -> bool:\n return isinstance(val, MutableMapping)",
"def is_dict(self) -> bool:\n return True",
"def isdictinstance(obj):\n return isinstance(obj, dict) or isinstance(obj, DotDict)",
"def is_typed_dict(self) -> bool:\n return True",
"def rule_00_config_is_dict(session):\n return isinstance(session[\"config\"], dict)",
"def has_value(value):\n return IsDictContainingValue(wrap_matcher(value))",
"def check_for_dict(check):",
"def quacks_like_dict(object):\n return isinstance(object, Mapping)",
"def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)",
"def dict_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, dict): return False\n all_of = [value or True for value in verifield.values() if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))",
"def is_sequence_of_dict(items):\n return all(isinstance(item, dict) for item in items)",
"def can_insert(data):\n return isinstance(data, dict)",
"def is_dictionary_subclass(obj):\n return (hasattr(obj, '__class__') and\n issubclass(obj.__class__, dict) and not is_dictionary(obj))",
"def quacks_like_dict(object):\n return isinstance(object, collections.Mapping)",
"def eval_dict(self, value):\n\n okay = True\n if all(ast_class(k) == 'Str' for k in value.keys):\n count = 0\n for v in value.values:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay",
"def validate_to_python(self, value):\n super(DictField, self).validate(value)\n if value == None:\n return {}\n if not isinstance(value, dict):\n raise ValidationError('Must be a dict, got {0}'.format(type(value).__name__))\n form = self.Form(value)\n if form.is_valid():\n return form.cleaned_data\n else:\n errors = form.errors.as_text()\n raise ValidationError(errors)",
"def _is_json_object(blob):\n try:\n return isinstance(json.loads(blob), dict)\n except ValueError:\n return False",
"def clean_value(self, value):\n if isinstance(value, dict):\n return value\n elif isinstance(value, str):\n try:\n result = json.loads(value)\n except ValueError:\n raise ValidationError(\n gettext('This value is not a valid JSON document'))\n\n if isinstance(result, dict):\n return result\n\n raise ValidationError(\n gettext('This value is not a valid dictionary value'))",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def is_dictionary_type(self):\n raise exceptions.NotImplementedError()",
"def dict_support_required(self):\n\t\treturn self.typemanager.has_dicts",
"def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True",
"def _validate_dict_entry(self, dict_entry):\r\n try:\r\n # Type-check all of the type-critical items.\r\n if (\r\n type(dict_entry[\"id\"]) == int and\r\n type(dict_entry[\"date\"]) == datetime.date and\r\n type(dict_entry[\"time\"]) == datetime.time and\r\n type(dict_entry[\"datetime\"]) == datetime.datetime and\r\n type(dict_entry[\"duration\"]) == datetime.timedelta):\r\n return True\r\n else:\r\n return False\r\n # end if\r\n except Exception as err:\r\n _z_exc(\"logentry.py/_validate_dict_entry\", err)\r\n # end try\r",
"def to_python(self, value):\n if value in validators.EMPTY_VALUES:\n return {}\n elif not isinstance(value, dict):\n raise ValidationError(self.error_messages['invalid_value'],\n code='invalid_value')\n return value",
"def test_is_dict(self):\n inst = FileStorage()\n self.assertEqual(type(inst.all()), dict)"
] | [
"0.8891944",
"0.84969",
"0.7979519",
"0.7861109",
"0.7839897",
"0.77455854",
"0.75733715",
"0.74826515",
"0.72116905",
"0.7184174",
"0.7079954",
"0.7000585",
"0.6909393",
"0.6878562",
"0.68191737",
"0.6789989",
"0.66548675",
"0.6653721",
"0.66456836",
"0.65667856",
"0.65665245",
"0.6525902",
"0.6513373",
"0.6505802",
"0.6452264",
"0.6443197",
"0.6397047",
"0.6299396",
"0.6256561",
"0.61440706"
] | 0.8975367 | 0 |
Checks if value is a list | def is_list(value):
return isinstance(value, list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_list(val):\n\n return isinstance(val, list)",
"def is_list(value):\n return isinstance(value, list) or None",
"def _is_list(item):\n return isinstance(item, list)",
"def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False",
"def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )",
"def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()",
"def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)",
"def isList(x):\n \n return ( type(x) == list ) # True if the type of x is a list",
"def is_list(self) -> bool:\n return False",
"def is_list(obj):\n return type(obj) is list",
"def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False",
"def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True",
"def isList(obj):\n return type(obj)==types.ListType",
"def is_list(self) -> bool:\n if self.is_list_of_list: # pylint: disable=R1705\n return False\n else:\n return bool(AnnotationWrapper.list_field_re.match(self.data))",
"def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence",
"def is_list(s_list):\n return isa(s_list, List)",
"def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))",
"def is_list_of_list(self) -> bool:\n return bool(AnnotationWrapper.list_of_list_re.match(self.data))",
"def is_list(self):\n answer = self._call('is_list')\n return answer.yes",
"def is_list(annotation):\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list",
"def is_tuple_or_list(value):\n return isinstance(value, list) or isinstance(value, tuple)",
"def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value",
"def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)",
"def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))",
"def is_sequence_of_list(items):\n return all(isinstance(item, list) for item in items)",
"def _is_valid_list(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:list\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)",
"def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]",
"def str_or_list(value):\n if isinstance(value, list):\n return value\n return [value]",
"def ensure_list(value: Any) -> List[Any]:\n\n if isinstance(value, (Mapping, str)): # do not unpack dictionaries\n return [value]\n elif isinstance(value, Iterable):\n return list(value)\n else:\n return [value]",
"def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)"
] | [
"0.9028631",
"0.8771568",
"0.8495955",
"0.8216776",
"0.81552017",
"0.8091219",
"0.8072429",
"0.79981005",
"0.7949894",
"0.7946823",
"0.7922643",
"0.77240074",
"0.7716478",
"0.76670384",
"0.7631471",
"0.761264",
"0.7607154",
"0.7554368",
"0.75167143",
"0.7500501",
"0.7436007",
"0.7407042",
"0.7405275",
"0.72709423",
"0.71398586",
"0.7063658",
"0.70359075",
"0.69864225",
"0.69648576",
"0.69318974"
] | 0.9064887 | 0 |
Get epoch time (seconds) from either a passed-in UTC datetime or the current datetime | def get_epoch_time(utc_datetime=None):
if not utc_datetime:
utc_datetime = datetime.datetime.utcnow()
return math.ceil((utc_datetime - EPOCH_START).total_seconds()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _current_epoch_secs():\n now = datetime.datetime.utcnow()\n epoch = datetime.datetime(1970, 1, 1)\n return (now - epoch).total_seconds()",
"def epoch_time(when):\n if not when: return 0\n epoch = datetime.utcfromtimestamp(0)\n delta = when - epoch\n return int(delta.total_seconds())",
"def timestampfromutc(utc):\n return (utc - datetime(1970, 1, 1)).total_seconds()",
"def epoch_time_now():\n return int(time.time())",
"def epoch():\n return datetime2epoch(datetime.now())",
"def seconds_since_epoch(date_time, epoch=None):\n return microseconds_since_epoch(date_time) / 10.0**6",
"def datetime_to_epoch(indate):\n origin = datetime.datetime(1970, 1, 1)\n if indate.tzinfo:\n origin = pytz.timezone('UTC').localize(origin)\n return (indate - origin).total_seconds()",
"def datetime_to_epoch(indate):\n origin = datetime.datetime(1970, 1, 1)\n if indate.tzinfo:\n origin = pytz.timezone('UTC').localize(origin)\n return (indate - origin).total_seconds()",
"def epochnow():\n return time.time()",
"def datetime_utc_epoch_start() -> datetime:\n return timestamp_to_datetime(0)",
"def epoch_seconds(date):\r\n td = date - epoch\r\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)",
"def epoch_seconds(date):\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)",
"def epoch_seconds(date):\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)",
"def _get_current_epoch_time() -> float:\n return time.time()",
"def get_utc_timestamp(utc_datetime=None):\n if utc_datetime is None:\n utc_datetime = get_now_utc()\n diff = utc_datetime - TIMESTAMP_0\n return int(diff.total_seconds() * 10**6)",
"def get_utc_timestamp(utc_datetime=None):\n if utc_datetime is None:\n utc_datetime = get_now_utc()\n diff = utc_datetime - TIMESTAMP_0\n return int(diff.total_seconds() * 10**6)",
"def epoch(value):\n if isinstance(value, datetime.datetime):\n return int(calendar.timegm(value.timetuple())*1000)\n return '' #fails silently for non-datetime objects",
"def datetime_to_timestamp(dt):\n\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n return (dt - epoch).total_seconds()",
"def timestamp(self):\n # this only returns second precision, which is why we don't use it\n #now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())\n\n # this returns microsecond precision\n # http://bugs.python.org/msg180110\n epoch = datetime.datetime(1970, 1, 1)\n return (self - epoch).total_seconds()",
"def datetime2epoch(dt):\n return int(mktime(dt.timetuple())*1000)",
"def get_utc_now_timestamp() -> int:\n return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000)",
"def to_epoch(datetime_obj):\n if sys.version_info[0:2] < (3, 3):\n import calendar\n\n return (\n calendar.timegm(datetime_obj.timetuple())\n + datetime_obj.microsecond / 1000000\n )\n else:\n return datetime_obj.timestamp()",
"def epoch():\n\treturn time.time()",
"def microseconds_since_epoch(date_time, epoch=None):\n if not epoch:\n epoch = datetime.datetime.utcfromtimestamp(0)\n\n delta = date_time - epoch\n\n # 86400 is 24 * 60 * 60 e.g. total seconds in a day\n return delta.microseconds + (delta.seconds + delta.days * 86400) * 10**6",
"def datetime_to_epoch(datetime_obj):\n return int(datetime_obj.strftime(\"%s\")) * 1000",
"def utctime(self) -> datetime:\n return datetime.utcfromtimestamp(float(self.ns_since_epoch) / 1e9)",
"def datetime_to_epoch(datetime):\n return datetime.astype('int64') // 1e9",
"def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)",
"def timestamp_from_datetime(date):\n if getattr(date, 'tzinfo', None) is None:\n return (date - datetime.datetime(1970, 1, 1)).total_seconds()\n else:\n return (date - datetime.datetime(\n 1970, 1, 1, tzinfo=pytz.utc)).total_seconds()",
"def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()"
] | [
"0.7687502",
"0.75600356",
"0.7200416",
"0.7182325",
"0.69882435",
"0.6922301",
"0.68947226",
"0.68947226",
"0.68390167",
"0.6823786",
"0.67153645",
"0.6711426",
"0.6711426",
"0.6687371",
"0.6630511",
"0.6630511",
"0.66261214",
"0.6570021",
"0.6527329",
"0.65121967",
"0.6504262",
"0.64992005",
"0.6472697",
"0.6398382",
"0.6395586",
"0.63832706",
"0.6375401",
"0.6341797",
"0.63133",
"0.62884223"
] | 0.77804226 | 0 |
Get epoch time (milliseconds) from either a passed-in UTC datetime or the current datetime | def get_epoch_time_milliseconds(utc_datetime=None):
epoch_seconds = get_epoch_time(utc_datetime)
return epoch_seconds * MILLISECONDS_IN_SECOND | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_epoch_time(utc_datetime=None):\n if not utc_datetime:\n utc_datetime = datetime.datetime.utcnow()\n return math.ceil((utc_datetime - EPOCH_START).total_seconds())",
"def epoch_time_now():\n return int(time.time())",
"def epoch_time(when):\n if not when: return 0\n epoch = datetime.utcfromtimestamp(0)\n delta = when - epoch\n return int(delta.total_seconds())",
"def _current_epoch_secs():\n now = datetime.datetime.utcnow()\n epoch = datetime.datetime(1970, 1, 1)\n return (now - epoch).total_seconds()",
"def epochnow():\n return time.time()",
"def epoch():\n return datetime2epoch(datetime.now())",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def _get_current_epoch_time() -> float:\n return time.time()",
"def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)",
"def get_utc_now_timestamp() -> int:\n return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000)",
"def epoch(value):\n if isinstance(value, datetime.datetime):\n return int(calendar.timegm(value.timetuple())*1000)\n return '' #fails silently for non-datetime objects",
"def datetime_utc_epoch_start() -> datetime:\n return timestamp_to_datetime(0)",
"def getUnixTimeStamp():\n return calendar.timegm(datetime.utcnow().utctimetuple())",
"def timestampfromutc(utc):\n return (utc - datetime(1970, 1, 1)).total_seconds()",
"def get_utc_timestamp(utc_datetime=None):\n if utc_datetime is None:\n utc_datetime = get_now_utc()\n diff = utc_datetime - TIMESTAMP_0\n return int(diff.total_seconds() * 10**6)",
"def get_utc_timestamp(utc_datetime=None):\n if utc_datetime is None:\n utc_datetime = get_now_utc()\n diff = utc_datetime - TIMESTAMP_0\n return int(diff.total_seconds() * 10**6)",
"def EpochNano():\n return int(time.time() * 1000000000)",
"def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)",
"def epoch():\n\treturn time.time()",
"def timestamp(self):\n # this only returns second precision, which is why we don't use it\n #now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())\n\n # this returns microsecond precision\n # http://bugs.python.org/msg180110\n epoch = datetime.datetime(1970, 1, 1)\n return (self - epoch).total_seconds()",
"def utctime(self) -> datetime:\n return datetime.utcfromtimestamp(float(self.ns_since_epoch) / 1e9)",
"def to_epoch(datetime_obj):\n if sys.version_info[0:2] < (3, 3):\n import calendar\n\n return (\n calendar.timegm(datetime_obj.timetuple())\n + datetime_obj.microsecond / 1000000\n )\n else:\n return datetime_obj.timestamp()",
"def datetime2epoch(dt):\n return int(mktime(dt.timetuple())*1000)",
"def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)",
"def getNowMilliseconds():\n return (datetime.datetime.utcnow() - Common.epoch_).total_seconds() * 1000.0",
"def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)",
"def microseconds_since_epoch(date_time, epoch=None):\n if not epoch:\n epoch = datetime.datetime.utcfromtimestamp(0)\n\n delta = date_time - epoch\n\n # 86400 is 24 * 60 * 60 e.g. total seconds in a day\n return delta.microseconds + (delta.seconds + delta.days * 86400) * 10**6",
"def get_current_unix_timestamp_ms():\r\n return int(datetime.timestamp(datetime.now())) * 1000",
"def local_timestamp():\n # type: () -> int\n now = datetime.utcnow()\n timestamp_in_seconds = calendar.timegm(now.timetuple()) + (now.microsecond / 1e6)\n timestamp_in_milliseconds = int(timestamp_in_seconds * 1000)\n return timestamp_in_milliseconds",
"def unixTimeMs(dateAndTime):\n dateAndTime = dateAndTime + datetime.timedelta(hours=HOUR_ADJUSTMENT)\n return int((dateAndTime - EPOCH).total_seconds() * 1000.0)"
] | [
"0.78353167",
"0.75055903",
"0.74338925",
"0.7330113",
"0.72694373",
"0.726492",
"0.7158576",
"0.7086221",
"0.69683",
"0.68982023",
"0.6857221",
"0.6855786",
"0.6811981",
"0.6789083",
"0.6776045",
"0.6776045",
"0.6746983",
"0.67409736",
"0.6735191",
"0.6717645",
"0.67158127",
"0.67012686",
"0.66958135",
"0.66678417",
"0.66326416",
"0.6620518",
"0.6596514",
"0.6574572",
"0.6568161",
"0.6559219"
] | 0.75269914 | 1 |
Returns True if val is Falsy, otherwise returns False | def is_empty(val):
return not bool(val) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_truthy(val):\n return bool(val)",
"def non_empty(val):\n return val is not None and val != \"\"",
"def _val_is_null(self, val):\r\n return val is None",
"def not_none(value):\n return not value is None",
"def is_false(value):\n \n return (value is False)",
"def empty(self, value):\r\n return not value",
"def is_null(val):\n return (val is None)",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def bool_(val):\n if isinstance(val, six.string_types) and val.lower() == 'false':\n return False\n return bool(val)",
"def empty(self, value):\n return not value",
"def has_value(var) :\n return var != None",
"def empty(self, value):\n return value is None",
"def is_empty_value(cls, value):\n return not value or cls.is_placeholder_value(value)",
"def valid_value_intbool(val):\n if val is not np.nan:\n return 1\n else:\n return 0",
"def not_(x):\n if bool(x):\n return False\n return True",
"def est_nul(self):\n\t\tif self.__valide:\n\t\t\treturn (self.degre() == 0) and (self.valuation().est_nul())\n\t\telse:\n\t\t\treturn False",
"def validate_value_flag(self):\n if not self.app.args.value is None or self.app.args.value == '':\n return True\n else:\n return False",
"def testIsNullFalse(self):\n val = is_null(\"False\") \n self.assertFalse(val)",
"def is_empty(self, value, context):\n return (value is None)",
"def is_null(value: Any) -> bool:\n return not value",
"def __bool__(self):\n # Do explicit cast to bool, as value can be a NumPy type, resulting in\n # an np.bool_ type for the expression (not allowed for __bool__)\n return bool(self.value != self.default_value)",
"def is_no_channel(val) -> bool:\n if isinstance(val, torch.Tensor):\n return bool(torch.isnan(val))\n if isinstance(val, str):\n return val == \"no_channel\"\n if np.isscalar(val):\n return bool(np.isnan(val))\n return val is None",
"def isValid(self, value):\n return value is None if self._onlyNullAllowed else value is not None",
"def testIsNullFalseAgain(self):\n val = is_null(5) \n self.assertFalse(val)",
"def is_empty(val):\n return val in [None, ''] or val.isspace()",
"def is_str_none_or_empty(val):\n if val is None:\n return True\n if isinstance(val, string_types):\n val = val.strip()\n if not val:\n return True\n return False"
] | [
"0.7918933",
"0.790834",
"0.742545",
"0.7357178",
"0.73020804",
"0.70956635",
"0.70306563",
"0.7005919",
"0.7005919",
"0.7005919",
"0.7005919",
"0.7005919",
"0.69641405",
"0.6962459",
"0.69548154",
"0.69414115",
"0.6874724",
"0.6852935",
"0.6849699",
"0.6779061",
"0.6711758",
"0.6685595",
"0.6670785",
"0.6654701",
"0.65700144",
"0.65477693",
"0.6542356",
"0.65407276",
"0.65275913",
"0.65139645"
] | 0.82153666 | 0 |
Return the food "Item" string with most calories | def get_food_most_calories(df=df):
return df[df.Calories == df.Calories.max()]["Item"].values[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_food_most_calories(df=df):\r\n max_calories_row = df.loc[df['Calories'].idxmax()]\r\n return max_calories_row['Item']",
"def get_longest_item(self,items):\n # Assume longest is initially zero\n longest = 0\n for item in items:\n # get length of item name\n length = len(item[ITEM])\n if length > longest:\n longest = length\n return longest",
"def test_get_food_most_calories_smaller_population():\n df_breakfast = df[df['Category'] == 'Breakfast']\n\n actual = get_food_most_calories(df_breakfast)\n expected = 'Big Breakfast with Hotcakes (Large Biscuit)'\n assert actual == expected",
"def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count",
"def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon",
"def highest_growth_rate():\r\n growth_mappings = {\r\n \"Organic Red Helles\": 0,\r\n \"Organic Pilsner\": 0,\r\n \"Organic Dunkel\": 0\r\n }\r\n rate_strings = []\r\n #Growth Rate of Organic Red Helles\r\n growth_rate = int(growth_calculations(\"Organic Red Helles\"))\r\n growth_mappings[\"Organic Red Helles\"] = growth_rate\r\n rate_strings.append(\"The current growth rate for Organic Red \" +\r\n \"Helles is: \" + str(growth_rate) +\r\n \" bottles per month\")\r\n #Growth Rate of Organic Pilsner\r\n growth_rate = int(growth_calculations(\"Organic Pilsner\"))\r\n growth_mappings[\"Organic Pilsner\"] = growth_rate\r\n rate_strings.append(\"The current growth rate for Organic \" +\r\n \"Pilsner is: \" + str(growth_rate) +\r\n \" bottles per month\")\r\n #Growth Rate of Organic Dunkel\r\n growth_rate = int(growth_calculations(\"Organic Dunkel\"))\r\n growth_mappings[\"Organic Dunkel\"] = growth_rate\r\n rate_strings.append(\"The current growth rate for Organic \" +\r\n \"Dunkel is: \" + str(growth_rate) +\r\n \" bottles per month\")\r\n #Finds largest key value in dictionary\r\n most_demand_recipe = max(growth_mappings, key=growth_mappings.get)\r\n most_demand_string = (\"Therefore the beer recipe with the highest growth\" +\r\n \" rate currently is: \" + most_demand_recipe)\r\n return rate_strings, most_demand_string",
"def format_popular_item(name: str, quantity: Decimal):\n return f\"{name} (QTY: {quantity})\"",
"def get_popular(self, max_items=None):\n data = [(x[1], x[0]) for x in self.iteritems()]\n data.sort(key=lambda x: (sys.maxint - x[0], x[1]))\n if max_items:\n return data[:max_items]\n else:\n return data",
"def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]",
"def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists",
"def get_movie_most_nominations(movies: list) -> str:\n pass",
"def most_read_book(self):\n reading_max = 0\n most_reads = \"\"\n for book in self.books.keys():\n rating = book.get_average_rating()\n if rating > reading_max:\n most_reads = book\n reading_max = rating\n else:\n continue\n return most_reads",
"def get_most_popular(self):\n\t\tpopular_rated = self.data_final[self.data_final['Rating'] == 10]\n\t\tpopular_jokes = popular_rated.groupby('JokeID').count().reset_index()\n\t\tpopular_jokes = popular_jokes[['JokeID','Rating']]\n\t\tpopular_jokes.columns = ['JokeID','Number_rated10']\n\t\ttop_joke = popular_jokes.sort_values(by=['Number_rated10'], ascending=False).head(1)\n\t\ttop_joke_val = top_joke['JokeID'].values[0]\n\t\tjokes_list = sorted(set(self.data_final['JokeID']))\n\t\tjoke_num = jokes_list.index(top_joke_val)\n\t\ttop_joke_desc = self.data_jokes[self.data_jokes['JokeID'] == top_joke_val].values[0][1]\n\n\t\treturn top_joke_desc, joke_num",
"def max_findings_per_item(self) -> float:\n return pulumi.get(self, \"max_findings_per_item\")",
"def summarize_food_data(unprocessed_food_list: List[str]) -> List[Dict[str, str]]:\n summary: List[Dict[str, str]] = []\n item_count_data: Dict[str, int] = {}\n\n for item in unprocessed_food_list:\n if item not in item_count_data:\n item_count_data[item] = 1\n else:\n item_count_data[item] += 1\n \n for product in item_count_data:\n item_information: Dict[str, str] = {}\n item_information[\"name\"] = product\n item_information[\"quantity\"] = str(item_count_data[product])\n item_information[\"units\"] = \"-\"\n summary.append(item_information)\n \n return summary",
"def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)",
"def most_popular_gender(data):\n answer = \"\"\n genders = count_gender(data)\n if genders[0] == genders[1]:\n answer = \"Equal\"\n elif genders[0] > genders[1]:\n answer = \"Male\"\n else:\n answer = \"Female\"\n return answer",
"def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty",
"def display_get_most_played():\n title_game = reports.get_most_played(filename)\n print(\"Title of most played game in {}: {}\\n\".format(filename, title_game))",
"def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))",
"def description(self):\n item_counts = [f'{i.quantity}x {i.item.name}' for i in self.items]\n return ','.join(item_counts)",
"def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]",
"def counterFrequency(text):\n dictText = {}\n maxN = 0\n mostFrequent = \"\"\n for item in text:\n if (item not in dictText):\n dictText[item] = 1\n else: \n dictText[item] +=1\n \n if (dictText[item] > maxN):\n mostFrequent = item\n maxN = dictText[item]\n return mostFrequent",
"def largest_item(list):\n pass",
"def select_most_sold_products_from_personas_mannen():\n return sql_select(\"\"\"SELECT prodid, name,\n COUNT(*)\n FROM personas_mannen\n \n GROUP BY prodid ,name \n ORDER BY COUNT(*) DESC ; \"\"\")",
"def greedy(items, maxCost, keyFunction):\n result = []\n itemsCopy = sorted(items, key=keyFunction, reverse=True)\n totalValue , totalCalories = 0.0, 0.0\n for i in range(len(itemsCopy)):\n item = itemsCopy[i]\n if (totalCalories + item.getCalories()) <= maxCost:\n result.append(item)\n totalCalories += item.getCalories()\n totalValue += item.getValue()\n return result, totalValue",
"def most_popular_articles():\n print '1. The most popular articles are...'\n return (\"\"\"SELECT articles.title, COUNT(*) as num FROM articles, log\"\"\"\n \"\"\" WHERE SUBSTRING (log.path FROM 10) = articles.slug and\"\"\"\n \"\"\" log.path != '/' Group By articles.title ORDER By num\"\"\"\n \"\"\" DESC LIMIT 3;\"\"\")",
"def print_all_items_in_dict_for_human(all_items):\n # Find the length of the longest item.\n longest_item = 0\n for item in all_items:\n item_length = len(f\"{item}\")\n if item_length > longest_item:\n longest_item = item_length\n\n for item in sorted(all_items):\n print(f\"{item}\".rjust(longest_item) + f\": {all_items[item]}\")",
"def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)",
"def get_bodybuilder_friendly_foods(df=df, excl_drinks=False):\n if excl_drinks:\n fltr_excl_drinks = ~df[\"Category\"].isin([\"Coffee & Tea\", \"Beverages\"])\n else:\n fltr_excl_drinks = True\n df_nzc = df[(df.Calories != 0) & fltr_excl_drinks] # non zero calories\n fltr = (df_nzc.Protein / df_nzc.Calories).nlargest(5).index\n return df_nzc[df_nzc.index.isin(fltr)][\"Item\"].values[:]"
] | [
"0.75917083",
"0.6445081",
"0.64260274",
"0.6388405",
"0.62523913",
"0.61627793",
"0.6064445",
"0.6044672",
"0.60257804",
"0.592605",
"0.59001374",
"0.58869326",
"0.582938",
"0.58228385",
"0.5761905",
"0.5699086",
"0.56825083",
"0.5679168",
"0.56749237",
"0.5670001",
"0.5637825",
"0.5625574",
"0.5613467",
"0.5613123",
"0.5603153",
"0.5603137",
"0.55995584",
"0.5585085",
"0.5565481",
"0.55432427"
] | 0.75638163 | 1 |
Calculate the Protein/Calories ratio of foods and return the 5 foods with the best ratio. This function has an excl_drinks switch which, when turned on, should exclude 'Coffee & Tea' and 'Beverages' from this top 5. You will probably need to filter out foods with 0 calories to get the right results. Return a list of the top 5 food Item strings. | def get_bodybuilder_friendly_foods(df=df, excl_drinks=False):
if excl_drinks:
fltr_excl_drinks = ~df["Category"].isin(["Coffee & Tea", "Beverages"])
else:
fltr_excl_drinks = True
df_nzc = df[(df.Calories != 0) & fltr_excl_drinks] # non zero calories
fltr = (df_nzc.Protein / df_nzc.Calories).nlargest(5).index
return df_nzc[df_nzc.index.isin(fltr)]["Item"].values[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bodybuilder_friendly_foods(df=df, excl_drinks=False):\r\n df_calories = df[df['Calories'] > 0]\r\n if excl_drinks:\r\n df_calories = df_calories[~df_calories['Category'].isin(['Beverages', 'Coffee & Tea'])]\r\n df_calories['Protein/Calories Ratio'] = df_calories['Protein']/df_calories['Calories']\r\n items = df_calories.sort_values('Protein/Calories Ratio',ascending=False).head(5)\r\n return [item for item in items['Item']]",
"def dp_all_td(foods, done_count, cal_left, pro_left, fat_left, carb_left):\n # using a tolerance of 50 cal/5 carb/fat/pro\n if cal_left < 50 and pro_left < 5 and fat_left < 5 and carb_left < 5:\n return {}\n\n # done going through all the foods\n if len(foods) <= done_count:\n return {0: 9999999}\n\n # need everything in ints for data struct\n cal_left = int(cal_left)\n pro_left = int(pro_left)\n fat_left = int(fat_left)\n carb_left = int(carb_left)\n\n # try to use solution already calculated\n try:\n foods_used_a = \\\n dp[(str(done_count + 1) + '-'\n + str(cal_left) + '-'\n + str(pro_left) + '-'\n + str(fat_left) + '-'\n + str(carb_left))]\n except KeyError:\n # calculate scenario where you don't use current food\n foods_used_a = dp_all_td(\n foods, done_count + 1, cal_left, pro_left, fat_left, carb_left)\n\n # if current food violates reqs, then don't bother calc-ing using it\n if ((cal_left - foods[done_count]['calories']) < -50\n or (pro_left - foods[done_count]['protein']) < -5\n or (fat_left - foods[done_count]['fat']) < -5\n or (carb_left - foods[done_count]['carbs']) < -5):\n return foods_used_a\n\n # try to use solution already calculated\n try:\n foods_used_b = \\\n dp[(str(done_count) + '-'\n + str(cal_left - foods[done_count]['calories']) + '-'\n + str(pro_left - foods[done_count]['protein']) + '-'\n + str(fat_left - foods[done_count]['fat']) + '-'\n + str(carb_left - foods[done_count]['carbs']))]\n except KeyError:\n # calculate for scenario where you use the current food\n foods_used_b = dp_all_td(\n foods,\n done_count,\n cal_left - foods[done_count]['calories'],\n pro_left - foods[done_count]['protein'],\n fat_left - foods[done_count]['fat'],\n carb_left - foods[done_count]['carbs'])\n try:\n foods_used_b[done_count] += 1\n except KeyError:\n foods_used_b[done_count] = 1\n\n # store cheapest, then return it\n if cost(foods, foods_used_a) > cost(foods, foods_used_b):\n dp[(str(done_count) + '-'\n + str(cal_left) + '-'\n + str(pro_left) + '-'\n + str(fat_left) + '-'\n + str(carb_left))] = foods_used_b\n return foods_used_b\n dp[(str(done_count) + '-'\n + str(cal_left) + '-'\n + str(pro_left) + '-'\n + str(fat_left) + '-'\n + str(carb_left))] = foods_used_a\n return foods_used_a",
"def dp_calories_only(foods, cal_goal):\n macros = init_one_d_array(cal_goal, 999999999)\n foods_used = init_one_d_array(cal_goal, {})\n for i in range(cal_goal):\n for j in range(len(foods)):\n food = foods[j]\n if int(food['calories']) <= i:\n if macros[i - int(food['calories'])] == 999999999:\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = macros[i - int(food['calories'])]\n prev_foods_used = foods_used[i -\n int(food['calories'])].copy()\n if macros[i] > prev_cost + food['serving_cost']:\n macros[i] = prev_cost + food['serving_cost']\n try:\n prev_foods_used[j] += 1\n except KeyError:\n prev_foods_used[j] = 1\n foods_used[i] = prev_foods_used\n return foods_used[cal_goal - 1]",
"def brute_force_cal_and_pro_only(foods, done_count, cal_left, pro_left):\n if cal_left < 50 and pro_left < 5: # using a tolerance of 50 cal/5 pro\n return {}\n\n if len(foods) <= done_count: # done going through all the foods\n return {0: 999999} # this sucks, try to fix it\n\n # calculate for scenario where you ignore the current food and don't use it\n foods_used_a = brute_force_cal_and_pro_only(\n foods, done_count + 1, cal_left, pro_left)\n # calculate for scenario where you use the current food\n if ((cal_left - foods[done_count]['calories']) < -50\n or (pro_left - foods[done_count]['protein']) < -15):\n return foods_used_a\n foods_used_b = brute_force_cal_and_pro_only(\n foods, done_count, cal_left - foods[done_count]['calories'],\n pro_left - foods[done_count]['protein'])\n try:\n foods_used_b[done_count] += 1\n except KeyError:\n foods_used_b[done_count] = 1\n\n if cost(foods, foods_used_a) > cost(foods, foods_used_b):\n return foods_used_b\n return foods_used_a",
"def dp_all(foods, cal_goal, pro_goal, carb_goal, fat_goal):\n costs = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n 999999999)\n foods_used = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for k in range(carb_goal):\n for l in range(fat_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i\n or int(food['protein']) > j\n or int(food['carbs']) > k\n or int(food['fat']) > l):\n continue\n if (costs[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n [j - int(food['carbs'])]\n [j - int(food['fat'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]).copy()\n new_cal = calories(\n foods, prev_foods_used) + food['calories']\n new_pro = protein(\n foods, prev_foods_used) + food['protein']\n new_car = carbs(\n foods, prev_foods_used) + food['protein']\n new_fat = fat(\n foods, prev_foods_used) + food['protein']\n if (costs[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 20 and new_cal < i + 10\n and new_pro < j + 5 and new_pro < j + 5\n and new_car < j + 5 and new_car < j + 5\n and new_fat < j + 5 and new_fat < j + 5):\n costs[i][j][k][l] = prev_cost + \\\n food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j][k][l] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1][carb_goal - 1][fat_goal - 1]",
"def brute_force_calories_only(foods, done_count, calories_left):\n if calories_left < 50: # using a tolerance of 50 calories\n return {}\n\n if len(foods) <= done_count: # done going through all the foods\n return {0: 999999}\n\n # calculate for scenario where you ignore the current food and don't use it\n foods_used_a = brute_force_calories_only(\n foods, done_count + 1, calories_left)\n # calculate for scenario where you use the current food\n if (calories_left - foods[done_count]['calories']) < -50:\n return foods_used_a\n foods_used_b = brute_force_calories_only(\n foods, done_count, calories_left - foods[done_count]['calories'])\n try:\n foods_used_b[done_count] += 1\n except KeyError:\n foods_used_b[done_count] = 1\n\n if cost(foods, foods_used_a) > cost(foods, foods_used_b):\n return foods_used_b\n return foods_used_a",
"def calories(foods, foods_used):\n calories = 0.0\n for i, count in foods_used.items():\n calories += (foods[i]['calories'] * count)\n return calories",
"def brute_force_all(foods, done_count, cal_left, pro_left, fat_left, carb_left):\n # using a tolerance of 50 cal/5 carb/fat/pro\n if cal_left < 50 and pro_left < 5 and fat_left < 5 and carb_left < 5:\n return {}\n\n if len(foods) <= done_count: # done going through all the foods\n return {0: 9999999}\n\n # calculate scenario where you don't use the current food\n foods_used_a = brute_force_all(\n foods, done_count + 1, cal_left, pro_left, fat_left, carb_left)\n\n # if current food violates reqs, then don't bother calculating for it\n if ((cal_left - foods[done_count]['calories']) < -50\n or (pro_left - foods[done_count]['protein']) < -5\n or (fat_left - foods[done_count]['fat']) < -5\n or (carb_left - foods[done_count]['carbs']) < -5):\n return foods_used_a\n\n # calculate scenario where you use the current food\n foods_used_b = brute_force_all(\n foods, done_count, cal_left - foods[done_count]['calories'],\n pro_left - foods[done_count]['protein'],\n fat_left - foods[done_count]['fat'],\n carb_left - foods[done_count]['carbs'])\n try:\n foods_used_b[done_count] += 1\n except KeyError:\n foods_used_b[done_count] = 1\n\n if len(foods_used_b) == 0:\n return foods_used_a\n\n # calculate cheapest and return\n if cost(foods, foods_used_a) > cost(foods, foods_used_b):\n return foods_used_b\n return foods_used_a",
"def get_food_most_calories(df=df):\n return df[df.Calories == df.Calories.max()][\"Item\"].values[0]",
"def fat(foods, foods_used):\n fat = 0.0\n for i, count in foods_used.items():\n fat += (foods[i]['fat'] * count)\n return fat",
"def _get_discount(self):\n\n # For every 2 PENS, one free discount\n number_of_pens = len([x for x in self._products if x.code == 'PEN'])\n discount = 5.0 * int(number_of_pens / 2)\n\n # If there are more than 3 T-Shirts in the basket, 5 EUR of discount in every of them (25%)\n number_of_tshirts = len([x for x in self._products if x.code == 'TSHIRT'])\n if number_of_tshirts >= 3:\n discount += 5.0 * number_of_tshirts\n\n return discount",
"def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)",
"def get_food_most_calories(df=df):\r\n max_calories_row = df.loc[df['Calories'].idxmax()]\r\n return max_calories_row['Item']",
"def carbs(foods, foods_used):\n carbs = 0.0\n for i, count in foods_used.items():\n carbs += (foods[i]['carbs'] * count)\n return carbs",
"def dp_cal_and_pro_only(foods, cal_goal, pro_goal):\n macros = init_two_d_array((cal_goal, pro_goal), 999999999)\n foods_used = init_two_d_array((cal_goal, pro_goal), {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i and int(food['protein']) > j):\n continue\n if (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]).copy()\n new_cal = calories(foods, prev_foods_used) + food['calories']\n new_pro = protein(foods, prev_foods_used) + food['protein']\n if (macros[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 50 and new_cal < i + 10\n and new_pro > j - 5 and new_pro < j + 5):\n macros[i][j] = prev_cost + food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1]",
"def computeMealFrecuency(self):\n self.getOrdersData()\n self.getOrderValues()\n meals = set(self.meals)\n for meal in meals:\n self.labels.append(meal)\n self.quantity.append(self.meals.count(meal))",
"def greedy(items, maxCost, keyFunction):\n result = []\n itemsCopy = sorted(items, key=keyFunction, reverse=True)\n totalValue , totalCalories = 0.0, 0.0\n for i in range(len(itemsCopy)):\n item = itemsCopy[i]\n if (totalCalories + item.getCalories()) <= maxCost:\n result.append(item)\n totalCalories += item.getCalories()\n totalValue += item.getValue()\n return result, totalValue",
"def calculate_score(dice):\n # version_1\n\n if len(dice) > 6:\n raise Exception(\"Cheating Cheater!\")\n\n counts = Counter(dice)\n\n if len(counts) == 6:\n return 1500\n\n if len(counts) == 3 and all(val == 2 for val in counts.values()):\n return 1500\n\n score = 0\n\n ones_used = fives_used = False\n\n for num in range(1, 6 + 1):\n\n pip_count = counts[num]\n\n if pip_count >= 3:\n\n if num == 1:\n\n ones_used = True\n\n elif num == 5:\n\n fives_used = True\n\n score += num * 100\n\n # handle 4,5,6 of a kind\n pips_beyond_3 = pip_count - 3\n\n score += score * pips_beyond_3\n\n # bug if 2 threesomes? Let's test it\n\n # 1s are worth 10x\n if num == 1:\n score *= 10\n\n if not ones_used:\n score += counts.get(1, 0) * 100\n\n if not fives_used:\n score += counts.get(5, 0) * 50\n\n return score",
"def fat(self) -> List[RecipeObjectNutrientsCalories]:\n return self._fat",
"def test_get_food_most_calories_smaller_population():\n df_breakfast = df[df['Category'] == 'Breakfast']\n\n actual = get_food_most_calories(df_breakfast)\n expected = 'Big Breakfast with Hotcakes (Large Biscuit)'\n assert actual == expected",
"def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost",
"def get_cream(list_of_things):\n\n return int(len(list_of_things) * 0.2)",
"def carnivore_eats(self):\n self.order_by_fitness()\n for carn in self.fauna_list['Carnivore']:\n food_required = carn.parameters['F']\n amount_to_eat = 0\n not_eaten_animals = []\n for i, herb in enumerate(self.fauna_list['Herbivore']):\n if food_required <= amount_to_eat:\n not_eaten_animals.extend(self.fauna_list['Herbivore'][i:])\n break\n elif np.random.random() < carn.probability_of_kill(herb):\n if food_required - amount_to_eat < herb.weight:\n amount_to_eat += herb.weight\n elif food_required - amount_to_eat > herb.weight:\n amount_to_eat += food_required - amount_to_eat\n else:\n not_eaten_animals.append(herb)\n carn.animal_eats(amount_to_eat)\n self.fauna_list['Herbivore'] = not_eaten_animals",
"def getDangerousFoodGoal(self, gameState):\n food = self.dangerousFood\n # print(food)\n myPos = self.getCurrentObservation().getAgentState(self.index).getPosition()\n if len(food) > 0:\n dis = 9999\n nearestFood = food[0]\n for a in food:\n temp = self.getMazeDistance(myPos, a)\n if temp < dis:\n dis = temp\n nearestFood = a\n return nearestFood, dis\n else:\n return None, None",
"def get_sharpe_ratio(allocs, prices):\n\tport_val = get_portfolio_value(prices, allocs, start_val=1.0)\n\tsharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]\n\treturn -sharpe_ratio",
"def pr_dominant_offpring(offspring_zygosity):\n\n homozygous_dominant, heterozygous, homozygous_recessive = offspring_zygosity\n\n total = homozygous_dominant + heterozygous + homozygous_recessive\n dominant = homozygous_dominant + heterozygous\n\n pr_dominant = dominant / total\n\n return pr_dominant",
"def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)",
"def hunt_outcomes(self, food_earnings):\n\n their_actions = determine_opponent_decisions(food_earnings)\n # TODO determine how my reputation has influenced their actions",
"def calculate_amount_payable_rule_five(self, total):\n return self.amount_raised * Decimal(0.95)",
"def protein(foods, foods_used):\n protein = 0.0\n for i, count in foods_used.items():\n protein += (foods[i]['protein'] * count)\n return protein"
] | [
"0.74542266",
"0.59881717",
"0.59221107",
"0.5832556",
"0.579231",
"0.57728446",
"0.5715874",
"0.5698817",
"0.5600146",
"0.5584469",
"0.55476344",
"0.54607743",
"0.5436297",
"0.5423807",
"0.5420557",
"0.5348513",
"0.5281259",
"0.5276994",
"0.52353954",
"0.5232096",
"0.52217996",
"0.5162562",
"0.51434785",
"0.5130235",
"0.5125579",
"0.510896",
"0.5105543",
"0.50591177",
"0.50471175",
"0.5033147"
] | 0.7044193 | 1 |
Hydrodynamic added mass matrix of a vertical cylinder | def cylindervert_addedmass(R, z1, z2, rho, Ca=1, AxCa=1,
m_f=0, z_f=0, m_mg=0, z_mg=0):
if z1<z2:
raise Exception('z1 should be above z2')
if z1<0:
# Fully submerged
ztop = z1
A0=0
nAx=2
else:
# Partially submerged
ztop = 0
A0 = np.pi*R**2 # undisplaced waterplane area of platform (m^2)
nAx=1
h = ztop-z2 # submerged height
z_b = (ztop+z2)/2 # coordinates of the center of buoyancy of the undisplaced platform (m)
V0 = np.pi*R**2*h # undisplaced volume of platform (m^3)
M=np.zeros((6,6))
M[0,0] = Ca * rho*V0
M[1,1] = Ca * rho*V0
M[2,2] = nAx*AxCa * 2/3*rho*np.pi * R**3 # rho*V0* D/(3*h)
M[4,0] = M[0,0]*z_b # TODO empirical
M[3,1] = -M[0,0]*z_b # TODO empirical
M[0,4] = M[0,0]*z_b # TODO empirical
M[1,3] = -M[0,0]*z_b # TODO empirical
T1 =Ca*rho*np.pi*R**2 * h**3 /3 # Ca * rho*V0 * h**2/3 # TODO a bit empirical
M[3,3] = T1
M[4,4] = T1
return M | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_mass_matrix(self, model):\n # Create list of mass matrices for each equation to be put into block\n # diagonal mass matrix for the model\n mass_list = []\n mass_inv_list = []\n\n # get a list of model rhs variables that are sorted according to\n # where they are in the state vector\n model_variables = model.rhs.keys()\n model_slices = []\n for v in model_variables:\n model_slices.append(self.y_slices[v][0])\n sorted_model_variables = [\n v for _, v in sorted(zip(model_slices, model_variables))\n ]\n\n # Process mass matrices for the differential equations\n for var in sorted_model_variables:\n if var.domain == []:\n # If variable domain empty then mass matrix is just 1\n mass_list.append(1.0)\n mass_inv_list.append(1.0)\n else:\n mass = (\n self.spatial_methods[var.domain[0]]\n .mass_matrix(var, self.bcs)\n .entries\n )\n mass_list.append(mass)\n if isinstance(\n self.spatial_methods[var.domain[0]],\n (pybamm.ZeroDimensionalSpatialMethod, pybamm.FiniteVolume),\n ):\n # for 0D methods the mass matrix is just a scalar 1 and for\n # finite volumes the mass matrix is identity, so no need to\n # compute the inverse\n mass_inv_list.append(mass)\n else:\n # inverse is more efficient in csc format\n mass_inv = inv(csc_matrix(mass))\n mass_inv_list.append(mass_inv)\n\n # Create lumped mass matrix (of zeros) of the correct shape for the\n # discretised algebraic equations\n if model.algebraic.keys():\n mass_algebraic_size = model.concatenated_algebraic.shape[0]\n mass_algebraic = csr_matrix((mass_algebraic_size, mass_algebraic_size))\n mass_list.append(mass_algebraic)\n\n # Create block diagonal (sparse) mass matrix (if model is not empty)\n # and inverse (if model has odes)\n if len(model.rhs) + len(model.algebraic) > 0:\n mass_matrix = pybamm.Matrix(block_diag(mass_list, format=\"csr\"))\n if len(model.rhs) > 0:\n mass_matrix_inv = pybamm.Matrix(block_diag(mass_inv_list, format=\"csr\"))\n else:\n mass_matrix_inv = None\n else:\n mass_matrix, mass_matrix_inv = None, None\n\n return mass_matrix, mass_matrix_inv",
"def added_mass(w, z1, z2, diameter, Cm=2):\n\n config = {\n 'end1': [0, 0, z1],\n 'end2': [0, 0, z2],\n 'diameter': diameter,\n 'strip width': 1.0,\n }\n Morison_model = ViscousDragModel({\n 'inertia coefficient': Cm,\n 'members': [config],\n })\n A1 = Morison_model.Morison_added_mass()\n A = np.tile(A1, (len(w), 1, 1))\n return LinearSystem(A, zeros_like(A), zeros_like(A))",
"def Mass_Matrix(self):\n self.mass_matrix = np.empty((self.N,self.N))\n for n1 in range(self.N):\n for n2 in range(self.N):\n self.mass_matrix[n1,n2] = integrate.quad(lambda x:self.basis[n1](x)*self.basis[n2](x),-1,1)[0]",
"def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on",
"def Morison_added_mass(w, draft, radius, Cm=2.0):\n from whales.viscous_drag import ViscousDragModel\n Morison_model = ViscousDragModel({\n 'inertia coefficient': Cm,\n 'members': [{\n 'end1': [0, 0, 0],\n 'end2': [0, 0, -draft],\n 'diameter': 2*radius,\n 'strip width': 1.0,\n }]\n })\n A1 = Morison_model.Morison_added_mass()\n A = np.tile(A1, (len(w), 1, 1))\n return LinearSystem(A, zeros_like(A), zeros_like(A))",
"def total_electronic_hamiltonian(self):\n return block_diag(*[self.electronic_manifold(n) for n in range(3)])",
"def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}",
"def dhMatrix(self):\n row1 = np.array([np.cos(self.theta), -np.sin(self.theta)*np.cos(self.alpha), np.sin(self.theta)*np.sin(self.alpha), self.a*np.cos(self.theta)])\n row2 = np.array([np.sin(self.theta), np.cos(self.theta)*np.cos(self.alpha), -np.cos(self.theta)*np.sin(self.alpha), self.a*np.sin(self.theta)])\n row3 = np.array([0.0, np.sin(self.alpha), np.cos(self.alpha), self.d])\n row4 = np.array([0.0, 0.0, 0.0, 1.0])\n T = np.array([row1, row2, row3, row4])\n return T",
"def makecylinder(model=[0,0,0,1,0,0,1],height = 1,density=10):\n # extract info from cylinder model\n radius = model[6]\n X,Y,Z = model[:3]\n # get 3d points to make an upright cylinder centered to the origin\n n = np.arange(0,360,int(360/density))\n height = np.arange(0,height,height/density)\n n = np.deg2rad(n)\n x,z = np.meshgrid(n,height)\n x = x.flatten()\n z = z.flatten()\n cyl = np.vstack([np.cos(x)*radius,np.sin(x)*radius,z]).T\n # rotate and translate the cylinder to fit the model\n rotation = rotation_matrix_from_vectors([0,0,1],model[3:6])\n rotated_cylinder = np.matmul(rotation,cyl.T).T + np.array([X,Y,Z])\n return rotated_cylinder",
"def htm(rotation_matrix, displacement_vector):\n htm_matrix = np.append(rotation_matrix, displacement_vector, axis=1)\n htm_matrix = np.append(htm_matrix, [[0, 0, 0, 1]], axis=0)\n return htm_matrix",
"def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)",
"def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M",
"def calc_matrix(self, attribute=False, basis=None):\n matrix = mgfns.dynamical_matrix_magnetic_gyros(self, basis=basis)\n return matrix",
"def update_H(self):\n self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )",
"def template_cylinder_annulus(height, outer_radius, inner_radius=0):\n\n img = _template_sphere_disc(dim=2, outer_radius=outer_radius,\n inner_radius=inner_radius)\n img = np.tile(np.atleast_3d(img), reps=height)\n return img",
"def material_matrix(self):\n out = Tmatrix()\n out.translate(Vector([.5, .5, .5]))\n out.scale(Vector([self.radius, self.radius, self.radius]) *\n (.5 / (self.radius + self.thickness)))\n return out",
"def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])",
"def _matvec(self, x):\n \n x = x.reshape((self.NH,))\n #\n # Compute kinetic energy operator\n #\n tx = self.KEO @ x \n \n # \n # Compute potential energy operator\n #\n xquad = self.basis.fbrToQuad(x,axis = 0) # xquad has shape (Nq,)\n vx = self.basis.quadToFbr(self.V * xquad) # vx has shape (NH,)\n \n return tx + vx",
"def hamiltonian_2nu_matter(h_vacuum_energy_independent, energy, VCC):\n h_matter = cp.deepcopy(h_vacuum_energy_independent)\n h_matter = np.multiply(1.0/energy, h_matter)\n\n # Add the matter potential to the ee term to find the matter\n # Hamiltonian\n h_matter[0][0] += VCC\n\n return h_matter",
"def mass_tot_rho(self):\n\n dm = np.zeros(self.nzon)\n dm[0] = 4. * np.pi / 3. * (self.r[0] ** 3 - self.r_cen ** 3) * self.rho[0]\n for i in range(1, self.nzon):\n dm[i] = 4. / 3. * np.pi * (self.r[i] ** 3 - self.r[i - 1] ** 3) * self.rho[i]\n # print(f' M_tot(Density) = {np.sum(dm)/phys.M_sun:.3f}')\n return np.sum(dm)",
"def eclogite_massive():\n\n rho = 3490.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 238.85; C[0,1] = 82.01; C[0,2] = 81.44; C[0,3] = 0.3; C[0,4] = -0.02; C[0,5] = 0.5\n C[1,0] = C[0,1]; C[1,1] = 242.12; C[1,2] = 81.11; C[1,3] = -0.66; C[1,4] = 0.33; C[1,5] = 0.12\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 235.57; C[2,3] = -0.28; C[2,4] = 0.22; C[2,5] = 0.31\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 78.72; C[3,4] = 0.27; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 78.37; C[4,5] = 0.25\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 77.91\n\n return C, rho",
"def tapered_cylinder_prop_MG(R1, R2, Rmg1, Rmg2, L, rho):\r\n # get V and CV for element\r\n Vinner, cVinner = tapered_cylinder_geom(R1, R2, L)\r\n # get V and CV for marine growth displacement\r\n Vouter, cVouter = tapered_cylinder_geom(Rmg1, Rmg2, L)\r\n # get mass and CV specific to marine growth thickness\r\n m_mg = (Vouter - Vinner)*rho\r\n if m_mg==0:\r\n h_c = 0.0\r\n else:\r\n h_c = (cVouter*Vouter - Vinner*cVinner)/(Vouter - Vinner)\r\n # get two moments of inertia for marine growth as if solid...\r\n Ilouter, Irouter = tapered_cylinder_inertia(Rmg1, Rmg2, L, rho) # inertias for marine growth if solid\r\n Ilinner, Irinner = tapered_cylinder_inertia(R1 , R2 , L, rho) # inertias for element if filled with marine growth\r\n # subtract to get moments of inertia of marine growth shell\r\n Ilmg = Ilouter - Ilinner\r\n Irmg = Irouter - Irinner\r\n return Vinner, Vouter, m_mg, h_c, Ilmg, Irmg",
"def magnetization(h):\n if h.has_eh: raise\n if h.has_spin: \n mx = extract.mx(h.intra)\n my = extract.my(h.intra)\n mz = extract.mz(h.intra)\n else: raise\n np.savetxt(\"MAGNETIZATION_X.OUT\",np.matrix([h.geometry.x,h.geometry.y,mx]).T)\n np.savetxt(\"MAGNETIZATION_Y.OUT\",np.matrix([h.geometry.x,h.geometry.y,my]).T)\n np.savetxt(\"MAGNETIZATION_Z.OUT\",np.matrix([h.geometry.x,h.geometry.y,mz]).T)",
"def createMesh(self, chem, coord_x_start, coord_y_start) :\r\n init_conc = .0\r\n self.compParDiff(chem)\r\n comp.Comp.createMeshHomo(self, 'SC', chem, init_conc, coord_x_start, coord_y_start)\r\n #self.meshes[0].setConc(1)\r",
"def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass",
"def mass_3d(r, Rs, rho0, gamma_inner, gamma_outer):\n Rs = float(Rs)\n const = 4 * np.pi * r ** 3 * rho0 * (Rs/r) ** gamma_inner\n m_3d = const/(3-gamma_inner) * hyp2f1((3-gamma_inner)/2,\n (gamma_outer-gamma_inner)/2,\n (5-gamma_inner)/2, -(r/Rs)**2)\n return m_3d",
"def bc_matrix(params):\r\n w = params['w']\r\n kx = params['kx']\r\n d_list = params['d_list']\r\n ex_list = params['ex_list']\r\n ez_list = params['ez_list']\r\n kz_list = params['kz_list']\r\n N = len(d_list)\r\n assert N == len(d_list) == len(ex_list) == len(ez_list) == len(kz_list)\r\n assert N >= 2\r\n assert d_list[0] == d_list[-1] == inf\r\n \r\n # delta = e^{i * kz * d}, i.e. phase change across each layer\r\n # delta[0] and delta[-1] are undefined and are not used.\r\n delta_list = [cmath.exp(1j * kz_list[i] * d_list[i]) for i in range(N)]\r\n \r\n Ex_up_over_H_up_list = [kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ex_down_over_H_down_list = [-a for a in Ex_up_over_H_up_list]\r\n Ez_up_over_H_up_list = [-kx / (w * ez_list[i] * nu.eps0) for i in range(N)]\r\n Ez_down_over_H_down_list = Ez_up_over_H_up_list[:]\r\n \r\n mat = np.zeros((2*N-2, 2*N-2), dtype=complex)\r\n \r\n for row_now in range(N-1):\r\n # This row concerns continuity of Ex across the boundary between\r\n # layer_under and layer_over (under and over the boundary respectively)\r\n layer_under = row_now\r\n layer_over = layer_under + 1\r\n # up_under_index is the column index in mat that gets multiplied by\r\n # H_{up} in layer_under.\r\n up_under_index = 2 * layer_under - 1\r\n down_under_index = 2 * layer_under\r\n up_over_index = 2 * layer_over - 1\r\n down_over_index = 2 * layer_over\r\n \r\n if layer_under != 0:\r\n assert 0 <= up_under_index < 2*N-2\r\n mat[row_now, up_under_index] = (\r\n Ex_up_over_H_up_list[layer_under] * delta_list[layer_under])\r\n mat[row_now, down_under_index] = Ex_down_over_H_down_list[layer_under]\r\n mat[row_now, up_over_index] = -Ex_up_over_H_up_list[layer_over]\r\n if layer_over != N-1:\r\n assert 0 <= down_over_index < 2*N-2\r\n mat[row_now, down_over_index] = (\r\n -Ex_down_over_H_down_list[layer_over] * delta_list[layer_over])\r\n\r\n for row_now in range(N-1, 2*N-2):\r\n # This row concerns continuity of eps_z * Ez across the boundary between\r\n # layer_under and layer_over (under and over the boundary respectively)\r\n layer_under = row_now - (N-1)\r\n layer_over = layer_under + 1\r\n # up_under_index is the column index in mat that gets multiplied by\r\n # H_{up} in layer_under.\r\n up_under_index = 2 * layer_under - 1\r\n down_under_index = 2 * layer_under\r\n up_over_index = 2 * layer_over - 1\r\n down_over_index = 2 * layer_over\r\n \r\n if layer_under != 0:\r\n assert 0 <= up_under_index < 2*N-2\r\n mat[row_now, up_under_index] = (ez_list[layer_under] *\r\n Ez_up_over_H_up_list[layer_under] * delta_list[layer_under])\r\n mat[row_now, down_under_index] = (ez_list[layer_under] *\r\n Ez_down_over_H_down_list[layer_under])\r\n mat[row_now, up_over_index] = (-ez_list[layer_over] * \r\n Ez_up_over_H_up_list[layer_over])\r\n if layer_over != N-1:\r\n assert 0 <= down_over_index < 2*N-2\r\n mat[row_now, down_over_index] = (-ez_list[layer_over] *\r\n Ez_down_over_H_down_list[layer_over] * delta_list[layer_over])\r\n \r\n return mat",
"def matrix_M1(l, omega, S, cn, csn, rhos, rho):\n sqrt = np.sqrt(l * (l + 1))\n zl = omega * S / cn['l']\n zt = omega * S / cn['t']\n xl = omega * S / csn['l']\n xt = omega * S / csn['t']\n col1 = np.array((d11(l, zt), d21(l, zt), d31(l, zt), d41(l, zt))) / zt\n col2 = (-sqrt * np.array((d12(l, zl), d22(l, zl),\n d32(l, zl), d42(l, zl))) / zl\n )\n col3 = (- np.array((d13(l, xt), d23(l, xt),\n d33(l, xt, zt, rhos, rho),\n d43(l, xt, zt, rhos, rho))) / xt\n )\n col4 = (sqrt * np.array((d14(l, xl), d24(l, xl),\n d34(l, xl, xt, zt, rhos, rho),\n d44(l, xl, xt, zt, rhos, rho))) / xl\n )\n M = np.array((col1, col2, col3, col4))\n return M.T",
"def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat",
"def H_massive(self, z):\n return self.H0*(self.Omega_m*(1.+z)**3. +\n self.Omega_lambda*self.X_DE(z) +\n self.Omega_rad*(1.+z)**4. +\n self.Omega_K*(1.+z)**2.)**0.5"
] | [
"0.637787",
"0.6240188",
"0.6122389",
"0.6016837",
"0.58427894",
"0.5698982",
"0.5682351",
"0.5587956",
"0.55861175",
"0.55698025",
"0.5528634",
"0.5519884",
"0.5518754",
"0.5508576",
"0.5492544",
"0.54866624",
"0.5483343",
"0.5426447",
"0.54144776",
"0.53910035",
"0.53630376",
"0.53604895",
"0.5356911",
"0.5349418",
"0.5332085",
"0.53320146",
"0.5313371",
"0.5308615",
"0.5303329",
"0.52987576"
] | 0.69000477 | 0 |
Does a None user return False | def test_user_is_none(self):
self.assertFalse(send_rotate_to_can(None, self.BIN_NUM)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)",
"def is_not_none(e):\n return e is not None",
"def is_none(obj):\n return obj is None",
"def not_none(value):\n return not value is None",
"def NoPrompt(self) -> bool:",
"def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True",
"def test_none_input(self):\n eq_(None, output())",
"def do_nothing(arg):\n return True",
"def return_false(self):\n return False",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def test_query_no_def_none(self):\n self.assertEqual(\n query_yes_no(question=\"Is anyone wiser than Socrates?\", default=None), False\n )",
"def is_self(user_id):\n query_user_id = request.args.get('user_id', default=None, type=int)\n return user_id==query_user_id and user_id is not None",
"def noyable(self):\n return False",
"def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")",
"def optional(self) -> bool:\n return False",
"def empty(self, value):\n return value is None",
"def _is_valid_user(self, user):\n return user and user['lat'] != \"\" and user['lon'] != \"\"",
"def _is_valid_user(self, user):\n return user and user['lat'] != \"\" and user['lon'] != \"\"",
"def is_yummy(self):\n return False",
"def null_enabled(self):\n return False",
"def testIsNullFalseAgain(self):\n val = is_null(5) \n self.assertFalse(val)",
"def _isEmpty(self, x, y):\n\t\treturn self.getValue(x, y) == None",
"def __nonzero__(self):\n return not (self.year is None and\n self.month is None and\n self.day is None)",
"def is_call_not_answered(self) -> bool:",
"def invariant(self):\n\t\treturn ((self.tenant != \"\") and (self.loginUser != \"\"))",
"def check_auth_none(self, username):\n return AUTH_FAILED"
] | [
"0.73663986",
"0.68698525",
"0.68031555",
"0.67773694",
"0.6765492",
"0.6731775",
"0.67047375",
"0.6696048",
"0.66367894",
"0.65957487",
"0.65957487",
"0.65957487",
"0.65957487",
"0.65957487",
"0.65849674",
"0.6499765",
"0.64993566",
"0.64586663",
"0.64354175",
"0.64343435",
"0.63879335",
"0.63879335",
"0.6377748",
"0.63653487",
"0.63528323",
"0.6344684",
"0.6336287",
"0.6333902",
"0.6333186",
"0.63292897"
] | 0.7621539 | 0 |
A CanInfo where a matching user cannot be found returns False | def test_can_info_does_not_exist(self):
fake_user = User(username='Fake', password='')
self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def can_view(self, user):\r\n return True",
"def can_be_viewed_by(self,user):\n return True",
"def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False",
"def is_visible_to(self, user):\n return True",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")",
"def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False",
"def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()",
"def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False",
"def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False",
"def testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)",
"def can_retrieve(self, user):\n return user.has_perm('agenda.can_see')",
"def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_council_privileges():\n return True\n return False",
"def _is_valid_user(self, user):\n return user and user['lat'] != \"\" and user['lon'] != \"\"",
"def _is_valid_user(self, user):\n return user and user['lat'] != \"\" and user['lon'] != \"\"",
"def test_func(self):\n member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self",
"def ref_user_flag(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return True\n except ObjectDoesNotExist:\n return False",
"def _has_data(cls):\n return User.objects.count() > 0",
"def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)",
"def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False",
"async def test_regular_member_cannot_target_another_member(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author)\n\n await self.cog.user_info(self.cog, ctx, self.target)\n\n ctx.send.assert_called_once_with(\"You may not use this command on users other than yourself.\")",
"def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False",
"def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True",
"def view(self, user, action, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n # TODO check groups in request maybe ? dunno\n if user.is_advisor:\n return True\n\n return self.admin_permission(user, action, *args)",
"def check_for_user_not_in_system(player_name: str) -> bool:\n\n for uid, user in self.connected_users.items():\n if user['authorized'] and user['main']['player_name'] == player_name:\n return False\n return True",
"def get_viewable(self, user):\n return True",
"def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False",
"def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False"
] | [
"0.6713832",
"0.6679743",
"0.6537976",
"0.6529905",
"0.64860743",
"0.62871456",
"0.627677",
"0.62588006",
"0.62487483",
"0.61500865",
"0.6142585",
"0.608562",
"0.6068029",
"0.6054016",
"0.60393703",
"0.6035473",
"0.6035473",
"0.60319674",
"0.59979326",
"0.5986419",
"0.5980946",
"0.59576297",
"0.5949034",
"0.594729",
"0.5945374",
"0.5942712",
"0.5939642",
"0.59360754",
"0.59356624",
"0.59292656"
] | 0.6814037 | 0 |
When the channel on the CanInfo is None, return False | def test_request_channel_is_none(self):
CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)
self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_channel(self):\n return True",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"def single_channel():\n return True",
"def is_empty(self):\n return self.channels is None or self.timestamp is None",
"def get_none2(self, channel):\n pass",
"async def check_na_channel(self, guild: discord.Guild):\n\n ch_id = await self.config.guild(guild).na_channel_id()\n\n if ch_id:\n return discord.utils.get(guild.text_channels, id=ch_id)\n return False",
"async def _check_channel(\n self, starboard: StarboardEntry, channel: discord.TextChannel\n ) -> bool:\n if starboard.whitelist_channel:\n return channel.id in starboard.whitelist_channel\n else:\n return channel.id not in starboard.blacklist_channel",
"def is_no_channel(val) -> bool:\n if isinstance(val, torch.Tensor):\n return bool(torch.isnan(val))\n if isinstance(val, str):\n return val == \"no_channel\"\n if np.isscalar(val):\n return bool(np.isnan(val))\n return val is None",
"def has_data(self):\n if len(self.channels) > 0:\n return True\n return False",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def _check_has_channel(data):\r\n return re.findall(\r\n r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'JOIN #([a-zA-Z0-9_]+)$', data)",
"def check_channel_shell_request(self, channel):\n return False",
"def cog_check(self, ctx):\n if ctx.guild is None:\n raise commands.NoPrivateMessage()\n return True",
"def check_channel_exec_request(self, channel, command):\n return False",
"async def test_skipped_missing_channel(self):\n self.cog.unsilence_timestamps.items.return_value = [(123, -1), (123, 1), (123, 10000000000)]\n self.bot.get_channel.return_value = None\n\n await self.cog._reschedule()\n\n self.cog.notifier.add_channel.assert_not_called()\n self.cog._unsilence_wrapper.assert_not_called()\n self.cog.scheduler.schedule_later.assert_not_called()",
"def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"def is_not_tilted(self, channel=None):\n return not self.get_state(channel)",
"def is_valid(self):\n if self.selection is None:\n return False\n if self.selection not in self.cryptomattes:\n return False\n if \"channels\" not in self.cryptomattes[self.selection]:\n return False\n if len(self.cryptomattes[self.selection][\"channels\"]) < 2:\n return False\n return True",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"def check_channel_forward_agent_request(self, channel):\n return False",
"def check(self, chan_id):\r\n other = False\r\n size = len(self.channels)\r\n for data in self.channels:\r\n\r\n if data.chan_id != chan_id:\r\n other = True\r\n else:\r\n other = False\r\n break\r\n return other",
"def noyable(self):\n return False",
"def check_event_channel(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.EVENT:\n raise error_handling.InactiveChannelError()\n else:\n return True",
"def channel_is_streaming(self, channel_name = ''): \n \n self.get_stream(channel_name)\n stream_json = self.stream['stream']\n if stream_json is None:\n return False\n else:\n print(stream_json['channel']['name'])\n print(stream_json['game'])\n print(stream_json['viewers'])\n print(stream_json['created_at'])\n return True",
"async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True",
"async def cog_check(self, ctx: Context) -> bool: # type: ignore[override]\n\n return ctx.guild is not None",
"def is_normal(self, channel=None):\n return self.get_state(channel) == 0",
"def check_notch(self):\n\n return self.pins[0] == self.notch",
"def is_connected(self):\n return self.connected_channel is not None"
] | [
"0.7227819",
"0.7064459",
"0.66487986",
"0.6543717",
"0.6476909",
"0.6457839",
"0.6433133",
"0.63275003",
"0.6317527",
"0.6239171",
"0.62300515",
"0.62059534",
"0.61681944",
"0.612439",
"0.607638",
"0.6019896",
"0.5983071",
"0.5969868",
"0.59639364",
"0.5955076",
"0.5921539",
"0.5919506",
"0.5906496",
"0.586115",
"0.5849225",
"0.58415776",
"0.5826772",
"0.58223397",
"0.5798717",
"0.5786851"
] | 0.76990074 | 0 |