Columns (name, type, value range):

body                     string   length 26 to 98.2k
body_hash                int64    -9,222,864,604,528,158,000 to 9,221,803,474B
docstring                string   length 1 to 16.8k
path                     string   length 5 to 230
name                     string   length 1 to 96
repository_name          string   length 7 to 89
lang                     string   1 distinct value
body_without_docstring   string   length 20 to 98.2k
@pytest.fixture def root(): 'Root function for tests.' dirname = tempfile.mkdtemp() (yield os.path.join(dirname, 'output1')) print('Directory structure was:') tree(dirname) shutil.rmtree(dirname)
258,632,522,870,728,640
Root function for tests.
tests/integration/test_data_finder.py
root
Peter9192/ESMValCore
python
@pytest.fixture def root(): dirname = tempfile.mkdtemp() (yield os.path.join(dirname, 'output1')) print('Directory structure was:') tree(dirname) shutil.rmtree(dirname)
@pytest.mark.parametrize('cfg', CONFIG['get_input_filelist']) def test_get_input_filelist(root, cfg): 'Test retrieving input filelist.' create_tree(root, cfg.get('available_files'), cfg.get('available_symlinks')) rootpath = {cfg['variable']['project']: [root]} drs = {cfg['variable']['project']: cfg['drs']} input_filelist = get_input_filelist(cfg['variable'], rootpath, drs) reference = [os.path.join(root, file) for file in cfg['found_files']] assert (sorted(input_filelist) == sorted(reference))
-2,881,636,409,293,850,600
Test retrieving input filelist.
tests/integration/test_data_finder.py
test_get_input_filelist
Peter9192/ESMValCore
python
@pytest.mark.parametrize('cfg', CONFIG['get_input_filelist']) def test_get_input_filelist(root, cfg): create_tree(root, cfg.get('available_files'), cfg.get('available_symlinks')) rootpath = {cfg['variable']['project']: [root]} drs = {cfg['variable']['project']: cfg['drs']} input_filelist = get_input_filelist(cfg['variable'], rootpath, drs) reference = [os.path.join(root, file) for file in cfg['found_files']] assert (sorted(input_filelist) == sorted(reference))
@pytest.mark.parametrize('cfg', CONFIG['get_input_fx_filelist']) def test_get_input_fx_filelist(root, cfg): 'Test retrieving fx filelist.' create_tree(root, cfg.get('available_files'), cfg.get('available_symlinks')) rootpath = {cfg['variable']['project']: [root]} drs = {cfg['variable']['project']: cfg['drs']} fx_files = get_input_fx_filelist(cfg['variable'], rootpath, drs) reference = {fx_var: (os.path.join(root, filename) if filename else None) for (fx_var, filename) in cfg['found_files'].items()} assert (fx_files == reference)
-1,748,723,419,376,551,200
Test retrieving fx filelist.
tests/integration/test_data_finder.py
test_get_input_fx_filelist
Peter9192/ESMValCore
python
@pytest.mark.parametrize('cfg', CONFIG['get_input_fx_filelist']) def test_get_input_fx_filelist(root, cfg): create_tree(root, cfg.get('available_files'), cfg.get('available_symlinks')) rootpath = {cfg['variable']['project']: [root]} drs = {cfg['variable']['project']: cfg['drs']} fx_files = get_input_fx_filelist(cfg['variable'], rootpath, drs) reference = {fx_var: (os.path.join(root, filename) if filename else None) for (fx_var, filename) in cfg['found_files'].items()} assert (fx_files == reference)
def spline_filter1d(input, order=3, axis=(- 1), output=numpy.float64, output_type=None): 'Calculates a one-dimensional spline filter along the given axis.\n\n The lines of the array along the given axis are filtered by a\n spline filter. The order of the spline must be >= 2 and <= 5.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') (output, return_value) = _ni_support._get_output(output, input, output_type) if (order in [0, 1]): output[...] = numpy.array(input) else: axis = _ni_support._check_axis(axis, input.ndim) _nd_image.spline_filter1d(input, order, axis, output) return return_value
-8,221,713,997,674,315,000
Calculates a one-dimensional spline filter along the given axis. The lines of the array along the given axis are filtered by a spline filter. The order of the spline must be >= 2 and <= 5.
kapteyn/interpolation.py
spline_filter1d
kapteyn-astro/kapteyn
python
def spline_filter1d(input, order=3, axis=(- 1), output=numpy.float64, output_type=None): 'Calculates a one-dimensional spline filter along the given axis.\n\n The lines of the array along the given axis are filtered by a\n spline filter. The order of the spline must be >= 2 and <= 5.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') (output, return_value) = _ni_support._get_output(output, input, output_type) if (order in [0, 1]): output[...] = numpy.array(input) else: axis = _ni_support._check_axis(axis, input.ndim) _nd_image.spline_filter1d(input, order, axis, output) return return_value
def spline_filter(input, order=3, output=numpy.float64, output_type=None): 'Multi-dimensional spline filter.\n\n Note: The multi-dimensional filter is implemented as a sequence of\n one-dimensional spline filters. The intermediate arrays are stored\n in the same data type as the output. Therefore, for output types\n with a limited precision, the results may be imprecise because\n intermediate results may be stored with insufficient precision.\n ' if ((order < 2) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') (output, return_value) = _ni_support._get_output(output, input, output_type) if ((order not in [0, 1]) and (input.ndim > 0)): for axis in range(input.ndim): spline_filter1d(input, order, axis, output=output) input = output else: output[...] = input[...] return return_value
13,522,673,866,341,064
Multi-dimensional spline filter. Note: The multi-dimensional filter is implemented as a sequence of one-dimensional spline filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision.
kapteyn/interpolation.py
spline_filter
kapteyn-astro/kapteyn
python
def spline_filter(input, order=3, output=numpy.float64, output_type=None): 'Multi-dimensional spline filter.\n\n Note: The multi-dimensional filter is implemented as a sequence of\n one-dimensional spline filters. The intermediate arrays are stored\n in the same data type as the output. Therefore, for output types\n with a limited precision, the results may be imprecise because\n intermediate results may be stored with insufficient precision.\n ' if ((order < 2) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') (output, return_value) = _ni_support._get_output(output, input, output_type) if ((order not in [0, 1]) and (input.ndim > 0)): for axis in range(input.ndim): spline_filter1d(input, order, axis, output=output) input = output else: output[...] = input[...] return return_value
def geometric_transform(input, mapping, output_shape=None, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True, extra_arguments=(), extra_keywords={}): "Apply an arbritrary geometric transform.\n\n The given mapping function is used to find, for each point in the\n output, the corresponding coordinates in the input. The value of the\n input at those coordinates is determined by spline interpolation of\n the requested order.\n\n mapping must be a callable object that accepts a tuple of length\n equal to the output array rank and returns the corresponding input\n coordinates as a tuple of length equal to the input array\n rank. Points outside the boundaries of the input are filled\n according to the given mode ('constant', 'nearest', 'reflect' or\n 'wrap'). The output shape can optionally be given. If not given,\n it is equal to the input shape. The parameter prefilter determines\n if the input is pre-filtered before interpolation (necessary for\n spline interpolation of order > 1). If False it is assumed that\n the input is already filtered. The extra_arguments and\n extra_keywords arguments can be used to provide extra arguments\n and keywords that are passed to the mapping function at each call.\n\n Example\n -------\n >>> a = arange(12.).reshape((4,3))\n >>> def shift_func(output_coordinates):\n ... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)\n ...\n >>> print geometric_transform(a,shift_func)\n array([[ 0. , 0. , 0. ],\n [ 0. , 1.3625, 2.7375],\n [ 0. , 4.8125, 6.1875],\n [ 0. , 8.2625, 9.6375]])\n " if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (output_shape is None): output_shape = input.shape if ((input.ndim < 1) or (len(output_shape) < 1)): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) _nd_image.geometric_transform(filtered, mapping, None, None, None, output, order, mode, cval, extra_arguments, extra_keywords) return return_value
2,634,614,057,414,725,600
Apply an arbritrary geometric transform. The given mapping function is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. mapping must be a callable object that accepts a tuple of length equal to the output array rank and returns the corresponding input coordinates as a tuple of length equal to the input array rank. Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). The output shape can optionally be given. If not given, it is equal to the input shape. The parameter prefilter determines if the input is pre-filtered before interpolation (necessary for spline interpolation of order > 1). If False it is assumed that the input is already filtered. The extra_arguments and extra_keywords arguments can be used to provide extra arguments and keywords that are passed to the mapping function at each call. Example ------- >>> a = arange(12.).reshape((4,3)) >>> def shift_func(output_coordinates): ... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5) ... >>> print geometric_transform(a,shift_func) array([[ 0. , 0. , 0. ], [ 0. , 1.3625, 2.7375], [ 0. , 4.8125, 6.1875], [ 0. , 8.2625, 9.6375]])
kapteyn/interpolation.py
geometric_transform
kapteyn-astro/kapteyn
python
def geometric_transform(input, mapping, output_shape=None, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True, extra_arguments=(), extra_keywords={}): "Apply an arbritrary geometric transform.\n\n The given mapping function is used to find, for each point in the\n output, the corresponding coordinates in the input. The value of the\n input at those coordinates is determined by spline interpolation of\n the requested order.\n\n mapping must be a callable object that accepts a tuple of length\n equal to the output array rank and returns the corresponding input\n coordinates as a tuple of length equal to the input array\n rank. Points outside the boundaries of the input are filled\n according to the given mode ('constant', 'nearest', 'reflect' or\n 'wrap'). The output shape can optionally be given. If not given,\n it is equal to the input shape. The parameter prefilter determines\n if the input is pre-filtered before interpolation (necessary for\n spline interpolation of order > 1). If False it is assumed that\n the input is already filtered. The extra_arguments and\n extra_keywords arguments can be used to provide extra arguments\n and keywords that are passed to the mapping function at each call.\n\n Example\n -------\n >>> a = arange(12.).reshape((4,3))\n >>> def shift_func(output_coordinates):\n ... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)\n ...\n >>> print geometric_transform(a,shift_func)\n array([[ 0. , 0. , 0. ],\n [ 0. , 1.3625, 2.7375],\n [ 0. , 4.8125, 6.1875],\n [ 0. , 8.2625, 9.6375]])\n " if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (output_shape is None): output_shape = input.shape if ((input.ndim < 1) or (len(output_shape) < 1)): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) _nd_image.geometric_transform(filtered, mapping, None, None, None, output, order, mode, cval, extra_arguments, extra_keywords) return return_value
def map_coordinates(input, coordinates, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): "\n Map the input array to new coordinates by interpolation.\n\n The array of coordinates is used to find, for each point in the output,\n the corresponding coordinates in the input. The value of the input at\n those coordinates is determined by spline interpolation of the\n requested order.\n\n The shape of the output is derived from that of the coordinate\n array by dropping the first axis. The values of the array along\n the first axis are the coordinates in the input array at which the\n output value is found.\n\n Parameters\n ----------\n input : ndarray\n The input array\n coordinates : array_like\n The coordinates at which `input` is evaluated.\n output_type : deprecated\n Use `output` instead.\n output : dtype, optional\n If the output has to have a certain type, specify the dtype.\n The default behavior is for the output to have the same type\n as `input`.\n order : int, optional\n The order of the spline interpolation, default is 3.\n The order has to be in the range 0-5.\n mode : str, optional\n Points outside the boundaries of the input are filled according\n to the given mode ('constant', 'nearest', 'reflect' or 'wrap').\n Default is 'constant'.\n cval : scalar, optional\n Value used for points outside the boundaries of the input if\n `mode='constant`. Default is 0.0\n prefilter : bool, optional\n The parameter prefilter determines if the input is\n pre-filtered with `spline_filter`_ before interpolation\n (necessary for spline interpolation of order > 1).\n If False, it is assumed that the input is already filtered.\n\n Returns\n -------\n return_value : ndarray\n The result of transforming the input. The shape of the\n output is derived from that of `coordinates` by dropping\n the first axis.\n\n\n See Also\n --------\n spline_filter, geometric_transform, scipy.interpolate\n\n Examples\n --------\n >>> import scipy.ndimage\n >>> a = np.arange(12.).reshape((4,3))\n >>> print a\n array([[ 0., 1., 2.],\n [ 3., 4., 5.],\n [ 6., 7., 8.],\n [ 9., 10., 11.]])\n >>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)\n [ 2. 7.]\n\n Above, the interpolated value of a[0.5, 0.5] gives output[0], while\n a[2, 1] is output[1].\n\n >>> inds = np.array([[0.5, 2], [0.5, 4]])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)\n array([ 2. , -33.3])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')\n array([ 2., 8.])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)\n array([ True, False], dtype=bool\n\n " if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') coordinates = numpy.asarray(coordinates) if numpy.iscomplexobj(coordinates): raise TypeError('Complex type not supported') output_shape = coordinates.shape[1:] if ((input.ndim < 1) or (len(output_shape) < 1)): raise RuntimeError('input and output rank must be > 0') if (coordinates.shape[0] != input.ndim): raise RuntimeError('invalid shape for coordinate array') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) _nd_image.geometric_transform(filtered, None, coordinates, None, None, output, order, mode, cval, None, None) return return_value
-8,802,813,107,881,692,000
Map the input array to new coordinates by interpolation. The array of coordinates is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. The shape of the output is derived from that of the coordinate array by dropping the first axis. The values of the array along the first axis are the coordinates in the input array at which the output value is found. Parameters ---------- input : ndarray The input array coordinates : array_like The coordinates at which `input` is evaluated. output_type : deprecated Use `output` instead. output : dtype, optional If the output has to have a certain type, specify the dtype. The default behavior is for the output to have the same type as `input`. order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. mode : str, optional Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'. cval : scalar, optional Value used for points outside the boundaries of the input if `mode='constant`. Default is 0.0 prefilter : bool, optional The parameter prefilter determines if the input is pre-filtered with `spline_filter`_ before interpolation (necessary for spline interpolation of order > 1). If False, it is assumed that the input is already filtered. Returns ------- return_value : ndarray The result of transforming the input. The shape of the output is derived from that of `coordinates` by dropping the first axis. See Also -------- spline_filter, geometric_transform, scipy.interpolate Examples -------- >>> import scipy.ndimage >>> a = np.arange(12.).reshape((4,3)) >>> print a array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.], [ 9., 10., 11.]]) >>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) [ 2. 7.] Above, the interpolated value of a[0.5, 0.5] gives output[0], while a[2, 1] is output[1]. >>> inds = np.array([[0.5, 2], [0.5, 4]]) >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3) array([ 2. , -33.3]) >>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest') array([ 2., 8.]) >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) array([ True, False], dtype=bool
kapteyn/interpolation.py
map_coordinates
kapteyn-astro/kapteyn
python
def map_coordinates(input, coordinates, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): "\n Map the input array to new coordinates by interpolation.\n\n The array of coordinates is used to find, for each point in the output,\n the corresponding coordinates in the input. The value of the input at\n those coordinates is determined by spline interpolation of the\n requested order.\n\n The shape of the output is derived from that of the coordinate\n array by dropping the first axis. The values of the array along\n the first axis are the coordinates in the input array at which the\n output value is found.\n\n Parameters\n ----------\n input : ndarray\n The input array\n coordinates : array_like\n The coordinates at which `input` is evaluated.\n output_type : deprecated\n Use `output` instead.\n output : dtype, optional\n If the output has to have a certain type, specify the dtype.\n The default behavior is for the output to have the same type\n as `input`.\n order : int, optional\n The order of the spline interpolation, default is 3.\n The order has to be in the range 0-5.\n mode : str, optional\n Points outside the boundaries of the input are filled according\n to the given mode ('constant', 'nearest', 'reflect' or 'wrap').\n Default is 'constant'.\n cval : scalar, optional\n Value used for points outside the boundaries of the input if\n `mode='constant`. Default is 0.0\n prefilter : bool, optional\n The parameter prefilter determines if the input is\n pre-filtered with `spline_filter`_ before interpolation\n (necessary for spline interpolation of order > 1).\n If False, it is assumed that the input is already filtered.\n\n Returns\n -------\n return_value : ndarray\n The result of transforming the input. The shape of the\n output is derived from that of `coordinates` by dropping\n the first axis.\n\n\n See Also\n --------\n spline_filter, geometric_transform, scipy.interpolate\n\n Examples\n --------\n >>> import scipy.ndimage\n >>> a = np.arange(12.).reshape((4,3))\n >>> print a\n array([[ 0., 1., 2.],\n [ 3., 4., 5.],\n [ 6., 7., 8.],\n [ 9., 10., 11.]])\n >>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)\n [ 2. 7.]\n\n Above, the interpolated value of a[0.5, 0.5] gives output[0], while\n a[2, 1] is output[1].\n\n >>> inds = np.array([[0.5, 2], [0.5, 4]])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)\n array([ 2. , -33.3])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')\n array([ 2., 8.])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)\n array([ True, False], dtype=bool\n\n " if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') coordinates = numpy.asarray(coordinates) if numpy.iscomplexobj(coordinates): raise TypeError('Complex type not supported') output_shape = coordinates.shape[1:] if ((input.ndim < 1) or (len(output_shape) < 1)): raise RuntimeError('input and output rank must be > 0') if (coordinates.shape[0] != input.ndim): raise RuntimeError('invalid shape for coordinate array') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) _nd_image.geometric_transform(filtered, None, coordinates, None, None, output, order, mode, cval, None, None) return return_value
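Each interpolation body above contains the same prefilter branch: for spline orders above 1 the input is first run through spline_filter, and prefilter=False tells the routine the caller has already done that step. A minimal sketch of the relationship, assuming scipy.ndimage is available (the interface these kapteyn functions mirror, as the map_coordinates docstring itself suggests); exact agreement near the array edges can depend on the scipy version's boundary handling:

import numpy as np
from scipy import ndimage

a = np.arange(12.0).reshape((4, 3))
coords = [[1.5, 2.0], [1.5, 1.0]]  # two sample points: a[1.5, 1.5] and a[2.0, 1.0]

# Default path: map_coordinates prefilters internally because order > 1.
direct = ndimage.map_coordinates(a, coords, order=3)

# Manual path: prefilter once, then declare the input already filtered.
coeffs = ndimage.spline_filter(a, order=3, output=np.float64)
manual = ndimage.map_coordinates(coeffs, coords, order=3, prefilter=False)

print(np.allclose(direct, manual))  # True, up to boundary-handling details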
def affine_transform(input, matrix, offset=0.0, output_shape=None, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Apply an affine transformation.\n\n The given matrix and offset are used to find for each point in the\n output the corresponding coordinates in the input by an affine\n transformation. The value of the input at those coordinates is\n determined by spline interpolation of the requested order. Points\n outside the boundaries of the input are filled according to the given\n mode. The output shape can optionally be given. If not given it is\n equal to the input shape. The parameter prefilter determines if the\n input is pre-filtered before interpolation, if False it is assumed\n that the input is already filtered.\n\n The matrix must be two-dimensional or can also be given as a\n one-dimensional sequence or array. In the latter case, it is\n assumed that the matrix is diagonal. A more efficient algorithms\n is then applied that exploits the separability of the problem.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (output_shape is None): output_shape = input.shape if ((input.ndim < 1) or (len(output_shape) < 1)): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) matrix = numpy.asarray(matrix, dtype=numpy.float64) if ((matrix.ndim not in [1, 2]) or (matrix.shape[0] < 1)): raise RuntimeError('no proper affine matrix provided') if (matrix.shape[0] != input.ndim): raise RuntimeError('affine matrix has wrong number of rows') if ((matrix.ndim == 2) and (matrix.shape[1] != output.ndim)): raise RuntimeError('affine matrix has wrong number of columns') if (not matrix.flags.contiguous): matrix = matrix.copy() offset = _ni_support._normalize_sequence(offset, input.ndim) offset = numpy.asarray(offset, dtype=numpy.float64) if ((offset.ndim != 1) or (offset.shape[0] < 1)): raise RuntimeError('no proper offset provided') if (not offset.flags.contiguous): offset = offset.copy() if (matrix.ndim == 1): _nd_image.zoom_shift(filtered, matrix, offset, output, order, mode, cval) else: _nd_image.geometric_transform(filtered, None, None, matrix, offset, output, order, mode, cval, None, None) return return_value
9,161,708,642,816,973,000
Apply an affine transformation. The given matrix and offset are used to find for each point in the output the corresponding coordinates in the input by an affine transformation. The value of the input at those coordinates is determined by spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. The output shape can optionally be given. If not given it is equal to the input shape. The parameter prefilter determines if the input is pre-filtered before interpolation, if False it is assumed that the input is already filtered. The matrix must be two-dimensional or can also be given as a one-dimensional sequence or array. In the latter case, it is assumed that the matrix is diagonal. A more efficient algorithms is then applied that exploits the separability of the problem.
kapteyn/interpolation.py
affine_transform
kapteyn-astro/kapteyn
python
def affine_transform(input, matrix, offset=0.0, output_shape=None, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Apply an affine transformation.\n\n The given matrix and offset are used to find for each point in the\n output the corresponding coordinates in the input by an affine\n transformation. The value of the input at those coordinates is\n determined by spline interpolation of the requested order. Points\n outside the boundaries of the input are filled according to the given\n mode. The output shape can optionally be given. If not given it is\n equal to the input shape. The parameter prefilter determines if the\n input is pre-filtered before interpolation, if False it is assumed\n that the input is already filtered.\n\n The matrix must be two-dimensional or can also be given as a\n one-dimensional sequence or array. In the latter case, it is\n assumed that the matrix is diagonal. A more efficient algorithms\n is then applied that exploits the separability of the problem.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (output_shape is None): output_shape = input.shape if ((input.ndim < 1) or (len(output_shape) < 1)): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) matrix = numpy.asarray(matrix, dtype=numpy.float64) if ((matrix.ndim not in [1, 2]) or (matrix.shape[0] < 1)): raise RuntimeError('no proper affine matrix provided') if (matrix.shape[0] != input.ndim): raise RuntimeError('affine matrix has wrong number of rows') if ((matrix.ndim == 2) and (matrix.shape[1] != output.ndim)): raise RuntimeError('affine matrix has wrong number of columns') if (not matrix.flags.contiguous): matrix = matrix.copy() offset = _ni_support._normalize_sequence(offset, input.ndim) offset = numpy.asarray(offset, dtype=numpy.float64) if ((offset.ndim != 1) or (offset.shape[0] < 1)): raise RuntimeError('no proper offset provided') if (not offset.flags.contiguous): offset = offset.copy() if (matrix.ndim == 1): _nd_image.zoom_shift(filtered, matrix, offset, output, order, mode, cval) else: _nd_image.geometric_transform(filtered, None, None, matrix, offset, output, order, mode, cval, None, None) return return_value
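The affine_transform body above accepts either a full matrix or a one-dimensional sequence that is taken as the diagonal of the matrix, in which case the faster separable zoom_shift path is used. A small usage sketch, again written against the assumed scipy.ndimage equivalent:

import numpy as np
from scipy import ndimage

a = np.arange(12.0).reshape((4, 3))

# A full 2x2 identity matrix and its 1-D diagonal form describe the same transform;
# the 1-D form exercises the separable code path.
full = ndimage.affine_transform(a, np.eye(2), order=1)
diag = ndimage.affine_transform(a, [1.0, 1.0], order=1)
print(np.allclose(full, a), np.allclose(diag, a))  # True True

# matrix and offset map output coordinates to input coordinates:
# here output[i, j] = a[i + 1, j], and the last row (now out of range) is cval-filled.
shifted = ndimage.affine_transform(a, [1.0, 1.0], offset=[1.0, 0.0], order=1)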
def shift(input, shift, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Shift an array.\n\n The array is shifted using spline interpolation of the requested\n order. Points outside the boundaries of the input are filled according\n to the given mode. The parameter prefilter determines if the input is\n pre-filtered before interpolation, if False it is assumed that the\n input is already filtered.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (input.ndim < 1): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type) shift = _ni_support._normalize_sequence(shift, input.ndim) shift = [(- ii) for ii in shift] shift = numpy.asarray(shift, dtype=numpy.float64) if (not shift.flags.contiguous): shift = shift.copy() _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval) return return_value
1,732,073,668,687,526,000
Shift an array. The array is shifted using spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. The parameter prefilter determines if the input is pre-filtered before interpolation, if False it is assumed that the input is already filtered.
kapteyn/interpolation.py
shift
kapteyn-astro/kapteyn
python
def shift(input, shift, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Shift an array.\n\n The array is shifted using spline interpolation of the requested\n order. Points outside the boundaries of the input are filled according\n to the given mode. The parameter prefilter determines if the input is\n pre-filtered before interpolation, if False it is assumed that the\n input is already filtered.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (input.ndim < 1): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input (output, return_value) = _ni_support._get_output(output, input, output_type) shift = _ni_support._normalize_sequence(shift, input.ndim) shift = [(- ii) for ii in shift] shift = numpy.asarray(shift, dtype=numpy.float64) if (not shift.flags.contiguous): shift = shift.copy() _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval) return return_value
def zoom(input, zoom, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Zoom an array.\n\n The array is zoomed using spline interpolation of the requested order.\n Points outside the boundaries of the input are filled according to the\n given mode. The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (input.ndim < 1): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input zoom = _ni_support._normalize_sequence(zoom, input.ndim) output_shape = tuple([int((ii * jj)) for (ii, jj) in zip(input.shape, zoom)]) zoom = ((numpy.array(input.shape) - 1) / (numpy.array(output_shape, float) - 1)) (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) zoom = numpy.asarray(zoom, dtype=numpy.float64) zoom = numpy.ascontiguousarray(zoom) _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval) return return_value
-1,133,667,281,842,703,600
Zoom an array. The array is zoomed using spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. The parameter prefilter determines if the input is pre- filtered before interpolation, if False it is assumed that the input is already filtered.
kapteyn/interpolation.py
zoom
kapteyn-astro/kapteyn
python
def zoom(input, zoom, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Zoom an array.\n\n The array is zoomed using spline interpolation of the requested order.\n Points outside the boundaries of the input are filled according to the\n given mode. The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n ' if ((order < 0) or (order > 5)): raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if (input.ndim < 1): raise RuntimeError('input and output rank must be > 0') mode = _extend_mode_to_code(mode) if (prefilter and (order > 1)): filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input zoom = _ni_support._normalize_sequence(zoom, input.ndim) output_shape = tuple([int((ii * jj)) for (ii, jj) in zip(input.shape, zoom)]) zoom = ((numpy.array(input.shape) - 1) / (numpy.array(output_shape, float) - 1)) (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) zoom = numpy.asarray(zoom, dtype=numpy.float64) zoom = numpy.ascontiguousarray(zoom) _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval) return return_value
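In the zoom body above, the output shape is computed per axis as int(input_length * zoom_factor), and the zoom factor is then re-derived from the two shapes before resampling. A short sketch of the resulting shapes, assuming scipy.ndimage:

import numpy as np
from scipy import ndimage

a = np.arange(16.0).reshape((4, 4))

b = ndimage.zoom(a, 2.5, order=1)         # every axis: 4 * 2.5 -> length 10
c = ndimage.zoom(a, (2.0, 0.5), order=1)  # per-axis factors: (4*2.0, 4*0.5) -> (8, 2)
print(b.shape, c.shape)                   # (10, 10) (8, 2)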
def rotate(input, angle, axes=(1, 0), reshape=True, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Rotate an array.\n\n The array is rotated in the plane defined by the two axes given by the\n axes parameter using spline interpolation of the requested order. The\n angle is given in degrees. Points outside the boundaries of the input\n are filled according to the given mode. If reshape is true, the output\n shape is adapted so that the input array is contained completely in\n the output. The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n ' input = numpy.asarray(input) axes = list(axes) rank = input.ndim if (axes[0] < 0): axes[0] += rank if (axes[1] < 0): axes[1] += rank if ((axes[0] < 0) or (axes[1] < 0) or (axes[0] > rank) or (axes[1] > rank)): raise RuntimeError('invalid rotation plane specified') if (axes[0] > axes[1]): axes = (axes[1], axes[0]) angle = ((numpy.pi / 180) * angle) m11 = math.cos(angle) m12 = math.sin(angle) m21 = (- math.sin(angle)) m22 = math.cos(angle) matrix = numpy.array([[m11, m12], [m21, m22]], dtype=numpy.float64) iy = input.shape[axes[0]] ix = input.shape[axes[1]] if reshape: mtrx = numpy.array([[m11, (- m21)], [(- m12), m22]], dtype=numpy.float64) minc = [0, 0] maxc = [0, 0] coor = numpy.dot(mtrx, [0, ix]) (minc, maxc) = _minmax(coor, minc, maxc) coor = numpy.dot(mtrx, [iy, 0]) (minc, maxc) = _minmax(coor, minc, maxc) coor = numpy.dot(mtrx, [iy, ix]) (minc, maxc) = _minmax(coor, minc, maxc) oy = int(((maxc[0] - minc[0]) + 0.5)) ox = int(((maxc[1] - minc[1]) + 0.5)) else: oy = input.shape[axes[0]] ox = input.shape[axes[1]] offset = numpy.zeros((2,), dtype=numpy.float64) offset[0] = ((float(oy) / 2.0) - 0.5) offset[1] = ((float(ox) / 2.0) - 0.5) offset = numpy.dot(matrix, offset) tmp = numpy.zeros((2,), dtype=numpy.float64) tmp[0] = ((float(iy) / 2.0) - 0.5) tmp[1] = ((float(ix) / 2.0) - 0.5) offset = (tmp - offset) output_shape = list(input.shape) output_shape[axes[0]] = oy output_shape[axes[1]] = ox output_shape = tuple(output_shape) (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) if (input.ndim <= 2): affine_transform(input, matrix, offset, output_shape, None, output, order, mode, cval, prefilter) else: coordinates = [] size = numpy.product(input.shape, axis=0) size /= input.shape[axes[0]] size /= input.shape[axes[1]] for ii in range(input.ndim): if (ii not in axes): coordinates.append(0) else: coordinates.append(slice(None, None, None)) iter_axes = list(range(input.ndim)) iter_axes.reverse() iter_axes.remove(axes[0]) iter_axes.remove(axes[1]) os = (output_shape[axes[0]], output_shape[axes[1]]) for ii in range(size): ia = input[tuple(coordinates)] oa = output[tuple(coordinates)] affine_transform(ia, matrix, offset, os, None, oa, order, mode, cval, prefilter) for jj in iter_axes: if (coordinates[jj] < (input.shape[jj] - 1)): coordinates[jj] += 1 break else: coordinates[jj] = 0 return return_value
8,433,568,912,036,353,000
Rotate an array. The array is rotated in the plane defined by the two axes given by the axes parameter using spline interpolation of the requested order. The angle is given in degrees. Points outside the boundaries of the input are filled according to the given mode. If reshape is true, the output shape is adapted so that the input array is contained completely in the output. The parameter prefilter determines if the input is pre- filtered before interpolation, if False it is assumed that the input is already filtered.
kapteyn/interpolation.py
rotate
kapteyn-astro/kapteyn
python
def rotate(input, angle, axes=(1, 0), reshape=True, output_type=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): 'Rotate an array.\n\n The array is rotated in the plane defined by the two axes given by the\n axes parameter using spline interpolation of the requested order. The\n angle is given in degrees. Points outside the boundaries of the input\n are filled according to the given mode. If reshape is true, the output\n shape is adapted so that the input array is contained completely in\n the output. The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n ' input = numpy.asarray(input) axes = list(axes) rank = input.ndim if (axes[0] < 0): axes[0] += rank if (axes[1] < 0): axes[1] += rank if ((axes[0] < 0) or (axes[1] < 0) or (axes[0] > rank) or (axes[1] > rank)): raise RuntimeError('invalid rotation plane specified') if (axes[0] > axes[1]): axes = (axes[1], axes[0]) angle = ((numpy.pi / 180) * angle) m11 = math.cos(angle) m12 = math.sin(angle) m21 = (- math.sin(angle)) m22 = math.cos(angle) matrix = numpy.array([[m11, m12], [m21, m22]], dtype=numpy.float64) iy = input.shape[axes[0]] ix = input.shape[axes[1]] if reshape: mtrx = numpy.array([[m11, (- m21)], [(- m12), m22]], dtype=numpy.float64) minc = [0, 0] maxc = [0, 0] coor = numpy.dot(mtrx, [0, ix]) (minc, maxc) = _minmax(coor, minc, maxc) coor = numpy.dot(mtrx, [iy, 0]) (minc, maxc) = _minmax(coor, minc, maxc) coor = numpy.dot(mtrx, [iy, ix]) (minc, maxc) = _minmax(coor, minc, maxc) oy = int(((maxc[0] - minc[0]) + 0.5)) ox = int(((maxc[1] - minc[1]) + 0.5)) else: oy = input.shape[axes[0]] ox = input.shape[axes[1]] offset = numpy.zeros((2,), dtype=numpy.float64) offset[0] = ((float(oy) / 2.0) - 0.5) offset[1] = ((float(ox) / 2.0) - 0.5) offset = numpy.dot(matrix, offset) tmp = numpy.zeros((2,), dtype=numpy.float64) tmp[0] = ((float(iy) / 2.0) - 0.5) tmp[1] = ((float(ix) / 2.0) - 0.5) offset = (tmp - offset) output_shape = list(input.shape) output_shape[axes[0]] = oy output_shape[axes[1]] = ox output_shape = tuple(output_shape) (output, return_value) = _ni_support._get_output(output, input, output_type, shape=output_shape) if (input.ndim <= 2): affine_transform(input, matrix, offset, output_shape, None, output, order, mode, cval, prefilter) else: coordinates = [] size = numpy.product(input.shape, axis=0) size /= input.shape[axes[0]] size /= input.shape[axes[1]] for ii in range(input.ndim): if (ii not in axes): coordinates.append(0) else: coordinates.append(slice(None, None, None)) iter_axes = list(range(input.ndim)) iter_axes.reverse() iter_axes.remove(axes[0]) iter_axes.remove(axes[1]) os = (output_shape[axes[0]], output_shape[axes[1]]) for ii in range(size): ia = input[tuple(coordinates)] oa = output[tuple(coordinates)] affine_transform(ia, matrix, offset, os, None, oa, order, mode, cval, prefilter) for jj in iter_axes: if (coordinates[jj] < (input.shape[jj] - 1)): coordinates[jj] += 1 break else: coordinates[jj] = 0 return return_value
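The rotate body above builds a 2x2 rotation matrix for the plane selected by axes and, for arrays of more than two dimensions, loops affine_transform over the remaining axes; reshape controls whether the output grows to contain the whole rotated input. A brief sketch of the reshape behaviour, assuming scipy.ndimage:

import numpy as np
from scipy import ndimage

img = np.zeros((10, 20))
img[4:6, 8:12] = 1.0

grown = ndimage.rotate(img, 45)                   # reshape=True: output enlarged to hold the rotated array
clipped = ndimage.rotate(img, 45, reshape=False)  # reshape=False: input shape kept, corners clipped
print(grown.shape, clipped.shape)                 # about (21, 21), and exactly (10, 20)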
def __init__(self, algorithm=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None): 'V1alpha3ExperimentSpec - a model defined in Swagger' self._algorithm = None self._max_failed_trial_count = None self._max_trial_count = None self._metrics_collector_spec = None self._nas_config = None self._objective = None self._parallel_trial_count = None self._parameters = None self._resume_policy = None self._trial_template = None self.discriminator = None if (algorithm is not None): self.algorithm = algorithm if (max_failed_trial_count is not None): self.max_failed_trial_count = max_failed_trial_count if (max_trial_count is not None): self.max_trial_count = max_trial_count if (metrics_collector_spec is not None): self.metrics_collector_spec = metrics_collector_spec if (nas_config is not None): self.nas_config = nas_config if (objective is not None): self.objective = objective if (parallel_trial_count is not None): self.parallel_trial_count = parallel_trial_count if (parameters is not None): self.parameters = parameters if (resume_policy is not None): self.resume_policy = resume_policy if (trial_template is not None): self.trial_template = trial_template
133,299,599,670,606,750
V1alpha3ExperimentSpec - a model defined in Swagger
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
__init__
ChenjunZou/katib
python
def __init__(self, algorithm=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None): self._algorithm = None self._max_failed_trial_count = None self._max_trial_count = None self._metrics_collector_spec = None self._nas_config = None self._objective = None self._parallel_trial_count = None self._parameters = None self._resume_policy = None self._trial_template = None self.discriminator = None if (algorithm is not None): self.algorithm = algorithm if (max_failed_trial_count is not None): self.max_failed_trial_count = max_failed_trial_count if (max_trial_count is not None): self.max_trial_count = max_trial_count if (metrics_collector_spec is not None): self.metrics_collector_spec = metrics_collector_spec if (nas_config is not None): self.nas_config = nas_config if (objective is not None): self.objective = objective if (parallel_trial_count is not None): self.parallel_trial_count = parallel_trial_count if (parameters is not None): self.parameters = parameters if (resume_policy is not None): self.resume_policy = resume_policy if (trial_template is not None): self.trial_template = trial_template
@property def algorithm(self): 'Gets the algorithm of this V1alpha3ExperimentSpec. # noqa: E501\n\n Describes the suggestion algorithm. # noqa: E501\n\n :return: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3AlgorithmSpec\n ' return self._algorithm
7,650,702,324,478,667,000
Gets the algorithm of this V1alpha3ExperimentSpec. # noqa: E501 Describes the suggestion algorithm. # noqa: E501 :return: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: V1alpha3AlgorithmSpec
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
algorithm
ChenjunZou/katib
python
@property def algorithm(self): 'Gets the algorithm of this V1alpha3ExperimentSpec. # noqa: E501\n\n Describes the suggestion algorithm. # noqa: E501\n\n :return: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3AlgorithmSpec\n ' return self._algorithm
@algorithm.setter def algorithm(self, algorithm): 'Sets the algorithm of this V1alpha3ExperimentSpec.\n\n Describes the suggestion algorithm. # noqa: E501\n\n :param algorithm: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3AlgorithmSpec\n ' self._algorithm = algorithm
-1,595,579,673,576,640,300
Sets the algorithm of this V1alpha3ExperimentSpec. Describes the suggestion algorithm. # noqa: E501 :param algorithm: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501 :type: V1alpha3AlgorithmSpec
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
algorithm
ChenjunZou/katib
python
@algorithm.setter def algorithm(self, algorithm): 'Sets the algorithm of this V1alpha3ExperimentSpec.\n\n Describes the suggestion algorithm. # noqa: E501\n\n :param algorithm: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3AlgorithmSpec\n ' self._algorithm = algorithm
@property def max_failed_trial_count(self): 'Gets the max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n\n Max failed trials to mark experiment as failed. # noqa: E501\n\n :return: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: int\n ' return self._max_failed_trial_count
5,420,390,502,883,747,000
Gets the max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 Max failed trials to mark experiment as failed. # noqa: E501 :return: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: int
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
max_failed_trial_count
ChenjunZou/katib
python
@property def max_failed_trial_count(self): 'Gets the max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n\n Max failed trials to mark experiment as failed. # noqa: E501\n\n :return: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: int\n ' return self._max_failed_trial_count
@max_failed_trial_count.setter def max_failed_trial_count(self, max_failed_trial_count): 'Sets the max_failed_trial_count of this V1alpha3ExperimentSpec.\n\n Max failed trials to mark experiment as failed. # noqa: E501\n\n :param max_failed_trial_count: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :type: int\n ' self._max_failed_trial_count = max_failed_trial_count
3,150,749,497,333,372,000
Sets the max_failed_trial_count of this V1alpha3ExperimentSpec. Max failed trials to mark experiment as failed. # noqa: E501 :param max_failed_trial_count: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 :type: int
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
max_failed_trial_count
ChenjunZou/katib
python
@max_failed_trial_count.setter def max_failed_trial_count(self, max_failed_trial_count): 'Sets the max_failed_trial_count of this V1alpha3ExperimentSpec.\n\n Max failed trials to mark experiment as failed. # noqa: E501\n\n :param max_failed_trial_count: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :type: int\n ' self._max_failed_trial_count = max_failed_trial_count
@property def max_trial_count(self): 'Gets the max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n\n Max completed trials to mark experiment as succeeded # noqa: E501\n\n :return: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: int\n ' return self._max_trial_count
6,569,465,222,338,827,000
Gets the max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 Max completed trials to mark experiment as succeeded # noqa: E501 :return: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: int
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
max_trial_count
ChenjunZou/katib
python
@property def max_trial_count(self): 'Gets the max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n\n Max completed trials to mark experiment as succeeded # noqa: E501\n\n :return: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: int\n ' return self._max_trial_count
@max_trial_count.setter def max_trial_count(self, max_trial_count): 'Sets the max_trial_count of this V1alpha3ExperimentSpec.\n\n Max completed trials to mark experiment as succeeded # noqa: E501\n\n :param max_trial_count: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :type: int\n ' self._max_trial_count = max_trial_count
915,055,181,498,908,300
Sets the max_trial_count of this V1alpha3ExperimentSpec. Max completed trials to mark experiment as succeeded # noqa: E501 :param max_trial_count: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 :type: int
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
max_trial_count
ChenjunZou/katib
python
@max_trial_count.setter def max_trial_count(self, max_trial_count): 'Sets the max_trial_count of this V1alpha3ExperimentSpec.\n\n Max completed trials to mark experiment as succeeded # noqa: E501\n\n :param max_trial_count: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :type: int\n ' self._max_trial_count = max_trial_count
@property def metrics_collector_spec(self): 'Gets the metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501\n\n For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501\n\n :return: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3MetricsCollectorSpec\n ' return self._metrics_collector_spec
-1,540,824,200,271,872,500
Gets the metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501 For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501 :return: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: V1alpha3MetricsCollectorSpec
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
metrics_collector_spec
ChenjunZou/katib
python
@property def metrics_collector_spec(self): 'Gets the metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501\n\n For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501\n\n :return: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3MetricsCollectorSpec\n ' return self._metrics_collector_spec
@metrics_collector_spec.setter def metrics_collector_spec(self, metrics_collector_spec): 'Sets the metrics_collector_spec of this V1alpha3ExperimentSpec.\n\n For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501\n\n :param metrics_collector_spec: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3MetricsCollectorSpec\n ' self._metrics_collector_spec = metrics_collector_spec
5,239,656,019,610,327,000
Sets the metrics_collector_spec of this V1alpha3ExperimentSpec. For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501 :param metrics_collector_spec: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501 :type: V1alpha3MetricsCollectorSpec
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
metrics_collector_spec
ChenjunZou/katib
python
@metrics_collector_spec.setter def metrics_collector_spec(self, metrics_collector_spec): 'Sets the metrics_collector_spec of this V1alpha3ExperimentSpec.\n\n For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501\n\n :param metrics_collector_spec: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3MetricsCollectorSpec\n ' self._metrics_collector_spec = metrics_collector_spec
@property def nas_config(self): 'Gets the nas_config of this V1alpha3ExperimentSpec. # noqa: E501\n\n\n :return: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3NasConfig\n ' return self._nas_config
5,473,597,441,255,212,000
Gets the nas_config of this V1alpha3ExperimentSpec. # noqa: E501 :return: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: V1alpha3NasConfig
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
nas_config
ChenjunZou/katib
python
@property def nas_config(self): 'Gets the nas_config of this V1alpha3ExperimentSpec. # noqa: E501\n\n\n :return: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3NasConfig\n ' return self._nas_config
@nas_config.setter def nas_config(self, nas_config): 'Sets the nas_config of this V1alpha3ExperimentSpec.\n\n\n :param nas_config: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3NasConfig\n ' self._nas_config = nas_config
-5,358,884,515,209,816,000
Sets the nas_config of this V1alpha3ExperimentSpec. :param nas_config: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501 :type: V1alpha3NasConfig
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
nas_config
ChenjunZou/katib
python
@nas_config.setter def nas_config(self, nas_config): 'Sets the nas_config of this V1alpha3ExperimentSpec.\n\n\n :param nas_config: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3NasConfig\n ' self._nas_config = nas_config
@property def objective(self): 'Gets the objective of this V1alpha3ExperimentSpec. # noqa: E501\n\n Describes the objective of the experiment. # noqa: E501\n\n :return: The objective of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3ObjectiveSpec\n ' return self._objective
-7,287,374,412,294,059,000
Gets the objective of this V1alpha3ExperimentSpec. # noqa: E501 Describes the objective of the experiment. # noqa: E501 :return: The objective of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: V1alpha3ObjectiveSpec
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
objective
ChenjunZou/katib
python
@property def objective(self): 'Gets the objective of this V1alpha3ExperimentSpec. # noqa: E501\n\n Describes the objective of the experiment. # noqa: E501\n\n :return: The objective of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3ObjectiveSpec\n ' return self._objective
@objective.setter def objective(self, objective): 'Sets the objective of this V1alpha3ExperimentSpec.\n\n Describes the objective of the experiment. # noqa: E501\n\n :param objective: The objective of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3ObjectiveSpec\n ' self._objective = objective
-4,573,635,184,005,900,000
Sets the objective of this V1alpha3ExperimentSpec. Describes the objective of the experiment. # noqa: E501 :param objective: The objective of this V1alpha3ExperimentSpec. # noqa: E501 :type: V1alpha3ObjectiveSpec
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
objective
ChenjunZou/katib
python
@objective.setter def objective(self, objective): 'Sets the objective of this V1alpha3ExperimentSpec.\n\n Describes the objective of the experiment. # noqa: E501\n\n :param objective: The objective of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3ObjectiveSpec\n ' self._objective = objective
@property def parallel_trial_count(self): 'Gets the parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n\n How many trials can be processed in parallel. Defaults to 3 # noqa: E501\n\n :return: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: int\n ' return self._parallel_trial_count
-989,895,674,724,652,300
Gets the parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 How many trials can be processed in parallel. Defaults to 3 # noqa: E501 :return: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: int
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
parallel_trial_count
ChenjunZou/katib
python
@property def parallel_trial_count(self): 'Gets the parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n\n How many trials can be processed in parallel. Defaults to 3 # noqa: E501\n\n :return: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: int\n ' return self._parallel_trial_count
@parallel_trial_count.setter def parallel_trial_count(self, parallel_trial_count): 'Sets the parallel_trial_count of this V1alpha3ExperimentSpec.\n\n How many trials can be processed in parallel. Defaults to 3 # noqa: E501\n\n :param parallel_trial_count: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :type: int\n ' self._parallel_trial_count = parallel_trial_count
-8,998,847,579,390,305,000
Sets the parallel_trial_count of this V1alpha3ExperimentSpec. How many trials can be processed in parallel. Defaults to 3 # noqa: E501 :param parallel_trial_count: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501 :type: int
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
parallel_trial_count
ChenjunZou/katib
python
@parallel_trial_count.setter def parallel_trial_count(self, parallel_trial_count): 'Sets the parallel_trial_count of this V1alpha3ExperimentSpec.\n\n How many trials can be processed in parallel. Defaults to 3 # noqa: E501\n\n :param parallel_trial_count: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501\n :type: int\n ' self._parallel_trial_count = parallel_trial_count
@property def parameters(self): 'Gets the parameters of this V1alpha3ExperimentSpec. # noqa: E501\n\n List of hyperparameter configurations. # noqa: E501\n\n :return: The parameters of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: list[V1alpha3ParameterSpec]\n ' return self._parameters
7,362,714,691,322,202,000
Gets the parameters of this V1alpha3ExperimentSpec. # noqa: E501 List of hyperparameter configurations. # noqa: E501 :return: The parameters of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: list[V1alpha3ParameterSpec]
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
parameters
ChenjunZou/katib
python
@property def parameters(self): 'Gets the parameters of this V1alpha3ExperimentSpec. # noqa: E501\n\n List of hyperparameter configurations. # noqa: E501\n\n :return: The parameters of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: list[V1alpha3ParameterSpec]\n ' return self._parameters
@parameters.setter def parameters(self, parameters): 'Sets the parameters of this V1alpha3ExperimentSpec.\n\n List of hyperparameter configurations. # noqa: E501\n\n :param parameters: The parameters of this V1alpha3ExperimentSpec. # noqa: E501\n :type: list[V1alpha3ParameterSpec]\n ' self._parameters = parameters
-6,827,287,592,512,544,000
Sets the parameters of this V1alpha3ExperimentSpec. List of hyperparameter configurations. # noqa: E501 :param parameters: The parameters of this V1alpha3ExperimentSpec. # noqa: E501 :type: list[V1alpha3ParameterSpec]
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
parameters
ChenjunZou/katib
python
@parameters.setter def parameters(self, parameters): 'Sets the parameters of this V1alpha3ExperimentSpec.\n\n List of hyperparameter configurations. # noqa: E501\n\n :param parameters: The parameters of this V1alpha3ExperimentSpec. # noqa: E501\n :type: list[V1alpha3ParameterSpec]\n ' self._parameters = parameters
@property def resume_policy(self): 'Gets the resume_policy of this V1alpha3ExperimentSpec. # noqa: E501\n\n Describes resuming policy which usually take effect after experiment terminated. # noqa: E501\n\n :return: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: str\n ' return self._resume_policy
-902,392,194,512,818,800
Gets the resume_policy of this V1alpha3ExperimentSpec. # noqa: E501 Describes resuming policy which usually take effect after experiment terminated. # noqa: E501 :return: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: str
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
resume_policy
ChenjunZou/katib
python
@property def resume_policy(self): 'Gets the resume_policy of this V1alpha3ExperimentSpec. # noqa: E501\n\n Describes resuming policy which usually take effect after experiment terminated. # noqa: E501\n\n :return: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: str\n ' return self._resume_policy
@resume_policy.setter def resume_policy(self, resume_policy): 'Sets the resume_policy of this V1alpha3ExperimentSpec.\n\n Describes resuming policy which usually take effect after experiment terminated. # noqa: E501\n\n :param resume_policy: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501\n :type: str\n ' self._resume_policy = resume_policy
-3,797,899,067,050,496,500
Sets the resume_policy of this V1alpha3ExperimentSpec. Describes resuming policy which usually take effect after experiment terminated. # noqa: E501 :param resume_policy: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501 :type: str
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
resume_policy
ChenjunZou/katib
python
@resume_policy.setter def resume_policy(self, resume_policy): 'Sets the resume_policy of this V1alpha3ExperimentSpec.\n\n Describes resuming policy which usually take effect after experiment terminated. # noqa: E501\n\n :param resume_policy: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501\n :type: str\n ' self._resume_policy = resume_policy
@property def trial_template(self): 'Gets the trial_template of this V1alpha3ExperimentSpec. # noqa: E501\n\n Template for each run of the trial. # noqa: E501\n\n :return: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3TrialTemplate\n ' return self._trial_template
5,719,433,743,809,543,000
Gets the trial_template of this V1alpha3ExperimentSpec. # noqa: E501 Template for each run of the trial. # noqa: E501 :return: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501 :rtype: V1alpha3TrialTemplate
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
trial_template
ChenjunZou/katib
python
@property def trial_template(self): 'Gets the trial_template of this V1alpha3ExperimentSpec. # noqa: E501\n\n Template for each run of the trial. # noqa: E501\n\n :return: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501\n :rtype: V1alpha3TrialTemplate\n ' return self._trial_template
@trial_template.setter def trial_template(self, trial_template): 'Sets the trial_template of this V1alpha3ExperimentSpec.\n\n Template for each run of the trial. # noqa: E501\n\n :param trial_template: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3TrialTemplate\n ' self._trial_template = trial_template
-3,362,570,201,147,115,000
Sets the trial_template of this V1alpha3ExperimentSpec. Template for each run of the trial. # noqa: E501 :param trial_template: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501 :type: V1alpha3TrialTemplate
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
trial_template
ChenjunZou/katib
python
@trial_template.setter def trial_template(self, trial_template): 'Sets the trial_template of this V1alpha3ExperimentSpec.\n\n Template for each run of the trial. # noqa: E501\n\n :param trial_template: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501\n :type: V1alpha3TrialTemplate\n ' self._trial_template = trial_template
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(V1alpha3ExperimentSpec, dict): for (key, value) in self.items(): result[key] = value return result
5,751,779,662,646,760,000
Returns the model properties as a dict
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
to_dict
ChenjunZou/katib
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(V1alpha3ExperimentSpec, dict): for (key, value) in self.items(): result[key] = value return result
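The to_dict above converts nested models recursively; a minimal standalone sketch of the same pattern (the Leaf class is hypothetical, standing in for a nested swagger model):

class Leaf:
    def to_dict(self):
        return {'x': 1}

value = [Leaf(), 2]
# Same element-wise conversion used for list attributes in to_dict above.
converted = list(map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
print(converted)  # [{'x': 1}, 2]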
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
to_str
ChenjunZou/katib
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
__repr__
ChenjunZou/katib
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, V1alpha3ExperimentSpec)): return False return (self.__dict__ == other.__dict__)
323,282,487,021,102,300
Returns true if both objects are equal
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
__eq__
ChenjunZou/katib
python
def __eq__(self, other): if (not isinstance(other, V1alpha3ExperimentSpec)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py
__ne__
ChenjunZou/katib
python
def __ne__(self, other): return (not (self == other))
def generate_class_label(data): '\n generates class label on a copy of data using the columns\n State, From_X, From_Y, To_X, To_Y\n ' r_data = data.copy() r_data['target'] = ((((((((r_data.State.astype(np.str) + '_') + r_data.From_X.astype(np.str)) + ',') + r_data.From_Y.astype(np.str)) + '_') + r_data.To_X.astype(np.str)) + ',') + r_data.To_Y.astype(np.str)) return r_data
-4,200,562,121,560,064,500
generates class label on a copy of data using the columns State, From_X, From_Y, To_X, To_Y
notebooks/pawel_ueb2/utility.py
generate_class_label
hhain/sdap17
python
def generate_class_label(data): '\n generates class label on a copy of data using the columns\n State, From_X, From_Y, To_X, To_Y\n ' r_data = data.copy() r_data['target'] = ((((((((r_data.State.astype(np.str) + '_') + r_data.From_X.astype(np.str)) + ',') + r_data.From_Y.astype(np.str)) + '_') + r_data.To_X.astype(np.str)) + ',') + r_data.To_Y.astype(np.str)) return r_data
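A minimal sketch of the target-label concatenation that generate_class_label performs, on a toy DataFrame (the column values below are made up for illustration):

import pandas as pd

df = pd.DataFrame({
    'State': ['Walking', 'Standing'],
    'From_X': [0, 1], 'From_Y': [0, 1],
    'To_X': [1, 1], 'To_Y': [2, 1],
})
# Same concatenation as generate_class_label, written with plain str casts.
df['target'] = (df.State.astype(str) + '_'
                + df.From_X.astype(str) + ',' + df.From_Y.astype(str)
                + '_' + df.To_X.astype(str) + ',' + df.To_Y.astype(str))
print(df['target'].tolist())  # ['Walking_0,0_1,2', 'Standing_1,1_1,1']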
def generate_class_label_and_drop(data): '\n generates class label on a copy of data using the columns\n State, From_X, From_Y, To_X, To_Y\n ' r_data = data.copy() r_data['target'] = ((((((((r_data.State.astype(np.str) + '_') + r_data.From_X.astype(np.str)) + ',') + r_data.From_Y.astype(np.str)) + '_') + r_data.To_X.astype(np.str)) + ',') + r_data.To_Y.astype(np.str)) r_data = r_data.drop('From_X', 1) r_data = r_data.drop('From_Y', 1) r_data = r_data.drop('To_Y', 1) r_data = r_data.drop('To_X', 1) r_data = r_data.drop('State', 1) r_data = r_data.drop('ID', 1) r_data = r_data.drop('Rng_ID', 1) r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] return r_data.reset_index()
-977,516,299,108,665,900
generates class label on a copy of data using the columns State, From_X, From_Y, To_X, To_Y
notebooks/pawel_ueb2/utility.py
generate_class_label_and_drop
hhain/sdap17
python
def generate_class_label_and_drop(data): '\n generates class label on a copy of data using the columns\n State, From_X, From_Y, To_X, To_Y\n ' r_data = data.copy() r_data['target'] = ((((((((r_data.State.astype(np.str) + '_') + r_data.From_X.astype(np.str)) + ',') + r_data.From_Y.astype(np.str)) + '_') + r_data.To_X.astype(np.str)) + ',') + r_data.To_Y.astype(np.str)) r_data = r_data.drop('From_X', 1) r_data = r_data.drop('From_Y', 1) r_data = r_data.drop('To_Y', 1) r_data = r_data.drop('To_X', 1) r_data = r_data.drop('State', 1) r_data = r_data.drop('ID', 1) r_data = r_data.drop('Rng_ID', 1) r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] return r_data.reset_index()
def generate_class_label_presence(data, state_variable='target'): "\n generates class label only for presence on a copy of data using only the columns\n Removes: Pause and merges 'Step' and 'Stand' to same class\n " r_data = data.copy() r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data.loc[(r_data['target'].str.contains('Step'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Stand'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Walk'), 'target')] = 'Present' r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] r_data.loc[((~ r_data['target'].str.contains('Present')), 'target')] = 'Not Present' return r_data.reset_index()
-1,007,735,542,717,022,200
generates class label only for presence on a copy of data using only the columns Removes: Pause and merges 'Step' and 'Stand' to same class
notebooks/pawel_ueb2/utility.py
generate_class_label_presence
hhain/sdap17
python
def generate_class_label_presence(data, state_variable='target'): "\n generates class label only for presence on a copy of data using only the columns\n Removes: Pause and merges 'Step' and 'Stand' to same class\n " r_data = data.copy() r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data.loc[(r_data['target'].str.contains('Step'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Stand'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Walk'), 'target')] = 'Present' r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] r_data.loc[((~ r_data['target'].str.contains('Present')), 'target')] = 'Not Present' return r_data.reset_index()
def generate_class_label_dyn_vs_empty(data, state_variable='target'): "\n generates class label only for presence on a copy of data using only the columns\n Removes: Pause and merges 'Step' and 'Stand' to same class\n " r_data = data.copy() r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data.loc[(r_data['target'].str.contains('Walk'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Step'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Empty'), 'target')] = 'Not Present' r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Stand'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] return r_data.reset_index()
-1,956,228,616,649,304,600
generates class label only for presence on a copy of data using only the columns Removes: Pause and merges 'Step' and 'Stand' to same class
notebooks/pawel_ueb2/utility.py
generate_class_label_dyn_vs_empty
hhain/sdap17
python
def generate_class_label_dyn_vs_empty(data, state_variable='target'): "\n generates class label only for presence on a copy of data using only the columns\n Removes: Pause and merges 'Step' and 'Stand' to same class\n " r_data = data.copy() r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data.loc[(r_data['target'].str.contains('Walk'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Step'), 'target')] = 'Present' r_data.loc[(r_data['target'].str.contains('Empty'), 'target')] = 'Not Present' r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Stand'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] return r_data.reset_index()
def generate_class_label_presence_and_dynamic(data, state_variable='State'): "\n generates class label only for presence on a copy of data using only the columns\n Removes: Pause and merges 'Step' and 'Stand' to same class\n " r_data = data.copy() r_data['target'] = r_data[state_variable].astype(np.str) r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] r_data.loc[(r_data['target'].str.contains('Step'), 'target')] = 'Step' r_data.loc[(r_data['target'].str.contains('Walki'), 'target')] = 'Walk' r_data.loc[(r_data['target'].str.contains('Stand'), 'target')] = 'Stand' r_data.loc[(r_data['target'].str.contains('Empty'), 'target')] = 'Empty' return r_data
-8,570,686,824,285,075,000
generates class label only for presence on a copy of data using only the columns Removes: Pause and merges 'Step' and 'Stand' to same class
notebooks/pawel_ueb2/utility.py
generate_class_label_presence_and_dynamic
hhain/sdap17
python
def generate_class_label_presence_and_dynamic(data, state_variable='State'): "\n generates class label only for presence on a copy of data using only the columns\n Removes: Pause and merges 'Step' and 'Stand' to same class\n " r_data = data.copy() r_data['target'] = r_data[state_variable].astype(np.str) r_data = r_data[(~ r_data['target'].str.contains('Pause'))] r_data = r_data[(~ r_data['target'].str.contains('Enter'))] r_data = r_data[(~ r_data['target'].str.contains('Leave'))] r_data.loc[(r_data['target'].str.contains('Step'), 'target')] = 'Step' r_data.loc[(r_data['target'].str.contains('Walki'), 'target')] = 'Walk' r_data.loc[(r_data['target'].str.contains('Stand'), 'target')] = 'Stand' r_data.loc[(r_data['target'].str.contains('Empty'), 'target')] = 'Empty' return r_data
def get_contigous_borders(indices): '\n helper function to derive contiguous borders from a list of indices\n \n Parameters\n ----------\n indicies : all indices at which a certain thing occurs\n \n \n Returns\n -------\n list of groups when the indices starts and ends (note: last element is the real last element of the group _not_ n+1)\n ' r = [[indices[0]]] prev = r[0][0] for (ix, i) in enumerate(indices): if ((i - prev) > 1): r[(- 1)].append(indices[(ix - 1)]) r.append([indices[ix]]) prev = i r[(- 1)].append(indices[(- 1)]) return r
-6,765,649,735,253,048,000
helper function to derive contiguous borders from a list of indices Parameters ---------- indices : all indices at which a certain thing occurs Returns ------- list of groups where the indices start and end (note: last element is the real last element of the group _not_ n+1)
notebooks/pawel_ueb2/utility.py
get_contigous_borders
hhain/sdap17
python
def get_contigous_borders(indices): '\n helper function to derive contiguous borders from a list of indices\n \n Parameters\n ----------\n indicies : all indices at which a certain thing occurs\n \n \n Returns\n -------\n list of groups when the indices starts and ends (note: last element is the real last element of the group _not_ n+1)\n ' r = [[indices[0]]] prev = r[0][0] for (ix, i) in enumerate(indices): if ((i - prev) > 1): r[(- 1)].append(indices[(ix - 1)]) r.append([indices[ix]]) prev = i r[(- 1)].append(indices[(- 1)]) return r
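A worked example of the contiguous-border grouping above, re-expressed as a small standalone function (same logic as get_contigous_borders, not the repository's code):

def contiguous_borders(indices):
    # Collapse runs of consecutive indices into [start, end] pairs (end inclusive).
    groups = [[indices[0]]]
    prev = indices[0]
    for i in indices[1:]:
        if i - prev > 1:
            groups[-1].append(prev)
            groups.append([i])
        prev = i
    groups[-1].append(indices[-1])
    return groups

print(contiguous_borders([2, 3, 4, 9, 10, 15]))  # [[2, 4], [9, 10], [15, 15]]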
def get_contiguous_activity_borders(data, label): '\n returns a dict with all starts ends of all labels provided in label variable\n ' labels = data[label].unique() r = {} for l in labels: a = data[(data[label] == l)].index.values r[l] = get_contigous_borders(a) r['length'] = data.shape[0] return r
-4,592,589,030,952,876,500
returns a dict with the start and end indices of all labels provided in the label variable
notebooks/pawel_ueb2/utility.py
get_contiguous_activity_borders
hhain/sdap17
python
def get_contiguous_activity_borders(data, label): '\n \n ' labels = data[label].unique() r = {} for l in labels: a = data[(data[label] == l)].index.values r[l] = get_contigous_borders(a) r['length'] = data.shape[0] return r
def annotate(a): '\n draws annotation into a sns heatmap using plt annotation\n \n a : dictonary with activity name and borders\n ' min_length = 4 for k in a.keys(): if (k == 'length'): continue borders = a[k] for (s, e) in borders: s_r = (a['length'] - s) e_r = (a['length'] - e) plt.annotate('', xy=(4, s_r), xycoords='data', xytext=(4, e_r), textcoords='data', arrowprops=dict(shrink=0.0, headwidth=10.0, headlength=1.0, width=0.25, shrinkA=0.0, shrinkB=0.0)) if ((s_r - e_r) < min_length): continue plt.annotate(k, xy=(7, ((s_r - ((s_r - e_r) // 2)) - (min_length // 2))), xycoords='data', xytext=(7, ((s_r - ((s_r - e_r) // 2)) - (min_length // 2))), textcoords='data', size=9)
5,088,699,391,643,657,000
draws annotation into a sns heatmap using plt annotation a : dictionary with activity name and borders
notebooks/pawel_ueb2/utility.py
annotate
hhain/sdap17
python
def annotate(a): '\n draws annotation into a sns heatmap using plt annotation\n \n a : dictonary with activity name and borders\n ' min_length = 4 for k in a.keys(): if (k == 'length'): continue borders = a[k] for (s, e) in borders: s_r = (a['length'] - s) e_r = (a['length'] - e) plt.annotate('', xy=(4, s_r), xycoords='data', xytext=(4, e_r), textcoords='data', arrowprops=dict(shrink=0.0, headwidth=10.0, headlength=1.0, width=0.25, shrinkA=0.0, shrinkB=0.0)) if ((s_r - e_r) < min_length): continue plt.annotate(k, xy=(7, ((s_r - ((s_r - e_r) // 2)) - (min_length // 2))), xycoords='data', xytext=(7, ((s_r - ((s_r - e_r) // 2)) - (min_length // 2))), textcoords='data', size=9)
@pytest.mark.django_db def test_phone_create(self): ' Test phone contact method ' phone_num = '7739441467' phone = PhoneContactMethodFactory.create(phone=phone_num) assert (phone_num == phone.phone) assert (phone.id is not None)
3,037,570,262,273,459,000
Test phone contact method
web/tests/test_models.py
test_phone_create
DaveyDevs/maps
python
@pytest.mark.django_db def test_phone_create(self): ' ' phone_num = '7739441467' phone = PhoneContactMethodFactory.create(phone=phone_num) assert (phone_num == phone.phone) assert (phone.id is not None)
@pytest.mark.django_db def test_phone_create_invalid_num(self): ' Test phone contact method ' phone_num = 'abcdefsfdsf' phone = PhoneContactMethodFactory.create(phone=phone_num) assert (phone_num == phone.phone) assert (phone.id is not None) print('\n\n\n\n-------------id is ', id)
4,071,800,736,813,351,400
Test phone contact method
web/tests/test_models.py
test_phone_create_invalid_num
DaveyDevs/maps
python
@pytest.mark.django_db def test_phone_create_invalid_num(self): ' ' phone_num = 'abcdefsfdsf' phone = PhoneContactMethodFactory.create(phone=phone_num) assert (phone_num == phone.phone) assert (phone.id is not None) print('\n\n\n\n-------------id is ', id)
@pytest.mark.django_db def test_coop_type_create(self): ' Test coop type model ' coop_type = CoopTypeFactory(name='Test Coop Type Name') assert (coop_type.name == 'Test Coop Type Name')
-4,583,710,025,203,208,700
Test coop type model
web/tests/test_models.py
test_coop_type_create
DaveyDevs/maps
python
@pytest.mark.django_db def test_coop_type_create(self): ' ' coop_type = CoopTypeFactory(name='Test Coop Type Name') assert (coop_type.name == 'Test Coop Type Name')
@pytest.mark.django_db def test_address_create(self): ' Test address model ' address = AddressFactory() assert (address is not None)
5,765,578,803,282,311,000
Test address model
web/tests/test_models.py
test_address_create
DaveyDevs/maps
python
@pytest.mark.django_db def test_address_create(self): ' ' address = AddressFactory() assert (address is not None)
@pytest.mark.django_db def test_coop_create(self): ' Test customer model ' coop_from_factory = CoopFactory() self.assertIsNotNone(coop_from_factory) coop = Coop.objects.create(name='test') coop.addresses.set(coop_from_factory.addresses.all()) self.assertIsNotNone(coop)
2,080,276,956,993,651,500
Test customer model
web/tests/test_models.py
test_coop_create
DaveyDevs/maps
python
@pytest.mark.django_db def test_coop_create(self): ' ' coop_from_factory = CoopFactory() self.assertIsNotNone(coop_from_factory) coop = Coop.objects.create(name='test') coop.addresses.set(coop_from_factory.addresses.all()) self.assertIsNotNone(coop)
@pytest.mark.django_db def test_coop_create_with_existing_type(self): ' Test customer model ' coop_from_factory = CoopFactory() self.assertIsNotNone(coop_from_factory) coop_types = coop_from_factory.types coop = CoopFactory.create(types=[coop_types.all().first()], addresses=coop_from_factory.addresses.all()) self.assertIsNotNone(coop)
130,662,896,693,943,970
Test customer model
web/tests/test_models.py
test_coop_create_with_existing_type
DaveyDevs/maps
python
@pytest.mark.django_db def test_coop_create_with_existing_type(self): ' ' coop_from_factory = CoopFactory() self.assertIsNotNone(coop_from_factory) coop_types = coop_from_factory.types coop = CoopFactory.create(types=[coop_types.all().first()], addresses=coop_from_factory.addresses.all()) self.assertIsNotNone(coop)
@pytest.mark.django_db def test_coop_create_with_no_types(self): ' Test customer model ' print('\n\n\n\n**********-------- starting test ....\n') coop = CoopFactory.build(types=[]) print('phone:', coop.phone.phone) print('email:', coop.email.email) coop.full_clean() self.assertIsNotNone(coop) self.assertIsNone(coop.id)
8,761,434,052,913,679,000
Test customer model
web/tests/test_models.py
test_coop_create_with_no_types
DaveyDevs/maps
python
@pytest.mark.django_db def test_coop_create_with_no_types(self): ' ' print('\n\n\n\n**********-------- starting test ....\n') coop = CoopFactory.build(types=[]) print('phone:', coop.phone.phone) print('email:', coop.email.email) coop.full_clean() self.assertIsNotNone(coop) self.assertIsNone(coop.id)
def test_search_coops_wo_coords(self): '\n Look for coops with addresses without latitude/longitude coords\n ' address = AddressFactory(latitude=None, longitude=None) coop_from_factory = CoopFactory(addresses=[address]) coops = Coop.objects.find_wo_coords() results = list(coops) assert (len(results) > 0), 'Failed to find any matching results.' assert (coop_from_factory in list(coops)), 'Failed to find coop.'
-1,202,784,789,676,018,700
Look for coops with addresses without latitude/longitude coords
web/tests/test_models.py
test_search_coops_wo_coords
DaveyDevs/maps
python
def test_search_coops_wo_coords(self): '\n \n ' address = AddressFactory(latitude=None, longitude=None) coop_from_factory = CoopFactory(addresses=[address]) coops = Coop.objects.find_wo_coords() results = list(coops) assert (len(results) > 0), 'Failed to find any matching results.' assert (coop_from_factory in list(coops)), 'Failed to find coop.'
def make_request(reactor, method, path, content=b'', access_token=None, request=SynapseRequest, shorthand=True): '\n Make a web request using the given method and path, feed it the\n content, and return the Request and the Channel underneath.\n\n Args:\n method (bytes/unicode): The HTTP request method ("verb").\n path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.\n escaped UTF-8 & spaces and such).\n content (bytes or dict): The body of the request. JSON-encoded, if\n a dict.\n shorthand: Whether to try and be helpful and prefix the given URL\n with the usual REST API path, if it doesn\'t contain it.\n\n Returns:\n A synapse.http.site.SynapseRequest.\n ' if (not isinstance(method, bytes)): method = method.encode('ascii') if (not isinstance(path, bytes)): path = path.encode('ascii') if (shorthand and (not path.startswith(b'/_matrix'))): path = (b'/_matrix/client/r0/' + path) path = path.replace(b'//', b'/') if isinstance(content, text_type): content = content.encode('utf8') site = FakeSite() channel = FakeChannel(reactor) req = request(site, channel) req.process = (lambda : b'') req.content = BytesIO(content) if access_token: req.requestHeaders.addRawHeader(b'Authorization', (b'Bearer ' + access_token.encode('ascii'))) if content: req.requestHeaders.addRawHeader(b'Content-Type', b'application/json') req.requestReceived(method, path, b'1.1') return (req, channel)
-1,632,200,608,994,487,800
Make a web request using the given method and path, feed it the content, and return the Request and the Channel underneath. Args: method (bytes/unicode): The HTTP request method ("verb"). path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such). content (bytes or dict): The body of the request. JSON-encoded, if a dict. shorthand: Whether to try and be helpful and prefix the given URL with the usual REST API path, if it doesn't contain it. Returns: A synapse.http.site.SynapseRequest.
tests/server.py
make_request
AlohaHealth/synapse
python
def make_request(reactor, method, path, content=b'', access_token=None, request=SynapseRequest, shorthand=True): '\n Make a web request using the given method and path, feed it the\n content, and return the Request and the Channel underneath.\n\n Args:\n method (bytes/unicode): The HTTP request method ("verb").\n path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.\n escaped UTF-8 & spaces and such).\n content (bytes or dict): The body of the request. JSON-encoded, if\n a dict.\n shorthand: Whether to try and be helpful and prefix the given URL\n with the usual REST API path, if it doesn\'t contain it.\n\n Returns:\n A synapse.http.site.SynapseRequest.\n ' if (not isinstance(method, bytes)): method = method.encode('ascii') if (not isinstance(path, bytes)): path = path.encode('ascii') if (shorthand and (not path.startswith(b'/_matrix'))): path = (b'/_matrix/client/r0/' + path) path = path.replace(b'//', b'/') if isinstance(content, text_type): content = content.encode('utf8') site = FakeSite() channel = FakeChannel(reactor) req = request(site, channel) req.process = (lambda : b'') req.content = BytesIO(content) if access_token: req.requestHeaders.addRawHeader(b'Authorization', (b'Bearer ' + access_token.encode('ascii'))) if content: req.requestHeaders.addRawHeader(b'Content-Type', b'application/json') req.requestReceived(method, path, b'1.1') return (req, channel)
def wait_until_result(clock, request, timeout=100): '\n Wait until the request is finished.\n ' clock.run() x = 0 while (not request.finished): if request._channel._producer: request._channel._producer.resumeProducing() x += 1 if (x > timeout): raise TimedOutException('Timed out waiting for request to finish.') clock.advance(0.1)
1,815,053,821,847,915,300
Wait until the request is finished.
tests/server.py
wait_until_result
AlohaHealth/synapse
python
def wait_until_result(clock, request, timeout=100): '\n \n ' clock.run() x = 0 while (not request.finished): if request._channel._producer: request._channel._producer.resumeProducing() x += 1 if (x > timeout): raise TimedOutException('Timed out waiting for request to finish.') clock.advance(0.1)
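A hedged usage sketch combining make_request and wait_until_result above; the reactor/clock is assumed to come from the surrounding test setup (it must provide run() and advance()), and the access token is a placeholder:

# 'sync' is expanded to '/_matrix/client/r0/sync' by the shorthand handling.
request, channel = make_request(
    reactor,
    "GET",
    "sync",
    access_token="placeholder_token",
)
wait_until_result(reactor, request)  # drive the clock until the request finishes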
def setup_test_homeserver(cleanup_func, *args, **kwargs): '\n Set up a synchronous test server, driven by the reactor used by\n the homeserver.\n ' d = _sth(cleanup_func, *args, **kwargs).result if isinstance(d, Failure): d.raiseException() clock = d.get_clock() pool = d.get_db_pool() def runWithConnection(func, *args, **kwargs): return threads.deferToThreadPool(pool._reactor, pool.threadpool, pool._runWithConnection, func, *args, **kwargs) def runInteraction(interaction, *args, **kwargs): return threads.deferToThreadPool(pool._reactor, pool.threadpool, pool._runInteraction, interaction, *args, **kwargs) pool.runWithConnection = runWithConnection pool.runInteraction = runInteraction class ThreadPool(): '\n Threadless thread pool.\n ' def start(self): pass def stop(self): pass def callInThreadWithCallback(self, onResult, function, *args, **kwargs): def _(res): if isinstance(res, Failure): onResult(False, res) else: onResult(True, res) d = Deferred() d.addCallback((lambda x: function(*args, **kwargs))) d.addBoth(_) clock._reactor.callLater(0, d.callback, True) return d clock.threadpool = ThreadPool() pool.threadpool = ThreadPool() pool.running = True return d
2,878,812,827,671,844,000
Set up a synchronous test server, driven by the reactor used by the homeserver.
tests/server.py
setup_test_homeserver
AlohaHealth/synapse
python
def setup_test_homeserver(cleanup_func, *args, **kwargs): '\n Set up a synchronous test server, driven by the reactor used by\n the homeserver.\n ' d = _sth(cleanup_func, *args, **kwargs).result if isinstance(d, Failure): d.raiseException() clock = d.get_clock() pool = d.get_db_pool() def runWithConnection(func, *args, **kwargs): return threads.deferToThreadPool(pool._reactor, pool.threadpool, pool._runWithConnection, func, *args, **kwargs) def runInteraction(interaction, *args, **kwargs): return threads.deferToThreadPool(pool._reactor, pool.threadpool, pool._runInteraction, interaction, *args, **kwargs) pool.runWithConnection = runWithConnection pool.runInteraction = runInteraction class ThreadPool(): '\n Threadless thread pool.\n ' def start(self): pass def stop(self): pass def callInThreadWithCallback(self, onResult, function, *args, **kwargs): def _(res): if isinstance(res, Failure): onResult(False, res) else: onResult(True, res) d = Deferred() d.addCallback((lambda x: function(*args, **kwargs))) d.addBoth(_) clock._reactor.callLater(0, d.callback, True) return d clock.threadpool = ThreadPool() pool.threadpool = ThreadPool() pool.running = True return d
def callFromThread(self, callback, *args, **kwargs): '\n Make the callback fire in the next reactor iteration.\n ' d = Deferred() d.addCallback((lambda x: callback(*args, **kwargs))) self.callLater(0, d.callback, True) return d
-1,258,819,838,172,941,300
Make the callback fire in the next reactor iteration.
tests/server.py
callFromThread
AlohaHealth/synapse
python
def callFromThread(self, callback, *args, **kwargs): '\n \n ' d = Deferred() d.addCallback((lambda x: callback(*args, **kwargs))) self.callLater(0, d.callback, True) return d
def __init__(self, output_dim, nb_ctrl_sig, **kwargs): "\n This layer is used to split the output of a previous Dense layer into\n nb_ctrl_sig groups of size output_dim, and choose which group to provide\n as output using a discrete control signal.\n It takes as input two tensors, namely the output of the previous layer \n and a column tensor with int32 or int64 values for the control signal.\n\n The Dense input to this layer must be of shape (None, prev_output_dim),\n where prev_output_dim = output_dim * nb_ctrl_sig.\n No checks are done at runtime to ensure that the input to the layer is\n correct, so it's better to double check.\n\n An example usage of this layer may be:\n\n input = Input(shape=(3,))\n control = Input(shape=(1,), dtype='int32')\n hidden = Dense(6)(i) # output_dim == 2, nb_ctrl_sig == 3\n output = Multiplexer(2, 3)([hidden, control])\n model = Model(input=[input, control], output=output)\n \n ...\n \n x = randn(3) # Input has size 3\n ctrl = array([0, 1, 2])\n \n # Outputs the first two neurons of the Dense layer\n model.predict([x, ctrl[0]])\n \n # Outputs the middle two neurons of the Dense layer\n model.predict([x, ctrl[1]])\n \n # Outputs the last two neurons of the Dense layer\n model.predict([x, ctrl[2]])\n \n # Arguments\n output_dim: positive integer, dimensionality of the output space.\n nb_ctrl_sig: positive integer, number of groups in which to split \n the output of the previous layer. Must satisfy the relation:\n input_size = nb_ctrl_sig * output_dim\n \n " self.output_dim = output_dim self.nb_ctrl_sig = nb_ctrl_sig super(Multiplexer, self).__init__(**kwargs)
-5,426,948,042,926,424,000
This layer is used to split the output of a previous Dense layer into nb_ctrl_sig groups of size output_dim, and choose which group to provide as output using a discrete control signal. It takes as input two tensors, namely the output of the previous layer and a column tensor with int32 or int64 values for the control signal. The Dense input to this layer must be of shape (None, prev_output_dim), where prev_output_dim = output_dim * nb_ctrl_sig. No checks are done at runtime to ensure that the input to the layer is correct, so it's better to double check. An example usage of this layer may be: input = Input(shape=(3,)) control = Input(shape=(1,), dtype='int32') hidden = Dense(6)(i) # output_dim == 2, nb_ctrl_sig == 3 output = Multiplexer(2, 3)([hidden, control]) model = Model(input=[input, control], output=output) ... x = randn(3) # Input has size 3 ctrl = array([0, 1, 2]) # Outputs the first two neurons of the Dense layer model.predict([x, ctrl[0]]) # Outputs the middle two neurons of the Dense layer model.predict([x, ctrl[1]]) # Outputs the last two neurons of the Dense layer model.predict([x, ctrl[2]]) # Arguments output_dim: positive integer, dimensionality of the output space. nb_ctrl_sig: positive integer, number of groups in which to split the output of the previous layer. Must satisfy the relation: input_size = nb_ctrl_sig * output_dim
multiplexer.py
__init__
2vin/multiplexed_cnn
python
def __init__(self, output_dim, nb_ctrl_sig, **kwargs): "\n This layer is used to split the output of a previous Dense layer into\n nb_ctrl_sig groups of size output_dim, and choose which group to provide\n as output using a discrete control signal.\n It takes as input two tensors, namely the output of the previous layer \n and a column tensor with int32 or int64 values for the control signal.\n\n The Dense input to this layer must be of shape (None, prev_output_dim),\n where prev_output_dim = output_dim * nb_ctrl_sig.\n No checks are done at runtime to ensure that the input to the layer is\n correct, so it's better to double check.\n\n An example usage of this layer may be:\n\n input = Input(shape=(3,))\n control = Input(shape=(1,), dtype='int32')\n hidden = Dense(6)(i) # output_dim == 2, nb_ctrl_sig == 3\n output = Multiplexer(2, 3)([hidden, control])\n model = Model(input=[input, control], output=output)\n \n ...\n \n x = randn(3) # Input has size 3\n ctrl = array([0, 1, 2])\n \n # Outputs the first two neurons of the Dense layer\n model.predict([x, ctrl[0]])\n \n # Outputs the middle two neurons of the Dense layer\n model.predict([x, ctrl[1]])\n \n # Outputs the last two neurons of the Dense layer\n model.predict([x, ctrl[2]])\n \n # Arguments\n output_dim: positive integer, dimensionality of the output space.\n nb_ctrl_sig: positive integer, number of groups in which to split \n the output of the previous layer. Must satisfy the relation:\n input_size = nb_ctrl_sig * output_dim\n \n " self.output_dim = output_dim self.nb_ctrl_sig = nb_ctrl_sig super(Multiplexer, self).__init__(**kwargs)
@staticmethod def multiplexer(args, output_size, nb_actions): '\n Returns a tensor of shape (None, output_size) where each sample is\n the result of masking each sample in full_input with a binary mask that \n preserves only output_size elements, based on the corresponding control \n value in indices.\n ' (full_input, indices) = args '\n For example, given:\n full_input: [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]\n nb_actions: 3\n output_size: 2\n indices: [[0], [2]]\n desired output: [[1, 2], [11, 12]]\n we want to output the first two elements (index 0) of the first sample \n and the last two elements (index 2) of the second sample.\n To do this, we need the absolute indices [[0, 1], [4, 5]].\n\n To build these, first compute the base absolute indices (0 and 4) by\n multiplying the control indices for the output size:\n [[0], [2]] * 2 = [[0], [4]]\n\n ' base_absolute_indices = tf.multiply(indices, output_size) '\n Build an array containing the base absolute indices repeated output_size\n times:\n [[0, 0], [4, 4]]\n ' bai_repeated = tf.tile(base_absolute_indices, [1, output_size]) '\n Finally, add range(output_size) to these tensors to get the full\n absolute indices:\n [0, 0] + [0, 1] = [0, 1]\n [4, 4] + [0, 1] = [4, 5]\n so we have:\n [[0, 1], [4, 5]]\n ' absolute_indices = tf.add(bai_repeated, tf.range(output_size)) '\n Flatten this tensor in order to compute the one hot encoding for each \n absolute index:\n [0, 1, 4, 5]\n ' ai_flat = tf.reshape(absolute_indices, [(- 1)]) '\n Compute the one-hot encoding for the absolute indices.\n From [0, 1, 4, 5] we get:\n [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]\n ' ai_onehot = tf.one_hot(ai_flat, (output_size * nb_actions)) '\n Build the mask for full_input from the one-hot-encoded absolute indices.\n We need to group the one-hot absolute indices into groups of output_size\n elements.\n We get:\n [\n [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]],\n [[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]\n ]\n ' group_shape = [(- 1), output_size, (output_size * nb_actions)] group = tf.reshape(ai_onehot, group_shape) '\n Reduce_sum along axis 1 to collapse the group and get the binary masks.\n [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1]]\n ' masks = tf.reduce_sum(group, axis=1) '\n Convert the mask to boolean.\n [[True, True, False, False, False, False],\n [False, False, False, False, True, True]]\n ' zero = tf.constant(0, dtype=tf.float32) bool_masks = tf.not_equal(masks, zero) '\n Convert the boolean masks back to absolute indices for the full_input\n tensor (each element represents [sample index, value index]).\n We get:\n [[0, 0], [0, 1], [1, 4], [1, 5]]\n ' ai_mask = tf.where(bool_masks) '\n Apply the masks to full_input. We get a 1D tensor:\n [1, 2, 11, 12]\n ' reduced_output = tf.gather_nd(full_input, ai_mask) '\n Reshape the reduction to match the output shape.\n We get:\n [[1, 2], [11, 12]]\n ' return tf.reshape(reduced_output, [(- 1), output_size])
7,943,560,504,436,702,000
Returns a tensor of shape (None, output_size) where each sample is the result of masking each sample in full_input with a binary mask that preserves only output_size elements, based on the corresponding control value in indices.
multiplexer.py
multiplexer
2vin/multiplexed_cnn
python
@staticmethod def multiplexer(args, output_size, nb_actions): '\n Returns a tensor of shape (None, output_size) where each sample is\n the result of masking each sample in full_input with a binary mask that \n preserves only output_size elements, based on the corresponding control \n value in indices.\n ' (full_input, indices) = args '\n For example, given:\n full_input: [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]\n nb_actions: 3\n output_size: 2\n indices: [[0], [2]]\n desired output: [[1, 2], [11, 12]]\n we want to output the first two elements (index 0) of the first sample \n and the last two elements (index 2) of the second sample.\n To do this, we need the absolute indices [[0, 1], [4, 5]].\n\n To build these, first compute the base absolute indices (0 and 4) by\n multiplying the control indices for the output size:\n [[0], [2]] * 2 = [[0], [4]]\n\n ' base_absolute_indices = tf.multiply(indices, output_size) '\n Build an array containing the base absolute indices repeated output_size\n times:\n [[0, 0], [4, 4]]\n ' bai_repeated = tf.tile(base_absolute_indices, [1, output_size]) '\n Finally, add range(output_size) to these tensors to get the full\n absolute indices:\n [0, 0] + [0, 1] = [0, 1]\n [4, 4] + [0, 1] = [4, 5]\n so we have:\n [[0, 1], [4, 5]]\n ' absolute_indices = tf.add(bai_repeated, tf.range(output_size)) '\n Flatten this tensor in order to compute the one hot encoding for each \n absolute index:\n [0, 1, 4, 5]\n ' ai_flat = tf.reshape(absolute_indices, [(- 1)]) '\n Compute the one-hot encoding for the absolute indices.\n From [0, 1, 4, 5] we get:\n [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]\n ' ai_onehot = tf.one_hot(ai_flat, (output_size * nb_actions)) '\n Build the mask for full_input from the one-hot-encoded absolute indices.\n We need to group the one-hot absolute indices into groups of output_size\n elements.\n We get:\n [\n [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]],\n [[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]\n ]\n ' group_shape = [(- 1), output_size, (output_size * nb_actions)] group = tf.reshape(ai_onehot, group_shape) '\n Reduce_sum along axis 1 to collapse the group and get the binary masks.\n [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1]]\n ' masks = tf.reduce_sum(group, axis=1) '\n Convert the mask to boolean.\n [[True, True, False, False, False, False],\n [False, False, False, False, True, True]]\n ' zero = tf.constant(0, dtype=tf.float32) bool_masks = tf.not_equal(masks, zero) '\n Convert the boolean masks back to absolute indices for the full_input\n tensor (each element represents [sample index, value index]).\n We get:\n [[0, 0], [0, 1], [1, 4], [1, 5]]\n ' ai_mask = tf.where(bool_masks) '\n Apply the masks to full_input. We get a 1D tensor:\n [1, 2, 11, 12]\n ' reduced_output = tf.gather_nd(full_input, ai_mask) '\n Reshape the reduction to match the output shape.\n We get:\n [[1, 2], [11, 12]]\n ' return tf.reshape(reduced_output, [(- 1), output_size])
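The index arithmetic above can be checked with a small NumPy sketch (same numbers as the worked example in the comments; fancy indexing stands in for the one-hot masking and gather_nd):

import numpy as np

full_input = np.array([[1, 2, 3, 4, 5, 6],
                       [7, 8, 9, 10, 11, 12]])
indices = np.array([0, 2])      # control signal per sample
output_size = 2

rows = np.arange(full_input.shape[0])[:, None]
cols = indices[:, None] * output_size + np.arange(output_size)  # [[0, 1], [4, 5]]
print(full_input[rows, cols])   # [[ 1  2] [11 12]]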
def generate_token(user): " Currently this is workaround\n since the latest version that already has this function\n is not published on PyPI yet and we don't want\n to install the package directly from GitHub.\n See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145\n " jwt = current_app.extensions['jwt'] token = jwt.jwt_encode_callback(user) return token
8,879,896,876,340,914,000
Currently this is workaround since the latest version that already has this function is not published on PyPI yet and we don't want to install the package directly from GitHub. See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145
Chapter04/app/auth/resources.py
generate_token
Abhishek1373/Building-Serverless-Python-Web-Services-with-Zappa
python
def generate_token(user): " Currently this is workaround\n since the latest version that already has this function\n is not published on PyPI yet and we don't want\n to install the package directly from GitHub.\n See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145\n " jwt = current_app.extensions['jwt'] token = jwt.jwt_encode_callback(user) return token
def crawl_page(project_data, prerequisites): ' Picks a random image off of the passed URL.' result = {'status': 'success', 'image': None} url = project_data.get('url') if (not url): result['status'] = 'error' result['error_message'] = 'URL was not provided.' return result logger.info('Starting to crawl %s', url) images = find_images(url) num_images = len(images) logger.info('Found %s images', num_images) if (num_images == 0): result['status'] = 'error' result['error_message'] = 'Unable to find images at the provided URL.' return result logger.info('Picking a random one...') image = random.choice(list(images)) result['image'] = image return result
9,174,871,841,792,264,000
Picks a random image off of the passed URL.
simple_workflow/v1/crawl.py
crawl_page
b12io/orchestra
python
def crawl_page(project_data, prerequisites): ' ' result = {'status': 'success', 'image': None} url = project_data.get('url') if (not url): result['status'] = 'error' result['error_message'] = 'URL was not provided.' return result logger.info('Starting to crawl %s', url) images = find_images(url) num_images = len(images) logger.info('Found %s images', num_images) if (num_images == 0): result['status'] = 'error' result['error_message'] = 'Unable to find images at the provided URL.' return result logger.info('Picking a random one...') image = random.choice(list(images)) result['image'] = image return result
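A minimal invocation sketch for crawl_page; the URL is hypothetical and the prerequisites argument is unused by this step:

result = crawl_page({'url': 'https://example.com'}, prerequisites={})
if result['status'] == 'success':
    print('picked image:', result['image'])
else:
    print('error:', result['error_message'])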
def find_images(url): " Fetches a url's HTML and extracts all image sources in an <img> tag.\n " images = set() headers = {'User-Agent': 'Mozilla/5.0 (compatible; OrchestraBot/1.0; [email protected])'} response = requests.get(url, headers=headers) if ((response.status_code < 200) or (response.status_code >= 300)): logger.error("Couldn't fetch url {}".format(url)) return images content = response.text soup = BeautifulSoup(content) tags = soup.find_all('img', src=IMAGE_FILE_REGEX) for tag in tags: link = tag.get('src') if (link is None): continue if (not bool(urlparse(link).netloc)): link = urljoin(url, link) images.add(link) return images
4,603,273,694,007,618,000
Fetches a url's HTML and extracts all image sources in an <img> tag.
simple_workflow/v1/crawl.py
find_images
b12io/orchestra
python
def find_images(url): " \n " images = set() headers = {'User-Agent': 'Mozilla/5.0 (compatible; OrchestraBot/1.0; [email protected])'} response = requests.get(url, headers=headers) if ((response.status_code < 200) or (response.status_code >= 300)): logger.error("Couldn't fetch url {}".format(url)) return images content = response.text soup = BeautifulSoup(content) tags = soup.find_all('img', src=IMAGE_FILE_REGEX) for tag in tags: link = tag.get('src') if (link is None): continue if (not bool(urlparse(link).netloc)): link = urljoin(url, link) images.add(link) return images
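The relative-link handling in find_images can be illustrated in isolation (the base URL and image paths below are made up):

from urllib.parse import urljoin, urlparse

base = 'https://example.com/articles/page.html'
for src in ('/static/a.png', 'img/b.jpg', 'https://cdn.example.com/c.gif'):
    # Same check as find_images: only join when the src has no netloc.
    link = src if urlparse(src).netloc else urljoin(base, src)
    print(link)
# https://example.com/static/a.png
# https://example.com/articles/img/b.jpg
# https://cdn.example.com/c.gif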
def verify_votes(message_hash: Hash32, votes: Iterable[Tuple[(ValidatorIndex, BLSSignature, BLSPubkey)]], domain: Domain) -> Tuple[(Tuple[(BLSSignature, ...)], Tuple[(ValidatorIndex, ...)])]: '\n Verify the given votes.\n ' sigs_with_committee_info = tuple(((sig, committee_index) for (committee_index, sig, pubkey) in votes if bls.verify(message_hash=message_hash, pubkey=pubkey, signature=sig, domain=domain))) try: (sigs, committee_indices) = zip(*sigs_with_committee_info) except ValueError: sigs = tuple() committee_indices = tuple() return (sigs, committee_indices)
-5,234,643,532,797,015,000
Verify the given votes.
eth2/beacon/tools/builder/validator.py
verify_votes
AndrewBezold/trinity
python
def verify_votes(message_hash: Hash32, votes: Iterable[Tuple[(ValidatorIndex, BLSSignature, BLSPubkey)]], domain: Domain) -> Tuple[(Tuple[(BLSSignature, ...)], Tuple[(ValidatorIndex, ...)])]: '\n \n ' sigs_with_committee_info = tuple(((sig, committee_index) for (committee_index, sig, pubkey) in votes if bls.verify(message_hash=message_hash, pubkey=pubkey, signature=sig, domain=domain))) try: (sigs, committee_indices) = zip(*sigs_with_committee_info) except ValueError: sigs = tuple() committee_indices = tuple() return (sigs, committee_indices)
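The try/except in verify_votes exists because unpacking zip(*pairs) into two names raises ValueError when no vote passes verification; a standalone illustration:

pairs = []  # no (signature, committee_index) pairs survived verification
try:
    sigs, committee_indices = zip(*pairs)
except ValueError:
    sigs, committee_indices = tuple(), tuple()
print(sigs, committee_indices)  # () ()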
def aggregate_votes(bitfield: Bitfield, sigs: Sequence[BLSSignature], voting_sigs: Sequence[BLSSignature], attesting_indices: Sequence[CommitteeIndex]) -> Tuple[(Bitfield, BLSSignature)]: '\n Aggregate the votes.\n ' sigs = (tuple(sigs) + tuple(voting_sigs)) bitfield = pipe(bitfield, *(set_voted(index=committee_index) for committee_index in attesting_indices)) return (bitfield, bls.aggregate_signatures(sigs))
6,790,892,440,111,287,000
Aggregate the votes.
eth2/beacon/tools/builder/validator.py
aggregate_votes
AndrewBezold/trinity
python
def aggregate_votes(bitfield: Bitfield, sigs: Sequence[BLSSignature], voting_sigs: Sequence[BLSSignature], attesting_indices: Sequence[CommitteeIndex]) -> Tuple[(Bitfield, BLSSignature)]: '\n \n ' sigs = (tuple(sigs) + tuple(voting_sigs)) bitfield = pipe(bitfield, *(set_voted(index=committee_index) for committee_index in attesting_indices)) return (bitfield, bls.aggregate_signatures(sigs))
def create_mock_proposer_slashing_at_block(state: BeaconState, config: Eth2Config, keymap: Dict[(BLSPubkey, int)], block_root_1: Hash32, block_root_2: Hash32, proposer_index: ValidatorIndex) -> ProposerSlashing: '\n Return a `ProposerSlashing` derived from the given block roots.\n\n If the header roots do not match, the `ProposerSlashing` is valid.\n If the header roots do match, the `ProposerSlashing` is not valid.\n ' slots_per_epoch = config.SLOTS_PER_EPOCH block_header_1 = create_block_header_with_signature(state, block_root_1, keymap[state.validators[proposer_index].pubkey], slots_per_epoch) block_header_2 = create_block_header_with_signature(state, block_root_2, keymap[state.validators[proposer_index].pubkey], slots_per_epoch) return ProposerSlashing(proposer_index=proposer_index, header_1=block_header_1, header_2=block_header_2)
-93,324,441,760,108,860
Return a `ProposerSlashing` derived from the given block roots. If the header roots do not match, the `ProposerSlashing` is valid. If the header roots do match, the `ProposerSlashing` is not valid.
eth2/beacon/tools/builder/validator.py
create_mock_proposer_slashing_at_block
AndrewBezold/trinity
python
def create_mock_proposer_slashing_at_block(state: BeaconState, config: Eth2Config, keymap: Dict[(BLSPubkey, int)], block_root_1: Hash32, block_root_2: Hash32, proposer_index: ValidatorIndex) -> ProposerSlashing: '\n Return a `ProposerSlashing` derived from the given block roots.\n\n If the header roots do not match, the `ProposerSlashing` is valid.\n If the header roots do match, the `ProposerSlashing` is not valid.\n ' slots_per_epoch = config.SLOTS_PER_EPOCH block_header_1 = create_block_header_with_signature(state, block_root_1, keymap[state.validators[proposer_index].pubkey], slots_per_epoch) block_header_2 = create_block_header_with_signature(state, block_root_2, keymap[state.validators[proposer_index].pubkey], slots_per_epoch) return ProposerSlashing(proposer_index=proposer_index, header_1=block_header_1, header_2=block_header_2)
def create_mock_slashable_attestation(state: BeaconState, config: Eth2Config, keymap: Dict[(BLSPubkey, int)], attestation_slot: Slot) -> IndexedAttestation: '\n Create an `IndexedAttestation` that is signed by one attester.\n ' attester_index = ValidatorIndex(0) committee = (attester_index,) shard = Shard(0) beacon_block_root = get_block_root_at_slot(state, attestation_slot, config.SLOTS_PER_HISTORICAL_ROOT) target_root = _get_target_root(state, config, beacon_block_root) source_root = get_block_root_at_slot(state, compute_start_slot_of_epoch(state.current_justified_checkpoint.epoch, config.SLOTS_PER_EPOCH), config.SLOTS_PER_HISTORICAL_ROOT) previous_crosslink = state.current_crosslinks[shard] attestation_data = AttestationData(beacon_block_root=beacon_block_root, source=Checkpoint(epoch=state.current_justified_checkpoint.epoch, root=source_root), target=Checkpoint(epoch=compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH), root=target_root), crosslink=previous_crosslink) (message_hash, attesting_indices) = _get_mock_message_and_attesting_indices(attestation_data, committee, num_voted_attesters=1) signature = sign_transaction(message_hash=message_hash, privkey=keymap[state.validators[attesting_indices[0]].pubkey], state=state, slot=attestation_slot, signature_domain=SignatureDomain.DOMAIN_ATTESTATION, slots_per_epoch=config.SLOTS_PER_EPOCH) validator_indices = tuple((committee[i] for i in attesting_indices)) return IndexedAttestation(custody_bit_0_indices=validator_indices, custody_bit_1_indices=tuple(), data=attestation_data, signature=signature)
4,591,191,133,249,815,600
Create an `IndexedAttestation` that is signed by one attester.
eth2/beacon/tools/builder/validator.py
create_mock_slashable_attestation
AndrewBezold/trinity
python
def create_mock_slashable_attestation(state: BeaconState, config: Eth2Config, keymap: Dict[(BLSPubkey, int)], attestation_slot: Slot) -> IndexedAttestation: '\n \n ' attester_index = ValidatorIndex(0) committee = (attester_index,) shard = Shard(0) beacon_block_root = get_block_root_at_slot(state, attestation_slot, config.SLOTS_PER_HISTORICAL_ROOT) target_root = _get_target_root(state, config, beacon_block_root) source_root = get_block_root_at_slot(state, compute_start_slot_of_epoch(state.current_justified_checkpoint.epoch, config.SLOTS_PER_EPOCH), config.SLOTS_PER_HISTORICAL_ROOT) previous_crosslink = state.current_crosslinks[shard] attestation_data = AttestationData(beacon_block_root=beacon_block_root, source=Checkpoint(epoch=state.current_justified_checkpoint.epoch, root=source_root), target=Checkpoint(epoch=compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH), root=target_root), crosslink=previous_crosslink) (message_hash, attesting_indices) = _get_mock_message_and_attesting_indices(attestation_data, committee, num_voted_attesters=1) signature = sign_transaction(message_hash=message_hash, privkey=keymap[state.validators[attesting_indices[0]].pubkey], state=state, slot=attestation_slot, signature_domain=SignatureDomain.DOMAIN_ATTESTATION, slots_per_epoch=config.SLOTS_PER_EPOCH) validator_indices = tuple((committee[i] for i in attesting_indices)) return IndexedAttestation(custody_bit_0_indices=validator_indices, custody_bit_1_indices=tuple(), data=attestation_data, signature=signature)
def _get_mock_message_and_attesting_indices(attestation_data: AttestationData, committee: Sequence[ValidatorIndex], num_voted_attesters: int) -> Tuple[(Hash32, Tuple[(CommitteeIndex, ...)])]: '\n Get ``message_hash`` and voting indices of the given ``committee``.\n ' message_hash = AttestationDataAndCustodyBit(data=attestation_data, custody_bit=False).hash_tree_root committee_size = len(committee) assert (num_voted_attesters <= committee_size) attesting_indices = tuple((CommitteeIndex(i) for i in random.sample(range(committee_size), num_voted_attesters))) return (message_hash, tuple(sorted(attesting_indices)))
3,754,444,967,802,969,000
Get ``message_hash`` and voting indices of the given ``committee``.
eth2/beacon/tools/builder/validator.py
_get_mock_message_and_attesting_indices
AndrewBezold/trinity
python
def _get_mock_message_and_attesting_indices(attestation_data: AttestationData, committee: Sequence[ValidatorIndex], num_voted_attesters: int) -> Tuple[(Hash32, Tuple[(CommitteeIndex, ...)])]: '\n \n ' message_hash = AttestationDataAndCustodyBit(data=attestation_data, custody_bit=False).hash_tree_root committee_size = len(committee) assert (num_voted_attesters <= committee_size) attesting_indices = tuple((CommitteeIndex(i) for i in random.sample(range(committee_size), num_voted_attesters))) return (message_hash, tuple(sorted(attesting_indices)))
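The committee sampling above does not depend on any eth2 types; a minimal sketch of the same idea with plain integers (the committee size and sample count below are invented for illustration, not taken from the source):

import random

def sample_attesting_indices(committee_size: int, num_voted_attesters: int):
    # Pick `num_voted_attesters` distinct positions inside the committee and
    # return them sorted, mirroring the helper above.
    assert num_voted_attesters <= committee_size
    picked = random.sample(range(committee_size), num_voted_attesters)
    return tuple(sorted(picked))

# Hypothetical committee of 8 validators with 3 voters.
print(sample_attesting_indices(8, 3))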
def _create_mock_signed_attestation(state: BeaconState, attestation_data: AttestationData, attestation_slot: Slot, committee: Sequence[ValidatorIndex], num_voted_attesters: int, keymap: Dict[(BLSPubkey, int)], slots_per_epoch: int) -> Attestation: '\n Create a mocking attestation of the given ``attestation_data`` slot with ``keymap``.\n ' (message_hash, attesting_indices) = _get_mock_message_and_attesting_indices(attestation_data, committee, num_voted_attesters) signatures = [sign_transaction(message_hash=message_hash, privkey=keymap[state.validators[committee[committee_index]].pubkey], state=state, slot=attestation_slot, signature_domain=SignatureDomain.DOMAIN_ATTESTATION, slots_per_epoch=slots_per_epoch) for committee_index in attesting_indices] (aggregation_bits, aggregate_signature) = aggregate_votes(bitfield=get_empty_bitfield(len(committee)), sigs=(), voting_sigs=signatures, attesting_indices=attesting_indices) return Attestation(aggregation_bits=aggregation_bits, data=attestation_data, custody_bits=Bitfield(((False,) * len(aggregation_bits))), signature=aggregate_signature)
8,585,584,768,922,157,000
Create a mock signed attestation for the given ``attestation_data`` slot with ``keymap``.
eth2/beacon/tools/builder/validator.py
_create_mock_signed_attestation
AndrewBezold/trinity
python
def _create_mock_signed_attestation(state: BeaconState, attestation_data: AttestationData, attestation_slot: Slot, committee: Sequence[ValidatorIndex], num_voted_attesters: int, keymap: Dict[(BLSPubkey, int)], slots_per_epoch: int) -> Attestation: '\n \n ' (message_hash, attesting_indices) = _get_mock_message_and_attesting_indices(attestation_data, committee, num_voted_attesters) signatures = [sign_transaction(message_hash=message_hash, privkey=keymap[state.validators[committee[committee_index]].pubkey], state=state, slot=attestation_slot, signature_domain=SignatureDomain.DOMAIN_ATTESTATION, slots_per_epoch=slots_per_epoch) for committee_index in attesting_indices] (aggregation_bits, aggregate_signature) = aggregate_votes(bitfield=get_empty_bitfield(len(committee)), sigs=(), voting_sigs=signatures, attesting_indices=attesting_indices) return Attestation(aggregation_bits=aggregation_bits, data=attestation_data, custody_bits=Bitfield(((False,) * len(aggregation_bits))), signature=aggregate_signature)
def create_signed_attestation_at_slot(state: BeaconState, config: Eth2Config, state_machine: BaseBeaconStateMachine, attestation_slot: Slot, beacon_block_root: Hash32, validator_privkeys: Dict[(ValidatorIndex, int)], committee: Tuple[(ValidatorIndex, ...)], shard: Shard) -> Attestation: '\n Create the attestations of the given ``attestation_slot`` slot with ``validator_privkeys``.\n ' state_transition = state_machine.state_transition state = state_transition.apply_state_transition(state, future_slot=attestation_slot) target_epoch = compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH) target_root = _get_target_root(state, config, beacon_block_root) parent_crosslink = state.current_crosslinks[shard] attestation_data = AttestationData(beacon_block_root=beacon_block_root, source=Checkpoint(epoch=state.current_justified_checkpoint.epoch, root=state.current_justified_checkpoint.root), target=Checkpoint(root=target_root, epoch=target_epoch), crosslink=Crosslink(shard=shard, parent_root=parent_crosslink.hash_tree_root, start_epoch=parent_crosslink.end_epoch, end_epoch=target_epoch)) return _create_mock_signed_attestation(state, attestation_data, attestation_slot, committee, len(committee), keymapper((lambda index: state.validators[index].pubkey), validator_privkeys), config.SLOTS_PER_EPOCH)
2,879,226,919,838,314,500
Create the attestations for the given ``attestation_slot`` with ``validator_privkeys``.
eth2/beacon/tools/builder/validator.py
create_signed_attestation_at_slot
AndrewBezold/trinity
python
def create_signed_attestation_at_slot(state: BeaconState, config: Eth2Config, state_machine: BaseBeaconStateMachine, attestation_slot: Slot, beacon_block_root: Hash32, validator_privkeys: Dict[(ValidatorIndex, int)], committee: Tuple[(ValidatorIndex, ...)], shard: Shard) -> Attestation: '\n \n ' state_transition = state_machine.state_transition state = state_transition.apply_state_transition(state, future_slot=attestation_slot) target_epoch = compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH) target_root = _get_target_root(state, config, beacon_block_root) parent_crosslink = state.current_crosslinks[shard] attestation_data = AttestationData(beacon_block_root=beacon_block_root, source=Checkpoint(epoch=state.current_justified_checkpoint.epoch, root=state.current_justified_checkpoint.root), target=Checkpoint(root=target_root, epoch=target_epoch), crosslink=Crosslink(shard=shard, parent_root=parent_crosslink.hash_tree_root, start_epoch=parent_crosslink.end_epoch, end_epoch=target_epoch)) return _create_mock_signed_attestation(state, attestation_data, attestation_slot, committee, len(committee), keymapper((lambda index: state.validators[index].pubkey), validator_privkeys), config.SLOTS_PER_EPOCH)
@to_tuple def create_mock_signed_attestations_at_slot(state: BeaconState, config: Eth2Config, state_machine: BaseBeaconStateMachine, attestation_slot: Slot, beacon_block_root: Hash32, keymap: Dict[(BLSPubkey, int)], voted_attesters_ratio: float=1.0) -> Iterable[Attestation]: '\n Create the mocking attestations of the given ``attestation_slot`` slot with ``keymap``.\n ' crosslink_committees_at_slot = get_crosslink_committees_at_slot(state, attestation_slot, config) target_root = _get_target_root(state, config, beacon_block_root) target_epoch = compute_epoch_of_slot(state.slot, config.SLOTS_PER_EPOCH) for crosslink_committee in crosslink_committees_at_slot: (committee, shard) = crosslink_committee parent_crosslink = state.current_crosslinks[shard] attestation_data = AttestationData(beacon_block_root=beacon_block_root, source=Checkpoint(epoch=state.current_justified_checkpoint.epoch, root=state.current_justified_checkpoint.root), target=Checkpoint(root=target_root, epoch=target_epoch), crosslink=Crosslink(shard=shard, parent_root=parent_crosslink.hash_tree_root, start_epoch=parent_crosslink.end_epoch, end_epoch=min(target_epoch, (parent_crosslink.end_epoch + config.MAX_EPOCHS_PER_CROSSLINK)))) num_voted_attesters = int((len(committee) * voted_attesters_ratio)) (yield _create_mock_signed_attestation(state, attestation_data, attestation_slot, committee, num_voted_attesters, keymap, config.SLOTS_PER_EPOCH))
9,024,412,771,648,650,000
Create mock attestations for the given ``attestation_slot`` with ``keymap``.
eth2/beacon/tools/builder/validator.py
create_mock_signed_attestations_at_slot
AndrewBezold/trinity
python
@to_tuple def create_mock_signed_attestations_at_slot(state: BeaconState, config: Eth2Config, state_machine: BaseBeaconStateMachine, attestation_slot: Slot, beacon_block_root: Hash32, keymap: Dict[(BLSPubkey, int)], voted_attesters_ratio: float=1.0) -> Iterable[Attestation]: '\n \n ' crosslink_committees_at_slot = get_crosslink_committees_at_slot(state, attestation_slot, config) target_root = _get_target_root(state, config, beacon_block_root) target_epoch = compute_epoch_of_slot(state.slot, config.SLOTS_PER_EPOCH) for crosslink_committee in crosslink_committees_at_slot: (committee, shard) = crosslink_committee parent_crosslink = state.current_crosslinks[shard] attestation_data = AttestationData(beacon_block_root=beacon_block_root, source=Checkpoint(epoch=state.current_justified_checkpoint.epoch, root=state.current_justified_checkpoint.root), target=Checkpoint(root=target_root, epoch=target_epoch), crosslink=Crosslink(shard=shard, parent_root=parent_crosslink.hash_tree_root, start_epoch=parent_crosslink.end_epoch, end_epoch=min(target_epoch, (parent_crosslink.end_epoch + config.MAX_EPOCHS_PER_CROSSLINK)))) num_voted_attesters = int((len(committee) * voted_attesters_ratio)) (yield _create_mock_signed_attestation(state, attestation_data, attestation_slot, committee, num_voted_attesters, keymap, config.SLOTS_PER_EPOCH))
def open(self, path, mode): 'Wrapper on __builtin__.open used to simplify unit testing.' import __builtin__ return __builtin__.open(path, mode)
-6,685,491,992,604,253,000
Wrapper on __builtin__.open used to simplify unit testing.
nova/virt/hyperv/pathutils.py
open
bopopescu/nested_quota_final
python
def open(self, path, mode): import __builtin__ return __builtin__.open(path, mode)
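The wrapper above targets Python 2 (`__builtin__`); a hedged sketch of the equivalent idea on Python 3, where the module is called `builtins` (the function name `open_` is invented, not from the source):

import builtins

def open_(path, mode):
    # Delegate to the built-in open() so unit tests can monkeypatch builtins.open.
    return builtins.open(path, mode)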
def test_username_validation_error_msg(self, user: User): "\n Tests UserCreation Form's unique validator functions correctly by testing:\n 1) A new user with an existing username cannot be added.\n 2) Only 1 error is raised by the UserCreation Form\n 3) The desired error message is raised\n " form = UserCreationForm({'username': user.username, 'password1': user.password, 'password2': user.password}) assert (not form.is_valid()) assert (len(form.errors) == 1) assert ('username' in form.errors) assert (form.errors['username'][0] == _('This username has already been taken.'))
1,755,965,056,911,758,800
Tests UserCreation Form's unique validator functions correctly by testing: 1) A new user with an existing username cannot be added. 2) Only 1 error is raised by the UserCreation Form 3) The desired error message is raised
djcutter/users/tests/test_forms.py
test_username_validation_error_msg
macbotxxx/djcutter
python
def test_username_validation_error_msg(self, user: User): "\n Tests UserCreation Form's unique validator functions correctly by testing:\n 1) A new user with an existing username cannot be added.\n 2) Only 1 error is raised by the UserCreation Form\n 3) The desired error message is raised\n " form = UserCreationForm({'username': user.username, 'password1': user.password, 'password2': user.password}) assert (not form.is_valid()) assert (len(form.errors) == 1) assert ('username' in form.errors) assert (form.errors['username'][0] == _('This username has already been taken.'))
def drive_pump(self, volume, direction): 'Converts volume to cycles and ensures and checks pump level and values' if (direction == 0): space_in_pump = (self.max_pump_capacity - self.volume_in_pump) if (volume > space_in_pump): interfaces.lcd_out('Filling Error', line=4) else: interfaces.lcd_out('Filling {0:1.2f} ml'.format(volume), line=4) cycles = analysis.determine_pump_cycles(volume) self.drive_step_stick(cycles, direction) self.volume_in_pump += volume elif (direction == 1): if (volume > self.volume_in_pump): interfaces.lcd_out('Pumping Error', line=4) else: interfaces.lcd_out('Pumping {0:1.2f} ml'.format(volume), line=4) cycles = analysis.determine_pump_cycles(volume) offset = self.drive_step_stick(cycles, direction) if (offset != 0): self.drive_step_stick(offset, 0) self.drive_step_stick(offset, 1) self.volume_in_pump -= volume interfaces.lcd_out('Pump Vol: {0:1.2f} ml'.format(self.volume_in_pump), line=4)
-9,125,726,388,624,055,000
Converts volume to pump cycles and checks the pump level and capacity before driving.
titration/utils/devices/syringe_pump_mock.py
drive_pump
KonradMcClure/AlkalinityTitrator
python
def drive_pump(self, volume, direction): if (direction == 0): space_in_pump = (self.max_pump_capacity - self.volume_in_pump) if (volume > space_in_pump): interfaces.lcd_out('Filling Error', line=4) else: interfaces.lcd_out('Filling {0:1.2f} ml'.format(volume), line=4) cycles = analysis.determine_pump_cycles(volume) self.drive_step_stick(cycles, direction) self.volume_in_pump += volume elif (direction == 1): if (volume > self.volume_in_pump): interfaces.lcd_out('Pumping Error', line=4) else: interfaces.lcd_out('Pumping {0:1.2f} ml'.format(volume), line=4) cycles = analysis.determine_pump_cycles(volume) offset = self.drive_step_stick(cycles, direction) if (offset != 0): self.drive_step_stick(offset, 0) self.drive_step_stick(offset, 1) self.volume_in_pump -= volume interfaces.lcd_out('Pump Vol: {0:1.2f} ml'.format(self.volume_in_pump), line=4)
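The fill/dispense branches above reduce to a capacity check before the pump is driven; a tiny self-contained sketch of that arithmetic (the constants are invented, not the titrator's real values):

MAX_PUMP_CAPACITY = 1.0   # ml, invented for the example
volume_in_pump = 0.25     # ml currently held in the syringe

def can_fill(volume):
    # Direction 0: the remaining space in the syringe must cover the request.
    return volume <= MAX_PUMP_CAPACITY - volume_in_pump

def can_dispense(volume):
    # Direction 1: cannot push out more than is currently held.
    return volume <= volume_in_pump

assert can_fill(0.5) and not can_fill(0.9)
assert can_dispense(0.2) and not can_dispense(0.3)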
def drive_step_stick(self, cycles, direction): '\n cycles and direction are integers\n Communicates with arduino to add HCl through pump\n :param cycles: number of rising edges for the pump\n :param direction: direction of pump\n ' if (cycles == 0): return 0 if self.serial.writable(): self.serial.write(cycles.to_bytes(4, 'little')) self.serial.write(direction.to_bytes(1, 'little')) self.serial.flush() temp = self.serial.readline() if ((temp == b'DONE\r\n') or (temp == b'')): return 0 else: return int(temp) else: interfaces.lcd_out('Arduino Unavailable', 4, constants.LCD_CENT_JUST)
5,106,291,834,239,962,000
cycles and direction are integers Communicates with arduino to add HCl through pump :param cycles: number of rising edges for the pump :param direction: direction of pump
titration/utils/devices/syringe_pump_mock.py
drive_step_stick
KonradMcClure/AlkalinityTitrator
python
def drive_step_stick(self, cycles, direction): '\n cycles and direction are integers\n Communicates with arduino to add HCl through pump\n :param cycles: number of rising edges for the pump\n :param direction: direction of pump\n ' if (cycles == 0): return 0 if self.serial.writable(): self.serial.write(cycles.to_bytes(4, 'little')) self.serial.write(direction.to_bytes(1, 'little')) self.serial.flush() temp = self.serial.readline() if ((temp == b'DONE\r\n') or (temp == b'')): return 0 else: return int(temp) else: interfaces.lcd_out('Arduino Unavailable', 4, constants.LCD_CENT_JUST)
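The serial frame used above is simply the cycle count as a 4-byte little-endian integer followed by a 1-byte direction flag, with an ASCII reply line. A hedged sketch of packing and parsing that frame with the standard struct module; the frame layout is inferred from the code above, and the Arduino firmware side is not shown in the source:

import struct

def pack_pump_command(cycles: int, direction: int) -> bytes:
    # '<IB' = little-endian unsigned 32-bit int + unsigned byte, matching
    # cycles.to_bytes(4, 'little') + direction.to_bytes(1, 'little').
    return struct.pack('<IB', cycles, direction)

def parse_pump_reply(line: bytes) -> int:
    # b'DONE\r\n' or an empty read means no remaining offset; otherwise the
    # reply carries the leftover cycle count as ASCII digits.
    if line in (b'DONE\r\n', b''):
        return 0
    return int(line)

assert pack_pump_command(1000, 1) == (1000).to_bytes(4, 'little') + (1).to_bytes(1, 'little')
assert parse_pump_reply(b'DONE\r\n') == 0
assert parse_pump_reply(b'42\r\n') == 42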
def all_entities_on_same_layout(entities: Iterable[DXFEntity]): ' Check if all entities are on the same layout (model space or any paper\n layout but not block).\n\n ' owners = set((entity.dxf.owner for entity in entities)) return (len(owners) < 2)
-3,090,958,499,230,387,700
Check if all entities are on the same layout (model space or any paper layout but not block).
src/ezdxf/entities/dxfgroups.py
all_entities_on_same_layout
dmtvanzanten/ezdxf
python
def all_entities_on_same_layout(entities: Iterable[DXFEntity]): ' Check if all entities are on the same layout (model space or any paper\n layout but not block).\n\n ' owners = set((entity.dxf.owner for entity in entities)) return (len(owners) < 2)
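The owner-set trick above is a general way to test that all items share one attribute; a small self-contained sketch with plain objects (the class and attribute names are illustrative, not ezdxf types):

from dataclasses import dataclass

@dataclass
class Item:
    owner: str

def all_same_owner(items) -> bool:
    # Zero or one distinct owner produces a set of size < 2.
    return len({item.owner for item in items}) < 2

assert all_same_owner([Item('modelspace'), Item('modelspace')])
assert not all_same_owner([Item('modelspace'), Item('layout1')])
assert all_same_owner([])  # vacuously true, like the original helper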
def get_group_name(group: DXFGroup, db: 'EntityDB') -> str: ' Get name of `group`. ' group_table = cast('Dictionary', db[group.dxf.owner]) for (name, entity) in group_table.items(): if (entity is group): return name
-3,521,012,806,978,972,000
Get name of `group`.
src/ezdxf/entities/dxfgroups.py
get_group_name
dmtvanzanten/ezdxf
python
def get_group_name(group: DXFGroup, db: 'EntityDB') -> str: ' ' group_table = cast('Dictionary', db[group.dxf.owner]) for (name, entity) in group_table.items(): if (entity is group): return name
def export_entity(self, tagwriter: 'TagWriter') -> None: ' Export entity specific data as DXF tags. ' super().export_entity(tagwriter) tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_group.name) self.dxf.export_dxf_attribs(tagwriter, ['description', 'unnamed', 'selectable']) self.export_group(tagwriter)
8,806,658,115,378,163,000
Export entity specific data as DXF tags.
src/ezdxf/entities/dxfgroups.py
export_entity
dmtvanzanten/ezdxf
python
def export_entity(self, tagwriter: 'TagWriter') -> None: ' ' super().export_entity(tagwriter) tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_group.name) self.dxf.export_dxf_attribs(tagwriter, ['description', 'unnamed', 'selectable']) self.export_group(tagwriter)
def __iter__(self) -> Iterable[DXFEntity]: ' Iterate over all DXF entities in :class:`DXFGroup` as instances of\n :class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).\n\n ' return (e for e in self._data if e.is_alive)
-6,451,279,862,838,398,000
Iterate over all DXF entities in :class:`DXFGroup` as instances of :class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
src/ezdxf/entities/dxfgroups.py
__iter__
dmtvanzanten/ezdxf
python
def __iter__(self) -> Iterable[DXFEntity]: ' Iterate over all DXF entities in :class:`DXFGroup` as instances of\n :class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).\n\n ' return (e for e in self._data if e.is_alive)
def __len__(self) -> int: ' Returns the count of DXF entities in :class:`DXFGroup`. ' return len(self._data)
-7,403,369,686,114,288,000
Returns the count of DXF entities in :class:`DXFGroup`.
src/ezdxf/entities/dxfgroups.py
__len__
dmtvanzanten/ezdxf
python
def __len__(self) -> int: ' ' return len(self._data)
def __getitem__(self, item): ' Returns entities by standard Python indexing and slicing. ' return self._data[item]
5,059,744,694,121,915,000
Returns entities by standard Python indexing and slicing.
src/ezdxf/entities/dxfgroups.py
__getitem__
dmtvanzanten/ezdxf
python
def __getitem__(self, item): ' ' return self._data[item]
def __contains__(self, item: Union[(str, DXFEntity)]) -> bool: ' Returns ``True`` if item is in :class:`DXFGroup`. `item` has to be\n a handle string or an object of type :class:`DXFEntity` or inherited.\n\n ' handle = (item if isinstance(item, str) else item.dxf.handle) return (handle in set(self.handles()))
8,053,672,929,536,613,000
Returns ``True`` if item is in :class:`DXFGroup`. `item` has to be a handle string or an object of type :class:`DXFEntity` or inherited.
src/ezdxf/entities/dxfgroups.py
__contains__
dmtvanzanten/ezdxf
python
def __contains__(self, item: Union[(str, DXFEntity)]) -> bool: ' Returns ``True`` if item is in :class:`DXFGroup`. `item` has to be\n a handle string or an object of type :class:`DXFEntity` or inherited.\n\n ' handle = (item if isinstance(item, str) else item.dxf.handle) return (handle in set(self.handles()))
def handles(self) -> Iterable[str]: ' Iterable of handles of all DXF entities in :class:`DXFGroup`. ' return (entity.dxf.handle for entity in self)
-6,554,987,980,983,336,000
Iterable of handles of all DXF entities in :class:`DXFGroup`.
src/ezdxf/entities/dxfgroups.py
handles
dmtvanzanten/ezdxf
python
def handles(self) -> Iterable[str]: ' ' return (entity.dxf.handle for entity in self)
@contextmanager def edit_data(self) -> List[DXFEntity]: ' Context manager which yields all the group entities as\n standard Python list::\n\n with group.edit_data() as data:\n # add new entities to a group\n data.append(modelspace.add_line((0, 0), (3, 0)))\n # remove last entity from a group\n data.pop()\n\n ' data = list(self) (yield data) self.set_data(data)
-544,134,383,874,083,260
Context manager which yields all the group entities as standard Python list:: with group.edit_data() as data: # add new entities to a group data.append(modelspace.add_line((0, 0), (3, 0))) # remove last entity from a group data.pop()
src/ezdxf/entities/dxfgroups.py
edit_data
dmtvanzanten/ezdxf
python
@contextmanager def edit_data(self) -> List[DXFEntity]: ' Context manager which yields all the group entities as\n standard Python list::\n\n with group.edit_data() as data:\n # add new entities to a group\n data.append(modelspace.add_line((0, 0), (3, 0)))\n # remove last entity from a group\n data.pop()\n\n ' data = list(self) (yield data) self.set_data(data)
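A hedged usage sketch of this context manager through the public ezdxf API; the group name and geometry are invented, and the calls below are believed to match the documented interface rather than being taken from the source:

import ezdxf

doc = ezdxf.new()
msp = doc.modelspace()
group = doc.groups.new('EXAMPLE_GROUP')

with group.edit_data() as data:
    # Entities appended to `data` become group members when the context exits.
    data.append(msp.add_line((0, 0), (3, 0)))
    data.append(msp.add_circle((1.5, 0), radius=1))

print(len(group))  # expected: 2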
def set_data(self, entities: Iterable[DXFEntity]) -> None: ' Set `entities` as new group content, entities should be an iterable\n :class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).\n Raises :class:`DXFValueError` if not all entities be on the same layout\n (modelspace or any paperspace layout but not block)\n\n ' entities = list(entities) if (not all_entities_on_same_layout(entities)): raise const.DXFStructureError('All entities have to be in the same layout and are not allowed to be in a block layout.') self.clear() self._data = entities
-666,040,995,580,035,200
Set `entities` as the new group content; `entities` should be an iterable of :class:`DXFGraphic` or inherited entities (LINE, CIRCLE, ...). Raises :class:`DXFStructureError` if not all entities are on the same layout (modelspace or any paperspace layout, but not a block layout).
src/ezdxf/entities/dxfgroups.py
set_data
dmtvanzanten/ezdxf
python
def set_data(self, entities: Iterable[DXFEntity]) -> None: ' Set `entities` as new group content, entities should be an iterable\n :class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).\n Raises :class:`DXFValueError` if not all entities be on the same layout\n (modelspace or any paperspace layout but not block)\n\n ' entities = list(entities) if (not all_entities_on_same_layout(entities)): raise const.DXFStructureError('All entities have to be in the same layout and are not allowed to be in a block layout.') self.clear() self._data = entities
def extend(self, entities: Iterable[DXFEntity]) -> None: ' Add `entities` to :class:`DXFGroup`. ' self._data.extend(entities)
704,611,828,692,593,000
Add `entities` to :class:`DXFGroup`.
src/ezdxf/entities/dxfgroups.py
extend
dmtvanzanten/ezdxf
python
def extend(self, entities: Iterable[DXFEntity]) -> None: ' ' self._data.extend(entities)
def clear(self) -> None: ' Remove all entities from :class:`DXFGroup`, does not delete any\n drawing entities referenced by this group.\n\n ' self._data = []
7,693,783,732,229,944,000
Remove all entities from :class:`DXFGroup`, does not delete any drawing entities referenced by this group.
src/ezdxf/entities/dxfgroups.py
clear
dmtvanzanten/ezdxf
python
def clear(self) -> None: ' Remove all entities from :class:`DXFGroup`, does not delete any\n drawing entities referenced by this group.\n\n ' self._data = []
def audit(self, auditor: 'Auditor') -> None: ' Remove invalid handles from :class:`DXFGroup`.\n\n Invalid handles are: deleted entities, not all entities in the same\n layout or entities in a block layout.\n\n ' self.purge(auditor.entitydb) if (not all_entities_on_same_layout(self._data)): auditor.fixed_error(code=AuditError.GROUP_ENTITIES_IN_DIFFERENT_LAYOUTS, message=f'Cleared {str(self)}, not all entities are located in the same layout.') self.clear()
5,824,201,630,456,137,000
Remove invalid handles from :class:`DXFGroup`. Invalid handles are: deleted entities, not all entities in the same layout or entities in a block layout.
src/ezdxf/entities/dxfgroups.py
audit
dmtvanzanten/ezdxf
python
def audit(self, auditor: 'Auditor') -> None: ' Remove invalid handles from :class:`DXFGroup`.\n\n Invalid handles are: deleted entities, not all entities in the same\n layout or entities in a block layout.\n\n ' self.purge(auditor.entitydb) if (not all_entities_on_same_layout(self._data)): auditor.fixed_error(code=AuditError.GROUP_ENTITIES_IN_DIFFERENT_LAYOUTS, message=f'Cleared {str(self)}, not all entities are located in the same layout.') self.clear()
def purge(self, db: 'EntityDB') -> None: ' Remove invalid group entities. ' self._data = self._filter_invalid_entities(db)
7,017,494,116,063,099,000
Remove invalid group entities.
src/ezdxf/entities/dxfgroups.py
purge
dmtvanzanten/ezdxf
python
def purge(self, db: 'EntityDB') -> None: ' ' self._data = self._filter_invalid_entities(db)
def groups(self) -> Iterable[DXFGroup]: ' Iterable of all existing groups. ' for (name, group) in self: (yield group)
3,448,898,582,352,388,000
Iterable of all existing groups.
src/ezdxf/entities/dxfgroups.py
groups
dmtvanzanten/ezdxf
python
def groups(self) -> Iterable[DXFGroup]: ' ' for (name, group) in self: (yield group)
def new(self, name: str=None, description: str='', selectable: bool=True) -> DXFGroup: ' Creates a new group. If `name` is ``None`` an unnamed group is\n created, which has an automatically generated name like "\\*Annnn".\n\n Args:\n name: group name as string\n description: group description as string\n selectable: group is selectable if ``True``\n\n ' if (name in self): raise const.DXFValueError(f"GROUP '{name}' already exists.") if (name is None): name = self.next_name() unnamed = 1 else: unnamed = 0 dxfattribs = {'description': description, 'unnamed': unnamed, 'selectable': int(bool(selectable))} return cast(DXFGroup, self._new(name, dxfattribs))
4,705,095,474,630,304,000
Creates a new group. If `name` is ``None`` an unnamed group is created, which has an automatically generated name like "\*Annnn". Args: name: group name as string description: group description as string selectable: group is selectable if ``True``
src/ezdxf/entities/dxfgroups.py
new
dmtvanzanten/ezdxf
python
def new(self, name: str=None, description: str='', selectable: bool=True) -> DXFGroup: ' Creates a new group. If `name` is ``None`` an unnamed group is\n created, which has an automatically generated name like "\\*Annnn".\n\n Args:\n name: group name as string\n description: group description as string\n selectable: group is selectable if ``True``\n\n ' if (name in self): raise const.DXFValueError(f"GROUP '{name}' already exists.") if (name is None): name = self.next_name() unnamed = 1 else: unnamed = 0 dxfattribs = {'description': description, 'unnamed': unnamed, 'selectable': int(bool(selectable))} return cast(DXFGroup, self._new(name, dxfattribs))
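For the naming behaviour above, a short hedged sketch: omitting `name` should yield an unnamed group with an auto-generated '*Annnn'-style name, while reusing an explicit name raises; the exact generated name is an assumption, not taken from the source:

import ezdxf
from ezdxf.lldxf.const import DXFValueError

doc = ezdxf.new()
doc.groups.new('PARTS', description='demo group')
doc.groups.new()  # unnamed group, auto-generated name such as '*A1'

try:
    doc.groups.new('PARTS')  # duplicate name
except DXFValueError:
    print('duplicate group names are rejected')

for name, group in doc.groups:
    print(name, len(group))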
def delete(self, group: Union[(DXFGroup, str)]) -> None: ' Delete `group`, `group` can be an object of type :class:`DXFGroup`\n or a group name as string.\n\n ' if isinstance(group, str): name = group elif (group.dxftype() == 'GROUP'): name = get_group_name(group, self.entitydb) else: raise TypeError(group.dxftype()) if (name in self): super().delete(name) else: raise const.DXFValueError('GROUP not in group table registered.')
-8,989,171,775,912,609,000
Delete `group`, `group` can be an object of type :class:`DXFGroup` or a group name as string.
src/ezdxf/entities/dxfgroups.py
delete
dmtvanzanten/ezdxf
python
def delete(self, group: Union[(DXFGroup, str)]) -> None: ' Delete `group`, `group` can be an object of type :class:`DXFGroup`\n or a group name as string.\n\n ' if isinstance(group, str): name = group elif (group.dxftype() == 'GROUP'): name = get_group_name(group, self.entitydb) else: raise TypeError(group.dxftype()) if (name in self): super().delete(name) else: raise const.DXFValueError('GROUP not in group table registered.')
def audit(self, auditor: 'Auditor') -> None: ' Removes empty groups and invalid handles from all groups. ' trash = [] for (name, group) in self: group.audit(auditor) if (not len(group)): trash.append(name) for name in trash: auditor.fixed_error(code=AuditError.REMOVE_EMPTY_GROUP, message=f'Removed empty group "{name}".') self.delete(name)
-5,605,563,065,586,540,000
Removes empty groups and invalid handles from all groups.
src/ezdxf/entities/dxfgroups.py
audit
dmtvanzanten/ezdxf
python
def audit(self, auditor: 'Auditor') -> None: ' ' trash = [] for (name, group) in self: group.audit(auditor) if (not len(group)): trash.append(name) for name in trash: auditor.fixed_error(code=AuditError.REMOVE_EMPTY_GROUP, message=f'Removed empty group "{name}".') self.delete(name)
def __init__(self, more_items_remaining=None, total_item_count=None, continuation_token=None, items=None, total=None): '\n Keyword args:\n more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.\n total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.\n continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).\n items (list[VolumeSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.\n total (list[VolumeSnapshot]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.\n ' if (more_items_remaining is not None): self.more_items_remaining = more_items_remaining if (total_item_count is not None): self.total_item_count = total_item_count if (continuation_token is not None): self.continuation_token = continuation_token if (items is not None): self.items = items if (total is not None): self.total = total
7,190,323,510,957,232,000
Keyword args: more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved. total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned. continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified). items (list[VolumeSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty. total (list[VolumeSnapshot]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py
__init__
Flav-STOR-WL/py-pure-client
python
def __init__(self, more_items_remaining=None, total_item_count=None, continuation_token=None, items=None, total=None): '\n Keyword args:\n more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.\n total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.\n continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).\n items (list[VolumeSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.\n total (list[VolumeSnapshot]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.\n ' if (more_items_remaining is not None): self.more_items_remaining = more_items_remaining if (total_item_count is not None): self.total_item_count = total_item_count if (continuation_token is not None): self.continuation_token = continuation_token if (items is not None): self.items = items if (total is not None): self.total = total
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(VolumeSnapshotGetResponse, dict): for (key, value) in self.items(): result[key] = value return result
230,603,998,305,512,800
Returns the model properties as a dict
pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py
to_dict
Flav-STOR-WL/py-pure-client
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(VolumeSnapshotGetResponse, dict): for (key, value) in self.items(): result[key] = value return result
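A hedged round-trip sketch for this generic swagger-style model; the import path mirrors the file path recorded above, and the field values are invented:

from pypureclient.flasharray.FA_2_7.models.volume_snapshot_get_response import (
    VolumeSnapshotGetResponse,
)

resp = VolumeSnapshotGetResponse(more_items_remaining=False, total_item_count=2)
print(resp.to_dict())  # {'more_items_remaining': False, 'total_item_count': 2}
print(resp == VolumeSnapshotGetResponse(total_item_count=2))  # False: attribute sets differ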
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py
to_str
Flav-STOR-WL/py-pure-client
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py
__repr__
Flav-STOR-WL/py-pure-client
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, VolumeSnapshotGetResponse)): return False return (self.__dict__ == other.__dict__)
-7,413,502,122,930,958,000
Returns true if both objects are equal
pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py
__eq__
Flav-STOR-WL/py-pure-client
python
def __eq__(self, other): if (not isinstance(other, VolumeSnapshotGetResponse)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py
__ne__
Flav-STOR-WL/py-pure-client
python
def __ne__(self, other): return (not (self == other))
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1, 'inchikey': 1, 'smiles': 1, 'name': 1}): "Get all entries that have concentration values\n \n Args:\n projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.\n\n Returns:\n (list): all results that meet the constraint.\n " result = [] query = {'concentrations': {'$ne': None}} docs = self.collection.find(filter=query, projection=projection) for doc in docs: result.append(doc) return result
5,753,718,575,970,389,000
Get all entries that have concentration values Args: projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}. Returns: (list): all results that meet the constraint.
datanator_query_python/query/query_xmdb.py
get_all_concentrations
KarrLab/datanator_query_python
python
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1, 'inchikey': 1, 'smiles': 1, 'name': 1}): "Get all entries that have concentration values\n \n Args:\n projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.\n\n Returns:\n (list): all results that meet the constraint.\n " result = [] query = {'concentrations': {'$ne': None}} docs = self.collection.find(filter=query, projection=projection) for doc in docs: result.append(doc) return result
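The same filter can be reproduced against a plain pymongo collection; a minimal hedged sketch in which the connection string, database, and collection names are placeholders, not taken from the source:

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')      # placeholder URI
collection = client['datanator']['metabolites_meta']   # placeholder names

query = {'concentrations': {'$ne': None}}
projection = {'_id': 0, 'inchi': 1, 'inchikey': 1, 'smiles': 1, 'name': 1}

results = list(collection.find(filter=query, projection=projection))
print(len(results), 'entries with concentration values')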
def get_name_by_inchikey(self, inchikey): "Get metabolite's name by its inchikey\n \n Args:\n inchikey (:obj:`str`): inchi key of metabolite\n\n Return:\n (:obj:`str`): name of metabolite\n " query = {'inchikey': inchikey} projection = {'_id': 0, 'name': 1} doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation) if (doc is None): return 'No metabolite found.' else: return doc['name']
-1,496,111,138,151,051,300
Get metabolite's name by its inchikey Args: inchikey (:obj:`str`): inchi key of metabolite Return: (:obj:`str`): name of metabolite
datanator_query_python/query/query_xmdb.py
get_name_by_inchikey
KarrLab/datanator_query_python
python
def get_name_by_inchikey(self, inchikey): "Get metabolite's name by its inchikey\n \n Args:\n inchikey (:obj:`str`): inchi key of metabolite\n\n Return:\n (:obj:`str`): name of metabolite\n " query = {'inchikey': inchikey} projection = {'_id': 0, 'name': 1} doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation) if (doc is None): return 'No metabolite found.' else: return doc['name']