repository_name (string, lengths 5-67) | func_path_in_repository (string, lengths 4-234) | func_name (string, lengths 0-314) | whole_func_string (string, lengths 52-3.87M) | language (6 classes) | func_code_string (string, lengths 52-3.87M) | func_documentation_string (string, lengths 1-47.2k) | func_code_url (string, lengths 85-339) |
---|---|---|---|---|---|---|---|
aetros/aetros-cli | aetros/starter.py | start | def start(logger, full_id, fetch=True, env=None, volumes=None, cpus=None, memory=None, gpu_devices=None, offline=False):
"""
Starts the job with all logging of a job_id
"""
owner, name, id = unpack_full_job_id(full_id)
if isinstance(sys.stdout, GeneralLogger):
# we don't want to have stuff written to stdout before in job's log
sys.stdout.clear_buffer()
job_backend = JobBackend(model_name=owner + '/' + name)
if fetch:
job_backend.fetch(id)
job_backend.restart(id)
job_backend.start(collect_system=False, offline=offline)
job_backend.set_status('PREPARE', add_section=False)
job = job_backend.get_job_model()
if not cpus:
cpus = job.get_cpu()
if not memory:
memory = job.get_memory()
if not gpu_devices and job.get_gpu():
# if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1]
gpu_devices = []
for i in range(0, job.get_gpu()):
gpu_devices.append(i)
start_command(logger, job_backend, env, volumes, cpus=cpus, memory=memory, gpu_devices=gpu_devices, offline=offline) | python | def start(logger, full_id, fetch=True, env=None, volumes=None, cpus=None, memory=None, gpu_devices=None, offline=False):
"""
Starts the job with all logging of a job_id
"""
owner, name, id = unpack_full_job_id(full_id)
if isinstance(sys.stdout, GeneralLogger):
# we don't want to have stuff written to stdout before in job's log
sys.stdout.clear_buffer()
job_backend = JobBackend(model_name=owner + '/' + name)
if fetch:
job_backend.fetch(id)
job_backend.restart(id)
job_backend.start(collect_system=False, offline=offline)
job_backend.set_status('PREPARE', add_section=False)
job = job_backend.get_job_model()
if not cpus:
cpus = job.get_cpu()
if not memory:
memory = job.get_memory()
if not gpu_devices and job.get_gpu():
# if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1]
gpu_devices = []
for i in range(0, job.get_gpu()):
gpu_devices.append(i)
start_command(logger, job_backend, env, volumes, cpus=cpus, memory=memory, gpu_devices=gpu_devices, offline=offline) | Starts the job with all logging of a job_id | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/starter.py#L27-L61 |
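The GPU fallback in start() builds the device list [0..N-1] when a job requests N GPUs but no explicit devices were passed. A minimal standalone sketch of that fallback (default_gpu_devices is a hypothetical helper, not part of aetros):

```python
def default_gpu_devices(requested_gpus):
    # Mirrors the fallback in start(): a job requesting N GPUs gets device
    # ids [0, 1, ..., N-1] when no explicit gpu_devices were supplied.
    return list(range(requested_gpus)) if requested_gpus else None

assert default_gpu_devices(2) == [0, 1]
assert default_gpu_devices(0) is None
```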
aetros/aetros-cli | aetros/utils/pilutil.py | fromimage | def fromimage(im, flatten=False, mode=None):
"""
Return a copy of a PIL image as a numpy array.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
"""
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a | python | def fromimage(im, flatten=False, mode=None):
"""
Return a copy of a PIL image as a numpy array.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
"""
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a | Return a copy of a PIL image as a numpy array.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/pilutil.py#L32-L82 |
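A minimal sketch of the palette handling in fromimage, assuming Pillow and NumPy are installed: a mode 'P' image without transparency is converted to 'RGB' before the array copy, so the result is MxNx3 rather than a 2-D array of palette indices.

```python
import numpy as np
from PIL import Image

im = Image.new('P', (2, 2))              # indexed palette image, no transparency
arr_indices = np.array(im)               # without the conversion: 2-D palette indices
arr_rgb = np.array(im.convert('RGB'))    # what fromimage returns in this case

assert arr_indices.shape == (2, 2)
assert arr_rgb.shape == (2, 2, 3)
```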
aetros/aetros-cli | aetros/utils/pilutil.py | bytescale | def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(uint8) | python | def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(uint8) | Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8) | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/pilutil.py#L88-L157 |
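A compact restatement of the scaling formula used in bytescale, assuming NumPy; the range validation from the original is omitted and the helper name is illustrative only.

```python
import numpy as np

def bytescale_like(data, cmin=None, cmax=None, high=255, low=0):
    # bytedata = (data - cmin) * (high - low) / (cmax - cmin) + low,
    # then clipped to [low, high] and rounded into uint8.
    data = np.asarray(data, dtype=float)
    cmin = data.min() if cmin is None else cmin
    cmax = data.max() if cmax is None else cmax
    cscale = (cmax - cmin) or 1.0
    scaled = (data - cmin) * (high - low) / cscale + low
    return (scaled.clip(low, high) + 0.5).astype(np.uint8)

img = np.array([[91.06794177, 3.39058326, 84.4221549],
                [73.88003259, 80.91433048, 4.88878881]])
print(bytescale_like(img))
```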
aetros/aetros-cli | aetros/utils/pilutil.py | toimage | def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
mode=None, channel_axis=None):
"""Takes a numpy array and returns a PIL image.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
data = asarray(arr)
if iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and
((3 in shape) or (4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(numpy.float32)
image = Image.frombytes(mode, shape, data32.tostring())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data, high=high, low=low,
cmin=cmin, cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tostring())
if pal is not None:
image.putpalette(asarray(pal, dtype=uint8).tostring())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *
ones((3,), dtype=uint8)[newaxis, :])
image.putpalette(asarray(pal, dtype=uint8).tostring())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tostring())
return image
if cmin is None:
cmin = amin(ravel(data))
if cmax is None:
cmax = amax(ravel(data))
data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
if mode == 'I':
data32 = data.astype(numpy.uint32)
image = Image.frombytes(mode, shape, data32.tostring())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
ca = numpy.flatnonzero(asarray(shape) == 3)[0]
else:
ca = numpy.flatnonzero(asarray(shape) == 4)
if len(ca):
ca = ca[0]
else:
raise ValueError("Could not find channel dimension.")
else:
ca = channel_axis
numch = shape[ca]
if numch not in [3, 4]:
raise ValueError("Channel axis dimension is not valid.")
bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
if ca == 2:
strdata = bytedata.tostring()
shape = (shape[1], shape[0])
elif ca == 1:
strdata = transpose(bytedata, (0, 2, 1)).tostring()
shape = (shape[2], shape[0])
elif ca == 0:
strdata = transpose(bytedata, (1, 2, 0)).tostring()
shape = (shape[2], shape[1])
if mode is None:
if numch == 3:
mode = 'RGB'
else:
mode = 'RGBA'
if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
raise ValueError(_errstr)
if mode in ['RGB', 'YCbCr']:
if numch != 3:
raise ValueError("Invalid array shape for mode.")
if mode in ['RGBA', 'CMYK']:
if numch != 4:
raise ValueError("Invalid array shape for mode.")
# Here we know data and mode is correct
image = Image.frombytes(mode, shape, strdata)
return image | python | def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
mode=None, channel_axis=None):
"""Takes a numpy array and returns a PIL image.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
data = asarray(arr)
if iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and
((3 in shape) or (4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(numpy.float32)
image = Image.frombytes(mode, shape, data32.tostring())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data, high=high, low=low,
cmin=cmin, cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tostring())
if pal is not None:
image.putpalette(asarray(pal, dtype=uint8).tostring())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *
ones((3,), dtype=uint8)[newaxis, :])
image.putpalette(asarray(pal, dtype=uint8).tostring())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tostring())
return image
if cmin is None:
cmin = amin(ravel(data))
if cmax is None:
cmax = amax(ravel(data))
data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
if mode == 'I':
data32 = data.astype(numpy.uint32)
image = Image.frombytes(mode, shape, data32.tostring())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
ca = numpy.flatnonzero(asarray(shape) == 3)[0]
else:
ca = numpy.flatnonzero(asarray(shape) == 4)
if len(ca):
ca = ca[0]
else:
raise ValueError("Could not find channel dimension.")
else:
ca = channel_axis
numch = shape[ca]
if numch not in [3, 4]:
raise ValueError("Channel axis dimension is not valid.")
bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
if ca == 2:
strdata = bytedata.tostring()
shape = (shape[1], shape[0])
elif ca == 1:
strdata = transpose(bytedata, (0, 2, 1)).tostring()
shape = (shape[2], shape[0])
elif ca == 0:
strdata = transpose(bytedata, (1, 2, 0)).tostring()
shape = (shape[2], shape[1])
if mode is None:
if numch == 3:
mode = 'RGB'
else:
mode = 'RGBA'
if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
raise ValueError(_errstr)
if mode in ['RGB', 'YCbCr']:
if numch != 3:
raise ValueError("Invalid array shape for mode.")
if mode in ['RGBA', 'CMYK']:
if numch != 4:
raise ValueError("Invalid array shape for mode.")
# Here we know data and mode is correct
image = Image.frombytes(mode, shape, strdata)
return image | Takes a numpy array and returns a PIL image.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/pilutil.py#L160-L271 |
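For the common 2-D case, toimage byte-scales the data and builds an 'L' image whose PIL size is (columns, rows). A minimal round-trip sketch of that, assuming Pillow and NumPy (it uses tobytes() where the older code above calls tostring()):

```python
import numpy as np
from PIL import Image

arr = np.linspace(0.0, 1.0, 12).reshape(3, 4)
bytedata = ((arr - arr.min()) * 255.0 / (arr.max() - arr.min()) + 0.5).astype(np.uint8)
im = Image.frombytes('L', (arr.shape[1], arr.shape[0]), bytedata.tobytes())

assert im.size == (4, 3)             # PIL size is (width, height)
assert np.array(im).shape == (3, 4)  # back to (rows, columns)
```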
aetros/aetros-cli | aetros/utils/pilutil.py | imresize | def imresize(arr, size, interp='bilinear', mode=None):
"""
Resize an image.
Parameters
----------
arr : ndarray
The array of image to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image.
interp : str, optional
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic'
or 'cubic').
mode : str, optional
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
Returns
-------
imresize : ndarray
The resized array of image.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL.
"""
im = toimage(arr, mode=mode)
ts = type(size)
if issubdtype(ts, int):
percent = size / 100.0
size = tuple((array(im.size)*percent).astype(int))
elif issubdtype(type(size), float):
size = tuple((array(im.size)*size).astype(int))
else:
size = (size[1], size[0])
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
imnew = im.resize(size, resample=func[interp])
return fromimage(imnew) | python | def imresize(arr, size, interp='bilinear', mode=None):
"""
Resize an image.
Parameters
----------
arr : ndarray
The array of image to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image.
interp : str, optional
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic'
or 'cubic').
mode : str, optional
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
Returns
-------
imresize : ndarray
The resized array of image.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL.
"""
im = toimage(arr, mode=mode)
ts = type(size)
if issubdtype(ts, int):
percent = size / 100.0
size = tuple((array(im.size)*percent).astype(int))
elif issubdtype(type(size), float):
size = tuple((array(im.size)*size).astype(int))
else:
size = (size[1], size[0])
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
imnew = im.resize(size, resample=func[interp])
return fromimage(imnew) | Resize an image.
Parameters
----------
arr : ndarray
The array of image to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image.
interp : str, optional
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic'
or 'cubic').
mode : str, optional
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
Returns
-------
imresize : ndarray
The resized array of image.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/pilutil.py#L275-L318 |
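The size argument of imresize is interpreted three ways: an int is a percentage, a float is a fraction, and a tuple is (height, width), which gets flipped to PIL's (width, height). A standalone restatement of that dispatch (target_size is a hypothetical helper):

```python
import numpy as np

def target_size(pil_size, size):
    # pil_size is PIL's (width, height)
    if isinstance(size, int):
        return tuple(int(v) for v in np.array(pil_size) * (size / 100.0))
    if isinstance(size, float):
        return tuple(int(v) for v in np.array(pil_size) * size)
    return (size[1], size[0])  # (height, width) -> (width, height)

assert target_size((200, 100), 50) == (100, 50)
assert target_size((200, 100), 0.5) == (100, 50)
assert target_size((200, 100), (64, 128)) == (128, 64)
```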
aetros/aetros-cli | aetros/client.py | BackendClient.connect | def connect(self, channel):
"""
In the write thread we detect that the connection is no longer alive and keep trying again.
Up to the third connection attempt we report to the user; after that we keep trying silently.
Also, when more than 10 connection attempts are detected, we delay an extra 15 seconds.
"""
if self.connection_tries > 10:
time.sleep(10)
if self.in_connecting[channel]:
return False
self.in_connecting[channel] = True
self.logger.debug('[%s] Wanna connect ...' % (channel, ))
try:
if self.is_connected(channel) or self.online is False:
if self.is_connected(channel):
self.logger.debug('[%s] Already connected' % (channel, ))
if self.online is False:
self.logger.debug('[%s] self.online=False' % (channel, ))
return True
self.channel_lock[channel].acquire()
self.connected[channel] = None
self.registered[channel] = None
self.ssh_stream[channel] = False
self.ssh_channel[channel] = False
messages = None
stderrdata = ''
try:
if not self.ssh_stream[channel]:
self.logger.debug('[%s] Open ssh connection' % (channel, ))
self.ssh_stream[channel] = create_ssh_stream(self.config, exit_on_failure=False)
self.logger.debug('[%s] open channel' % (channel, ))
self.ssh_channel[channel] = self.ssh_stream[channel].get_transport().open_session()
self.ssh_channel[channel].exec_command('stream')
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
self.connected[channel] = False
self.registered[channel] = False
self.logger.debug('[%s] connection failed: %s' % (channel, str(e)))
return False
finally:
self.channel_lock[channel].release()
if self.ssh_channel[channel]:
messages = self.wait_for_at_least_one_message(channel)
if not messages:
stderrdata = self.ssh_channel[channel].recv_stderr().decode("utf-8").strip()
self.connected[channel] = False
self.registered[channel] = False
else:
self.logger.debug('[%s] opened and received %d messages' % (channel, len(messages)))
self.connected[channel] = True
self.registered[channel] = self.on_connect(self.was_connected_once[channel], channel)
self.connected_since[channel] = time.time()
if channel == '' and self.registered[channel] and self.was_connected_once[channel]:
self.logger.info("Successfully reconnected.")
if not self.registered[channel]:
# make sure to close channel and connection first
try:
self.ssh_channel[channel] and self.ssh_channel[channel].close()
except: pass
try:
self.ssh_stream[channel] and self.ssh_stream[channel].close()
except: pass
self.logger.debug("[%s] Client: registration failed. stderrdata: %s" % (channel, stderrdata))
self.connected[channel] = False
try:
self.logger.debug('[%s] Client: ssh_stream close due to registration failure' % (channel, ))
self.ssh_stream[channel].close()
except (KeyboardInterrupt, SystemExit):
raise
self.connection_tries += 1
if not self.was_connected_once[channel] and self.go_offline_on_first_failed_attempt:
# initial try needs to be online, otherwise we go offline
self.go_offline()
if stderrdata:
if 'Connection refused' not in stderrdata and 'Permission denied' not in stderrdata:
self.logger.error(stderrdata)
if 'Permission denied' in stderrdata:
if self.connection_tries < 3:
self.logger.warning("Access denied. Did you setup your SSH public key correctly "
"and saved it in your AETROS Trainer user account?")
self.close()
sys.exit(1)
self.connection_error(channel, "Connection error during connecting to %s: %s" % (self.host, str(stderrdata)))
else:
self.was_connected_once[channel] = True
except Exception as error:
self.connection_error(channel, error)
finally:
self.in_connecting[channel] = False
return self.is_connected(channel) | python | def connect(self, channel):
"""
In the write thread we detect that the connection is no longer alive and keep trying again.
Up to the third connection attempt we report to the user; after that we keep trying silently.
Also, when more than 10 connection attempts are detected, we delay an extra 15 seconds.
"""
if self.connection_tries > 10:
time.sleep(10)
if self.in_connecting[channel]:
return False
self.in_connecting[channel] = True
self.logger.debug('[%s] Wanna connect ...' % (channel, ))
try:
if self.is_connected(channel) or self.online is False:
if self.is_connected(channel):
self.logger.debug('[%s] Already connected' % (channel, ))
if self.online is False:
self.logger.debug('[%s] self.online=False' % (channel, ))
return True
self.channel_lock[channel].acquire()
self.connected[channel] = None
self.registered[channel] = None
self.ssh_stream[channel] = False
self.ssh_channel[channel] = False
messages = None
stderrdata = ''
try:
if not self.ssh_stream[channel]:
self.logger.debug('[%s] Open ssh connection' % (channel, ))
self.ssh_stream[channel] = create_ssh_stream(self.config, exit_on_failure=False)
self.logger.debug('[%s] open channel' % (channel, ))
self.ssh_channel[channel] = self.ssh_stream[channel].get_transport().open_session()
self.ssh_channel[channel].exec_command('stream')
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
self.connected[channel] = False
self.registered[channel] = False
self.logger.debug('[%s] connection failed: %s' % (channel, str(e)))
return False
finally:
self.channel_lock[channel].release()
if self.ssh_channel[channel]:
messages = self.wait_for_at_least_one_message(channel)
if not messages:
stderrdata = self.ssh_channel[channel].recv_stderr().decode("utf-8").strip()
self.connected[channel] = False
self.registered[channel] = False
else:
self.logger.debug('[%s] opened and received %d messages' % (channel, len(messages)))
self.connected[channel] = True
self.registered[channel] = self.on_connect(self.was_connected_once[channel], channel)
self.connected_since[channel] = time.time()
if channel == '' and self.registered[channel] and self.was_connected_once[channel]:
self.logger.info("Successfully reconnected.")
if not self.registered[channel]:
# make sure to close channel and connection first
try:
self.ssh_channel[channel] and self.ssh_channel[channel].close()
except: pass
try:
self.ssh_stream[channel] and self.ssh_stream[channel].close()
except: pass
self.logger.debug("[%s] Client: registration failed. stderrdata: %s" % (channel, stderrdata))
self.connected[channel] = False
try:
self.logger.debug('[%s] Client: ssh_stream close due to registration failure' % (channel, ))
self.ssh_stream[channel].close()
except (KeyboardInterrupt, SystemExit):
raise
self.connection_tries += 1
if not self.was_connected_once[channel] and self.go_offline_on_first_failed_attempt:
# initial try needs to be online, otherwise we go offline
self.go_offline()
if stderrdata:
if 'Connection refused' not in stderrdata and 'Permission denied' not in stderrdata:
self.logger.error(stderrdata)
if 'Permission denied' in stderrdata:
if self.connection_tries < 3:
self.logger.warning("Access denied. Did you setup your SSH public key correctly "
"and saved it in your AETROS Trainer user account?")
self.close()
sys.exit(1)
self.connection_error(channel, "Connection error during connecting to %s: %s" % (self.host, str(stderrdata)))
else:
self.was_connected_once[channel] = True
except Exception as error:
self.connection_error(channel, error)
finally:
self.in_connecting[channel] = False
return self.is_connected(channel) | In the write thread we detect that the connection is no longer alive and keep trying again.
Up to the third connection attempt we report to the user; after that we keep trying silently.
Also, when more than 10 connection attempts are detected, we delay an extra 15 seconds. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L196-L310 |
aetros/aetros-cli | aetros/client.py | BackendClient._end_channel | def _end_channel(self, channel):
"""
Soft end of ssh channel. End the writing thread as soon as the message queue is empty.
"""
self.stop_on_empty_queue[channel] = True
# by joining it we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
write_thread = self.thread_write_instances[channel]
thread_join_non_blocking(write_thread) | python | def _end_channel(self, channel):
"""
Soft end of ssh channel. End the writing thread as soon as the message queue is empty.
"""
self.stop_on_empty_queue[channel] = True
# by joining it we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
write_thread = self.thread_write_instances[channel]
thread_join_non_blocking(write_thread) | Soft end of ssh channel. End the writing thread as soon as the message queue is empty. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L458-L468 |
aetros/aetros-cli | aetros/client.py | BackendClient.wait_sending_last_messages | def wait_sending_last_messages(self):
"""
Requests all channels to close and waits for it.
"""
if self.active and self.online is not False:
self.logger.debug("client sends last %s messages ..."
% ([str(i) + ':' + str(len(x)) for i, x in six.iteritems(self.queues)],))
for channel, messages in six.iteritems(self.queues):
for idx, message in enumerate(messages):
self.logger.debug("[%s] %d: %s" % (channel, idx, str(message)[0:120]))
# send all missing messages
# by joining we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
for channel in six.iterkeys(self.ssh_channel):
if channel != '':
self._end_channel(channel)
# last is control channel
self._end_channel('') | python | def wait_sending_last_messages(self):
"""
Requests all channels to close and waits for it.
"""
if self.active and self.online is not False:
self.logger.debug("client sends last %s messages ..."
% ([str(i) + ':' + str(len(x)) for i, x in six.iteritems(self.queues)],))
for channel, messages in six.iteritems(self.queues):
for idx, message in enumerate(messages):
self.logger.debug("[%s] %d: %s" % (channel, idx, str(message)[0:120]))
# send all missing messages
# by joining we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
for channel in six.iterkeys(self.ssh_channel):
if channel != '':
self._end_channel(channel)
# last is control channel
self._end_channel('') | Requests all channels to close and waits for it. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L470-L491 |
aetros/aetros-cli | aetros/client.py | BackendClient.wait_until_queue_empty | def wait_until_queue_empty(self, channels, report=True, clear_end=True):
"""
Waits until all queues of channels are empty.
"""
state = {'message': ''}
self.logger.debug("wait_until_queue_empty: report=%s %s"
% (str(report), str([channel+':'+str(len(self.queues[channel])) for channel in channels]), ))
queues = []
for channel in channels:
queues += self.queues[channel][:]
def print_progress():
if report:
self.logger.debug("all_empty=%s" % (str(all_empty),))
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
state['message'] = "%.2f kB/s // %.2fkB of %.2fkB // %.2f%%" \
% (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024,
(self.bytes_sent / self.bytes_total * 100) if self.bytes_total else 0)
sys.__stderr__.write(state['message'])
sys.__stderr__.flush()
while True:
all_empty = all(m['_sent'] for m in queues)
print_progress()
if all_empty:
break
time.sleep(0.2)
print_progress()
if report and clear_end:
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
sys.__stderr__.flush() | python | def wait_until_queue_empty(self, channels, report=True, clear_end=True):
"""
Waits until all queues of channels are empty.
"""
state = {'message': ''}
self.logger.debug("wait_until_queue_empty: report=%s %s"
% (str(report), str([channel+':'+str(len(self.queues[channel])) for channel in channels]), ))
queues = []
for channel in channels:
queues += self.queues[channel][:]
def print_progress():
if report:
self.logger.debug("all_empty=%s" % (str(all_empty),))
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
state['message'] = "%.2f kB/s // %.2fkB of %.2fkB // %.2f%%" \
% (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024,
(self.bytes_sent / self.bytes_total * 100) if self.bytes_total else 0)
sys.__stderr__.write(state['message'])
sys.__stderr__.flush()
while True:
all_empty = all(m['_sent'] for m in queues)
print_progress()
if all_empty:
break
time.sleep(0.2)
print_progress()
if report and clear_end:
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
sys.__stderr__.flush() | Waits until all queues of channels are empty. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L496-L537 |
aetros/aetros-cli | aetros/client.py | BackendClient.send_message | def send_message(self, message, channel):
"""
Internal. Sends the actual message from a queue entry.
"""
if not self.is_connected(channel):
return False
message['_sending'] = True
if '_data' in message:
data = message['_data']
else:
data = msgpack.packb(message, default=invalid_json_values)
self.bytes_total += len(data)
message['_bytes_sent'] = 0
message['_id'] = -1
if is_debug2():
sys.__stderr__.write("[%s] send message: %s\n" % (channel, str(msgpack.unpackb(data))[0:180]))
try:
while data:
start = time.time()
bytes_sent = self.ssh_channel[channel].send(data)
data = data[bytes_sent:]
message['_bytes_sent'] += bytes_sent
self.bytes_sent += bytes_sent
end = time.time()
self.write_speeds.append(bytes_sent / (end-start))
speeds_len = len(self.write_speeds)
if speeds_len:
self.bytes_speed = sum(self.write_speeds) / speeds_len
if speeds_len > 10:
self.write_speeds = self.write_speeds[5:]
message['_sent'] = True
return True
except (KeyboardInterrupt, SystemExit):
if message['_sent']:
return message['_bytes_sent']
return False
except Exception as error:
self.connection_error(channel, error)
return False | python | def send_message(self, message, channel):
"""
Internal. Sends the actual message from a queue entry.
"""
if not self.is_connected(channel):
return False
message['_sending'] = True
if '_data' in message:
data = message['_data']
else:
data = msgpack.packb(message, default=invalid_json_values)
self.bytes_total += len(data)
message['_bytes_sent'] = 0
message['_id'] = -1
if is_debug2():
sys.__stderr__.write("[%s] send message: %s\n" % (channel, str(msgpack.unpackb(data))[0:180]))
try:
while data:
start = time.time()
bytes_sent = self.ssh_channel[channel].send(data)
data = data[bytes_sent:]
message['_bytes_sent'] += bytes_sent
self.bytes_sent += bytes_sent
end = time.time()
self.write_speeds.append(bytes_sent / (end-start))
speeds_len = len(self.write_speeds)
if speeds_len:
self.bytes_speed = sum(self.write_speeds) / speeds_len
if speeds_len > 10:
self.write_speeds = self.write_speeds[5:]
message['_sent'] = True
return True
except (KeyboardInterrupt, SystemExit):
if message['_sent']:
return message['_bytes_sent']
return False
except Exception as error:
self.connection_error(channel, error)
return False | Internal. Sends the actual message from a queue entry. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L647-L696 |
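Messages are framed with msgpack and written to the SSH channel as raw bytes; the reader side (wait_for_at_least_one_message / read below) feeds chunks into a streaming Unpacker. A self-contained sketch of that framing, assuming msgpack-python >= 0.6 (the code above uses the older encoding='utf-8' argument):

```python
import msgpack

payload = msgpack.packb({'type': 'log', 'data': 'hello'})

unpacker = msgpack.Unpacker(raw=False)
for chunk in (payload[:3], payload[3:]):   # simulate partial reads from the channel
    unpacker.feed(chunk)

assert list(unpacker) == [{'type': 'log', 'data': 'hello'}]
```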
aetros/aetros-cli | aetros/client.py | BackendClient.wait_for_at_least_one_message | def wait_for_at_least_one_message(self, channel):
"""
Reads until we receive at least one message we can unpack. Return all found messages.
"""
unpacker = msgpack.Unpacker(encoding='utf-8')
while True:
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append( len(chunk) / (end-start) )
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
if chunk == b'':
# happens only when connection broke. If nothing is to be received, it hangs instead.
self.connection_error(channel, 'Connection broken w')
return False
except Exception as error:
self.connection_error(channel, error)
raise
unpacker.feed(chunk)
messages = [m for m in unpacker]
if messages:
return messages | python | def wait_for_at_least_one_message(self, channel):
"""
Reads until we receive at least one message we can unpack. Return all found messages.
"""
unpacker = msgpack.Unpacker(encoding='utf-8')
while True:
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append( len(chunk) / (end-start) )
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
if chunk == b'':
# happens only when connection broke. If nothing is to be received, it hangs instead.
self.connection_error(channel, 'Connection broken w')
return False
except Exception as error:
self.connection_error(channel, error)
raise
unpacker.feed(chunk)
messages = [m for m in unpacker]
if messages:
return messages | Reads until we receive at least one message we can unpack. Return all found messages. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L712-L740 |
aetros/aetros-cli | aetros/client.py | BackendClient.read | def read(self, channel):
"""
Reads from the socket and tries to unpack the message. If successful (because msgpack was able to unpack)
then we return that message. Else None. Keep calling .read() when new data is available so we try it
again.
"""
if not self.ssh_channel[channel].recv_ready():
return
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append(len(chunk) / (end-start))
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
except Exception as error:
self.connection_error(channel, error)
raise
if chunk == b'':
# socket connection broken
self.connection_error(channel, 'Connection broken')
return None
# self.read_buffer.seek(0, 2) #make sure we write at the end
self.read_unpacker.feed(chunk)
# self.read_buffer.seek(0)
messages = [m for m in self.read_unpacker]
return messages if messages else None | python | def read(self, channel):
"""
Reads from the socket and tries to unpack the message. If successful (because msgpack was able to unpack)
then we return that message. Else None. Keep calling .read() when new data is available so we try it
again.
"""
if not self.ssh_channel[channel].recv_ready():
return
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append(len(chunk) / (end-start))
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
except Exception as error:
self.connection_error(channel, error)
raise
if chunk == b'':
# socket connection broken
self.connection_error(channel, 'Connection broken')
return None
# self.read_buffer.seek(0, 2) #make sure we write at the end
self.read_unpacker.feed(chunk)
# self.read_buffer.seek(0)
messages = [m for m in self.read_unpacker]
return messages if messages else None | Reads from the socket and tries to unpack the message. If successful (because msgpack was able to unpack)
then we return that message. Else None. Keep calling .read() when new data is available so we try it
again. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L742-L776 |
aetros/aetros-cli | aetros/utils/__init__.py | raise_sigint | def raise_sigint():
"""
Raising the SIGINT signal in the current process and all sub-processes.
os.kill() only issues a signal in the current process (without subprocesses).
CTRL+C on the console sends the signal to the process group (which we need).
"""
if hasattr(signal, 'CTRL_C_EVENT'):
# windows. Need CTRL_C_EVENT to raise the signal in the whole process group
os.kill(os.getpid(), signal.CTRL_C_EVENT)
else:
# unix.
pgid = os.getpgid(os.getpid())
if pgid == 1:
os.kill(os.getpid(), signal.SIGINT)
else:
os.killpg(os.getpgid(os.getpid()), signal.SIGINT) | python | def raise_sigint():
"""
Raising the SIGINT signal in the current process and all sub-processes.
os.kill() only issues a signal in the current process (without subprocesses).
CTRL+C on the console sends the signal to the process group (which we need).
"""
if hasattr(signal, 'CTRL_C_EVENT'):
# windows. Need CTRL_C_EVENT to raise the signal in the whole process group
os.kill(os.getpid(), signal.CTRL_C_EVENT)
else:
# unix.
pgid = os.getpgid(os.getpid())
if pgid == 1:
os.kill(os.getpid(), signal.SIGINT)
else:
os.killpg(os.getpgid(os.getpid()), signal.SIGINT) | Raising the SIGINT signal in the current process and all sub-processes.
os.kill() only issues a signal in the current process (without subprocesses).
CTRL+C on the console sends the signal to the process group (which we need). | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/__init__.py#L813-L829 |
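A small, harmless sketch of what raise_sigint does: install a SIGINT handler, then signal either the current process (Windows CTRL_C_EVENT) or the whole process group (Unix), so subprocesses in the same group receive it too. The pgid == 1 guard from the original is left out here.

```python
import os
import signal

signal.signal(signal.SIGINT, lambda signum, frame: print('got SIGINT', signum))

if hasattr(signal, 'CTRL_C_EVENT'):                    # Windows branch
    os.kill(os.getpid(), signal.CTRL_C_EVENT)
else:                                                  # Unix: signal the whole process group
    os.killpg(os.getpgid(os.getpid()), signal.SIGINT)
```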
aetros/aetros-cli | aetros/utils/__init__.py | human_size | def human_size(size_bytes, precision=0):
"""
Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
"""
if size_bytes == 1:
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < 1024.0:
break
num /= 1024.0
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix) | python | def human_size(size_bytes, precision=0):
"""
Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
"""
if size_bytes == 1:
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < 1024.0:
break
num /= 1024.0
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix) | Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/__init__.py#L877-L900 |
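A few calls illustrating the suffix table and precision rules above, assuming the aetros package is importable; the expected outputs in the comments follow from the code shown, not from a separate run.

```python
from aetros.utils import human_size

print(human_size(1))                # '1 byte'
print(human_size(443 * 1024))       # '443 KB'  -> KB is formatted with precision 0
print(human_size(4.3 * 1024 ** 2))  # '4.3 MB'  -> MB and above get one or more decimals
```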
aetros/aetros-cli | aetros/utils/__init__.py | array_to_img | def array_to_img(x, scale=True):
"""
x should be shape (channels, width, height)
"""
from PIL import Image
if x.ndim != 3:
raise Exception('Unsupported shape : ', str(x.shape), '. Need (channels, width, height)')
if scale:
x += max(-np.min(x), 0)
x /= np.max(x)
x *= 255
if x.shape[0] == 3:
# RGB
if x.dtype != 'uint8':
x = x.astype('uint8')
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[0] == 1:
# grayscale
if x.dtype != 'uint8':
x = x.astype('uint8')
return Image.fromarray(x.reshape(x.shape[1], x.shape[2]), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[0]) | python | def array_to_img(x, scale=True):
"""
x should be shape (channels, width, height)
"""
from PIL import Image
if x.ndim != 3:
raise Exception('Unsupported shape : ', str(x.shape), '. Need (channels, width, height)')
if scale:
x += max(-np.min(x), 0)
x /= np.max(x)
x *= 255
if x.shape[0] == 3:
# RGB
if x.dtype != 'uint8':
x = x.astype('uint8')
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[0] == 1:
# grayscale
if x.dtype != 'uint8':
x = x.astype('uint8')
return Image.fromarray(x.reshape(x.shape[1], x.shape[2]), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[0]) | x should be shape (channels, width, height) | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/__init__.py#L903-L925 |
aetros/aetros-cli | aetros/Trainer.py | Trainer.set_generator_validation_nb | def set_generator_validation_nb(self, number):
"""
sets self.nb_val_samples which is used in model.fit if input is a generator
:param number:
:return:
"""
self.nb_val_samples = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.nb_val_samples += self.get_batch_size() - diff_to_batch
import keras
if '1' != keras.__version__[0]:
self.nb_val_samples = self.nb_val_samples // self.get_batch_size() | python | def set_generator_validation_nb(self, number):
"""
sets self.nb_val_samples which is used in model.fit if input is a generator
:param number:
:return:
"""
self.nb_val_samples = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.nb_val_samples += self.get_batch_size() - diff_to_batch
import keras
if '1' != keras.__version__[0]:
self.nb_val_samples = self.nb_val_samples // self.get_batch_size() | sets self.nb_val_samples which is used in model.fit if input is a generator
:param number:
:return: | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/Trainer.py#L64-L78 |
aetros/aetros-cli | aetros/Trainer.py | Trainer.set_generator_training_nb | def set_generator_training_nb(self, number):
"""
sets self.samples_per_epoch which is used in model.fit if input is a generator
:param number:
:return:
"""
self.samples_per_epoch = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.samples_per_epoch += self.get_batch_size() - diff_to_batch | python | def set_generator_training_nb(self, number):
"""
sets self.samples_per_epoch which is used in model.fit if input is a generator
:param number:
:return:
"""
self.samples_per_epoch = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.samples_per_epoch += self.get_batch_size() - diff_to_batch | sets self.samples_per_epoch which is used in model.fit if input is a generator
:param number:
:return: | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/Trainer.py#L80-L90 |
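Both setters above round the sample count up to the next multiple of the batch size before handing it to model.fit. A standalone restatement of that rounding (round_up_to_batch is a hypothetical helper):

```python
def round_up_to_batch(n, batch_size):
    # e.g. 1000 samples with batch size 32 becomes 1024 (32 * 32)
    remainder = n % batch_size
    return n if remainder == 0 else n + (batch_size - remainder)

assert round_up_to_batch(1000, 32) == 1024
assert round_up_to_batch(1024, 32) == 1024
```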
aetros/aetros-cli | aetros/logger.py | GeneralLogger.attach | def attach(self, buffer, read_line=None):
"""
Read buffer until end (read() returns '') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with block read() or readline() method
:param read_line: callable or True to read line per line. If callable is given, it will be executed per line
and ignores does not redirect the line to stdout/logger when callable returns False.
"""
bid = id(buffer)
self.attach_last_messages[bid] = b''
def reader():
current_line = b''
def handle_line(buf):
if chunk == b'':
return
if read_line and callable(read_line):
res = read_line(buf)
if res is False:
return False
elif res is not None:
buf = res
if hasattr(buf, 'encode'):
buf = buf.encode('utf-8')
self.attach_last_messages[bid] += buf
if len(self.attach_last_messages[bid]) > 21 * 1024:
self.attach_last_messages[bid] = self.attach_last_messages[bid][-20 * 1024:]
self.write(buf)
flush_char = b'\n'
while True:
try:
# needs to be 1 so we fetch data in near real-time
chunk = buffer.read(1)
if chunk == b'':
if current_line:
handle_line(current_line)
return
current_line += chunk
while flush_char in current_line:
pos = current_line.find(flush_char)
line = current_line[:pos+1]
current_line = current_line[pos+1:]
handle_line(line)
# todo, periodically flush by '\r' only (progress bars for example)
# and make sure only necessary data is sent (by applying \r and \b control characters)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# we need to make sure, we continue to read otherwise the process of this buffer
# will block and we have a stuck process.
sys.__stderr__.write(traceback.format_exc() + '\n')
sys.__stderr__.flush()
thread = Thread(target=reader)
thread.daemon = True
thread.start()
def wait():
thread_join_non_blocking(thread)
self.send_buffer()
return wait | python | def attach(self, buffer, read_line=None):
"""
Read buffer until end (read() returns '') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with block read() or readline() method
:param read_line: callable or True to read line per line. If a callable is given, it is executed per line
and the line is not redirected to stdout/logger when the callable returns False.
"""
bid = id(buffer)
self.attach_last_messages[bid] = b''
def reader():
current_line = b''
def handle_line(buf):
if chunk == b'':
return
if read_line and callable(read_line):
res = read_line(buf)
if res is False:
return False
elif res is not None:
buf = res
if hasattr(buf, 'encode'):
buf = buf.encode('utf-8')
self.attach_last_messages[bid] += buf
if len(self.attach_last_messages[bid]) > 21 * 1024:
self.attach_last_messages[bid] = self.attach_last_messages[bid][-20 * 1024:]
self.write(buf)
flush_char = b'\n'
while True:
try:
# needs to be 1 so we fetch data in near real-time
chunk = buffer.read(1)
if chunk == b'':
if current_line:
handle_line(current_line)
return
current_line += chunk
while flush_char in current_line:
pos = current_line.find(flush_char)
line = current_line[:pos+1]
current_line = current_line[pos+1:]
handle_line(line)
# todo, periodically flush by '\r' only (progress bars for example)
# and make sure only necessary data is sent (by applying \r and \b control characters)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# we need to make sure, we continue to read otherwise the process of this buffer
# will block and we have a stuck process.
sys.__stderr__.write(traceback.format_exc() + '\n')
sys.__stderr__.flush()
thread = Thread(target=reader)
thread.daemon = True
thread.start()
def wait():
thread_join_non_blocking(thread)
self.send_buffer()
return wait | Read buffer until end (read() returns '') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with block read() or readline() method
:param read_line: callable or True to read line per line. If a callable is given, it is executed per line
and the line is not redirected to stdout/logger when the callable returns False. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/logger.py#L69-L143 |
aetros/aetros-cli | aetros/utils/image.py | upscale | def upscale(image, ratio):
"""
return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1)
"""
if not isinstance(image, np.ndarray):
raise ValueError('Expected ndarray')
if ratio < 1:
raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)
width = int(math.floor(image.shape[1] * ratio))
height = int(math.floor(image.shape[0] * ratio))
channels = image.shape[2]
out = np.ndarray((height, width, channels), dtype=np.uint8)
for x, y in np.ndindex((width, height)):
out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]
return out | python | def upscale(image, ratio):
"""
return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1)
"""
if not isinstance(image, np.ndarray):
raise ValueError('Expected ndarray')
if ratio < 1:
raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)
width = int(math.floor(image.shape[1] * ratio))
height = int(math.floor(image.shape[0] * ratio))
channels = image.shape[2]
out = np.ndarray((height, width, channels), dtype=np.uint8)
for x, y in np.ndindex((width, height)):
out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]
return out | return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1) | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L40-L57 |
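A quick usage sketch of the nearest-neighbour upscaling above, assuming NumPy and that the module path shown (aetros.utils.image) is importable: every source pixel becomes a ratio x ratio block.

```python
import numpy as np
from aetros.utils.image import upscale

img = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)   # 2x2 RGB image
big = upscale(img, 2)

assert big.shape == (4, 4, 3)
assert (big[0:2, 0:2] == img[0, 0]).all()  # top-left 2x2 block repeats pixel (0, 0)
```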
aetros/aetros-cli | aetros/utils/image.py | resize_image | def resize_image(image, height, width,
channels=None,
resize_mode=None
):
"""
Resizes an image and returns it as a np.array
Arguments:
image -- a PIL.Image or numpy.ndarray
height -- height of new image
width -- width of new image
Keyword Arguments:
channels -- channels of new image (stays unchanged if not specified)
resize_mode -- can be crop, squash, fill or half_crop
"""
if resize_mode is None:
resize_mode = 'squash'
if resize_mode not in ['crop', 'squash', 'fill', 'half_crop']:
raise ValueError('resize_mode "%s" not supported' % resize_mode)
if channels not in [None, 1, 3]:
raise ValueError('unsupported number of channels: %s' % channels)
if isinstance(image, PIL.Image.Image):
# Convert image mode (channels)
if channels is None:
image_mode = image.mode
if image_mode == 'L':
channels = 1
elif image_mode == 'RGB':
channels = 3
else:
raise ValueError('unknown image mode "%s"' % image_mode)
elif channels == 1:
# 8-bit pixels, black and white
image_mode = 'L'
elif channels == 3:
# 3x8-bit pixels, true color
image_mode = 'RGB'
if image.mode != image_mode:
image = image.convert(image_mode)
image = np.array(image)
elif isinstance(image, np.ndarray):
if image.dtype != np.uint8:
image = image.astype(np.uint8)
if image.ndim == 3 and image.shape[2] == 1:
image = image.reshape(image.shape[:2])
if channels is None:
if image.ndim == 2:
channels = 1
elif image.ndim == 3 and image.shape[2] == 3:
channels = 3
else:
raise ValueError('invalid image shape: %s' % (image.shape,))
elif channels == 1:
if image.ndim != 2:
if image.ndim == 3 and image.shape[2] == 3:
# color to grayscale
image = np.dot(image, [0.299, 0.587, 0.114]).astype(np.uint8)
else:
raise ValueError('invalid image shape: %s' % (image.shape,))
elif channels == 3:
if image.ndim == 2:
# grayscale to color
image = np.repeat(image, 3).reshape(image.shape + (3,))
elif image.shape[2] != 3:
raise ValueError('invalid image shape: %s' % (image.shape,))
else:
raise ValueError('resize_image() expected a PIL.Image.Image or a numpy.ndarray')
# No need to resize
if image.shape[0] == height and image.shape[1] == width:
return image
# Resize
interp = 'bilinear'
width_ratio = float(image.shape[1]) / width
height_ratio = float(image.shape[0]) / height
if resize_mode == 'squash' or width_ratio == height_ratio:
return imresize(image, (height, width), interp=interp)
elif resize_mode == 'crop':
# resize to smallest of ratios (relatively larger image), keeping aspect ratio
if width_ratio > height_ratio:
resize_height = height
resize_width = int(round(image.shape[1] / height_ratio))
else:
resize_width = width
resize_height = int(round(image.shape[0] / width_ratio))
image = imresize(image, (resize_height, resize_width), interp=interp)
# chop off ends of dimension that is still too long
if width_ratio > height_ratio:
start = int(round((resize_width - width) / 2.0))
return image[:, start:start + width]
else:
start = int(round((resize_height - height) / 2.0))
return image[start:start + height, :]
else:
if resize_mode == 'fill':
# resize to biggest of ratios (relatively smaller image), keeping aspect ratio
if width_ratio > height_ratio:
resize_width = width
resize_height = int(round(image.shape[0] / width_ratio))
if (height - resize_height) % 2 == 1:
resize_height += 1
else:
resize_height = height
resize_width = int(round(image.shape[1] / height_ratio))
if (width - resize_width) % 2 == 1:
resize_width += 1
image = imresize(image, (resize_height, resize_width), interp=interp)
elif resize_mode == 'half_crop':
# resize to average ratio keeping aspect ratio
new_ratio = (width_ratio + height_ratio) / 2.0
resize_width = int(round(image.shape[1] / new_ratio))
resize_height = int(round(image.shape[0] / new_ratio))
if width_ratio > height_ratio and (height - resize_height) % 2 == 1:
resize_height += 1
elif width_ratio < height_ratio and (width - resize_width) % 2 == 1:
resize_width += 1
image = imresize(image, (resize_height, resize_width), interp=interp)
# chop off ends of dimension that is still too long
if width_ratio > height_ratio:
start = int(round((resize_width - width) / 2.0))
image = image[:, start:start + width]
else:
start = int(round((resize_height - height) / 2.0))
image = image[start:start + height, :]
else:
raise Exception('unrecognized resize_mode "%s"' % resize_mode)
# fill ends of dimension that is too short with random noise
if width_ratio > height_ratio:
padding = (height - resize_height) // 2
noise_size = (padding, width)
if channels > 1:
noise_size += (channels,)
noise = np.random.randint(0, 255, noise_size).astype('uint8')
image = np.concatenate((noise, image, noise), axis=0)
else:
padding = (width - resize_width) // 2
noise_size = (height, padding)
if channels > 1:
noise_size += (channels,)
noise = np.random.randint(0, 255, noise_size).astype('uint8')
image = np.concatenate((noise, image, noise), axis=1)
return image | python | def resize_image(image, height, width,
channels=None,
resize_mode=None
):
"""
Resizes an image and returns it as a np.array
Arguments:
image -- a PIL.Image or numpy.ndarray
height -- height of new image
width -- width of new image
Keyword Arguments:
channels -- channels of new image (stays unchanged if not specified)
resize_mode -- can be crop, squash, fill or half_crop
"""
if resize_mode is None:
resize_mode = 'squash'
if resize_mode not in ['crop', 'squash', 'fill', 'half_crop']:
raise ValueError('resize_mode "%s" not supported' % resize_mode)
if channels not in [None, 1, 3]:
raise ValueError('unsupported number of channels: %s' % channels)
if isinstance(image, PIL.Image.Image):
# Convert image mode (channels)
if channels is None:
image_mode = image.mode
if image_mode == 'L':
channels = 1
elif image_mode == 'RGB':
channels = 3
else:
raise ValueError('unknown image mode "%s"' % image_mode)
elif channels == 1:
# 8-bit pixels, black and white
image_mode = 'L'
elif channels == 3:
# 3x8-bit pixels, true color
image_mode = 'RGB'
if image.mode != image_mode:
image = image.convert(image_mode)
image = np.array(image)
elif isinstance(image, np.ndarray):
if image.dtype != np.uint8:
image = image.astype(np.uint8)
if image.ndim == 3 and image.shape[2] == 1:
image = image.reshape(image.shape[:2])
if channels is None:
if image.ndim == 2:
channels = 1
elif image.ndim == 3 and image.shape[2] == 3:
channels = 3
else:
raise ValueError('invalid image shape: %s' % (image.shape,))
elif channels == 1:
if image.ndim != 2:
if image.ndim == 3 and image.shape[2] == 3:
# color to grayscale
image = np.dot(image, [0.299, 0.587, 0.114]).astype(np.uint8)
else:
raise ValueError('invalid image shape: %s' % (image.shape,))
elif channels == 3:
if image.ndim == 2:
# grayscale to color
image = np.repeat(image, 3).reshape(image.shape + (3,))
elif image.shape[2] != 3:
raise ValueError('invalid image shape: %s' % (image.shape,))
else:
raise ValueError('resize_image() expected a PIL.Image.Image or a numpy.ndarray')
# No need to resize
if image.shape[0] == height and image.shape[1] == width:
return image
# Resize
interp = 'bilinear'
width_ratio = float(image.shape[1]) / width
height_ratio = float(image.shape[0]) / height
if resize_mode == 'squash' or width_ratio == height_ratio:
return imresize(image, (height, width), interp=interp)
elif resize_mode == 'crop':
# resize to smallest of ratios (relatively larger image), keeping aspect ratio
if width_ratio > height_ratio:
resize_height = height
resize_width = int(round(image.shape[1] / height_ratio))
else:
resize_width = width
resize_height = int(round(image.shape[0] / width_ratio))
image = imresize(image, (resize_height, resize_width), interp=interp)
# chop off ends of dimension that is still too long
if width_ratio > height_ratio:
start = int(round((resize_width - width) / 2.0))
return image[:, start:start + width]
else:
start = int(round((resize_height - height) / 2.0))
return image[start:start + height, :]
else:
if resize_mode == 'fill':
# resize to biggest of ratios (relatively smaller image), keeping aspect ratio
if width_ratio > height_ratio:
resize_width = width
resize_height = int(round(image.shape[0] / width_ratio))
if (height - resize_height) % 2 == 1:
resize_height += 1
else:
resize_height = height
resize_width = int(round(image.shape[1] / height_ratio))
if (width - resize_width) % 2 == 1:
resize_width += 1
image = imresize(image, (resize_height, resize_width), interp=interp)
elif resize_mode == 'half_crop':
# resize to average ratio keeping aspect ratio
new_ratio = (width_ratio + height_ratio) / 2.0
resize_width = int(round(image.shape[1] / new_ratio))
resize_height = int(round(image.shape[0] / new_ratio))
if width_ratio > height_ratio and (height - resize_height) % 2 == 1:
resize_height += 1
elif width_ratio < height_ratio and (width - resize_width) % 2 == 1:
resize_width += 1
image = imresize(image, (resize_height, resize_width), interp=interp)
# chop off ends of dimension that is still too long
if width_ratio > height_ratio:
start = int(round((resize_width - width) / 2.0))
image = image[:, start:start + width]
else:
start = int(round((resize_height - height) / 2.0))
image = image[start:start + height, :]
else:
raise Exception('unrecognized resize_mode "%s"' % resize_mode)
# fill ends of dimension that is too short with random noise
if width_ratio > height_ratio:
padding = (height - resize_height) // 2
noise_size = (padding, width)
if channels > 1:
noise_size += (channels,)
noise = np.random.randint(0, 255, noise_size).astype('uint8')
image = np.concatenate((noise, image, noise), axis=0)
else:
padding = (width - resize_width) // 2
noise_size = (height, padding)
if channels > 1:
noise_size += (channels,)
noise = np.random.randint(0, 255, noise_size).astype('uint8')
image = np.concatenate((noise, image, noise), axis=1)
return image | Resizes an image and returns it as a np.array
Arguments:
image -- a PIL.Image or numpy.ndarray
height -- height of new image
width -- width of new image
Keyword Arguments:
channels -- channels of new image (stays unchanged if not specified)
resize_mode -- can be crop, squash, fill or half_crop | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L60-L207 |
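A minimal usage sketch for the resize_image row above, assuming the module is importable as aetros.utils.image and that Pillow and numpy are installed; the input filename is a placeholder:

    from PIL import Image
    from aetros.utils.image import resize_image  # assumed import path

    img = Image.open('example.jpg')  # placeholder input file
    # 'squash' ignores the aspect ratio; 'crop', 'fill' and 'half_crop' preserve it in different ways
    squashed = resize_image(img, 224, 224, channels=3, resize_mode='squash')
    cropped = resize_image(img, 224, 224, channels=3, resize_mode='crop')
    filled = resize_image(img, 224, 224, channels=3, resize_mode='fill')
    print(squashed.shape)  # (224, 224, 3) numpy array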
aetros/aetros-cli | aetros/utils/image.py | embed_image_html | def embed_image_html(image):
"""
Returns an image embedded in HTML base64 format
(Based on Caffe's web_demo)
Arguments:
image -- a PIL.Image or np.ndarray
"""
if image is None:
return None
elif isinstance(image, PIL.Image.Image):
pass
elif isinstance(image, np.ndarray):
image = PIL.Image.fromarray(image)
else:
raise ValueError('image must be a PIL.Image or a np.ndarray')
# Read format from the image
fmt = image.format
if not fmt:
# default to JPEG
fmt = 'jpeg'
else:
fmt = fmt.lower()
string_buf = StringIO()
image.save(string_buf, format=fmt)
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/%s;base64,%s' % (fmt, data) | python | def embed_image_html(image):
"""
Returns an image embedded in HTML base64 format
(Based on Caffe's web_demo)
Arguments:
image -- a PIL.Image or np.ndarray
"""
if image is None:
return None
elif isinstance(image, PIL.Image.Image):
pass
elif isinstance(image, np.ndarray):
image = PIL.Image.fromarray(image)
else:
raise ValueError('image must be a PIL.Image or a np.ndarray')
# Read format from the image
fmt = image.format
if not fmt:
# default to JPEG
fmt = 'jpeg'
else:
fmt = fmt.lower()
string_buf = StringIO()
image.save(string_buf, format=fmt)
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/%s;base64,%s' % (fmt, data) | Returns an image embedded in HTML base64 format
(Based on Caffe's web_demo)
Arguments:
image -- a PIL.Image or np.ndarray | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L210-L237 |
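A short usage sketch for embed_image_html, again assuming the aetros.utils.image import path; the returned data URI can be placed directly in an <img> tag:

    import numpy as np
    from aetros.utils.image import embed_image_html  # assumed import path

    img = np.zeros((32, 32, 3), dtype=np.uint8)  # dummy black image
    data_uri = embed_image_html(img)
    html = '<img src="%s"/>' % data_uri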
aetros/aetros-cli | aetros/utils/image.py | add_bboxes_to_image | def add_bboxes_to_image(image, bboxes, color='red', width=1):
"""
Draw rectangles on the image for the bounding boxes
Returns a PIL.Image
Arguments:
image -- input image
bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format
Keyword arguments:
color -- color to draw the rectangles
width -- line width of the rectangles
Example:
image = Image.open(filename)
add_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700')
image.show()
"""
def expanded_bbox(bbox, n):
"""
Grow the bounding box by n pixels
"""
l = min(bbox[0][0], bbox[1][0])
r = max(bbox[0][0], bbox[1][0])
t = min(bbox[0][1], bbox[1][1])
b = max(bbox[0][1], bbox[1][1])
return ((l - n, t - n), (r + n, b + n))
from PIL import Image, ImageDraw
draw = ImageDraw.Draw(image)
for bbox in bboxes:
for n in range(width):
draw.rectangle(expanded_bbox(bbox, n), outline=color)
return image | python | def add_bboxes_to_image(image, bboxes, color='red', width=1):
"""
Draw rectangles on the image for the bounding boxes
Returns a PIL.Image
Arguments:
image -- input image
bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format
Keyword arguments:
color -- color to draw the rectangles
width -- line width of the rectangles
Example:
image = Image.open(filename)
add_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700')
image.show()
"""
def expanded_bbox(bbox, n):
"""
Grow the bounding box by n pixels
"""
l = min(bbox[0][0], bbox[1][0])
r = max(bbox[0][0], bbox[1][0])
t = min(bbox[0][1], bbox[1][1])
b = max(bbox[0][1], bbox[1][1])
return ((l - n, t - n), (r + n, b + n))
from PIL import Image, ImageDraw
draw = ImageDraw.Draw(image)
for bbox in bboxes:
for n in range(width):
draw.rectangle(expanded_bbox(bbox, n), outline=color)
return image | Draw rectangles on the image for the bounding boxes
Returns a PIL.Image
Arguments:
image -- input image
bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format
Keyword arguments:
color -- color to draw the rectangles
width -- line width of the rectangles
Example:
image = Image.open(filename)
add_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700')
image.show() | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L240-L271 |
aetros/aetros-cli | aetros/utils/image.py | get_layer_vis_square | def get_layer_vis_square(data,
allow_heatmap=True,
normalize=True,
min_img_dim=100,
max_width=1200,
channel_order='RGB',
colormap='jet',
):
"""
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square
"""
if channel_order not in ['RGB', 'BGR']:
raise ValueError('Unsupported channel_order %s' % channel_order)
if data.ndim == 1:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data.reshape((data.shape[0] * data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
# interpret as a color image
# (1, H, W,3)
if channel_order == 'BGR':
data = data[[2, 1, 0], ...] # BGR to RGB (see issue #59)
data = data.transpose(1, 2, 0)
data = data[np.newaxis, ...]
else:
# interpret as grayscale images
# (N, H, W)
pass
elif data.ndim == 4:
if data.shape[0] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(1, 2, 3, 0)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
elif data.shape[1] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(0, 2, 3, 1)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
else:
# interpret as HxW grayscale images
# (N, H, W)
data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
return get_layer_vis_square_raw(data,
allow_heatmap,
normalize,
min_img_dim,
max_width,
colormap,
) | python | def get_layer_vis_square(data,
allow_heatmap=True,
normalize=True,
min_img_dim=100,
max_width=1200,
channel_order='RGB',
colormap='jet',
):
"""
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square
"""
if channel_order not in ['RGB', 'BGR']:
raise ValueError('Unsupported channel_order %s' % channel_order)
if data.ndim == 1:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data.reshape((data.shape[0] * data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
# interpret as a color image
# (1, H, W,3)
if channel_order == 'BGR':
data = data[[2, 1, 0], ...] # BGR to RGB (see issue #59)
data = data.transpose(1, 2, 0)
data = data[np.newaxis, ...]
else:
# interpret as grayscale images
# (N, H, W)
pass
elif data.ndim == 4:
if data.shape[0] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(1, 2, 3, 0)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
elif data.shape[1] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(0, 2, 3, 1)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
else:
# interpret as HxW grayscale images
# (N, H, W)
data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
return get_layer_vis_square_raw(data,
allow_heatmap,
normalize,
min_img_dim,
max_width,
colormap,
) | Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L274-L339 |
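A hedged sketch of feeding a convolutional activation volume to get_layer_vis_square (import path assumed); per the ndim rules in the code, a 4-D array whose second axis is not 3 is flattened into N*C grayscale maps:

    import numpy as np
    from aetros.utils.image import get_layer_vis_square  # assumed import path

    activations = np.random.rand(8, 16, 14, 14).astype(np.float32)  # (N, C, H, W)
    vis = get_layer_vis_square(activations, allow_heatmap=True, normalize=True, max_width=800)
    # vis is a single mosaic image laying out the 8 * 16 feature maps in a grid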
aetros/aetros-cli | aetros/utils/image.py | get_color_map | def get_color_map(name):
"""
Return a colormap as (redmap, greenmap, bluemap)
Arguments:
name -- the name of the colormap. If unrecognized, will default to 'jet'.
"""
redmap = [0]
greenmap = [0]
bluemap = [0]
if name == 'white':
# essentially a noop
redmap = [0, 1]
greenmap = [0, 1]
bluemap = [0, 1]
elif name == 'simple':
redmap = [0, 1, 1, 1]
greenmap = [0, 0, 1, 1]
bluemap = [0, 0, 0, 1]
elif name == 'hot':
redmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952,
0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
greenmap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284,
0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265,
0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1]
elif name == 'rainbow':
redmap = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432,
0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667]
greenmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936,
0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535,
0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
elif name == 'winter':
greenmap = [0, 1]
bluemap = [1, 0.5]
else:
if name != 'jet':
print('Warning: colormap "%s" not supported. Using jet instead.' % name)
redmap = [0, 0, 0, 0, 0.5, 1, 1, 1, 0.5]
greenmap = [0, 0, 0.5, 1, 1, 1, 0.5, 0, 0]
bluemap = [0.5, 1, 1, 1, 0.5, 0, 0, 0, 0]
return 255.0 * np.array(redmap), 255.0 * np.array(greenmap), 255.0 * np.array(bluemap) | python | def get_color_map(name):
"""
Return a colormap as (redmap, greenmap, bluemap)
Arguments:
name -- the name of the colormap. If unrecognized, will default to 'jet'.
"""
redmap = [0]
greenmap = [0]
bluemap = [0]
if name == 'white':
# essentially a noop
redmap = [0, 1]
greenmap = [0, 1]
bluemap = [0, 1]
elif name == 'simple':
redmap = [0, 1, 1, 1]
greenmap = [0, 0, 1, 1]
bluemap = [0, 0, 0, 1]
elif name == 'hot':
redmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952,
0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
greenmap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284,
0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265,
0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1]
elif name == 'rainbow':
redmap = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432,
0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667]
greenmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936,
0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535,
0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
elif name == 'winter':
greenmap = [0, 1]
bluemap = [1, 0.5]
else:
if name != 'jet':
print('Warning: colormap "%s" not supported. Using jet instead.' % name)
redmap = [0, 0, 0, 0, 0.5, 1, 1, 1, 0.5]
greenmap = [0, 0, 0.5, 1, 1, 1, 0.5, 0, 0]
bluemap = [0.5, 1, 1, 1, 0.5, 0, 0, 0, 0]
return 255.0 * np.array(redmap), 255.0 * np.array(greenmap), 255.0 * np.array(bluemap) | Return a colormap as (redmap, greenmap, bluemap)
Arguments:
name -- the name of the colormap. If unrecognized, will default to 'jet'. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L493-L534 |
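The tuples returned by get_color_map are per-channel control points scaled to 0-255; a sketch of one way to apply them to a normalized grayscale array with numpy interpolation (apply_color_map is an illustrative helper, not part of the library):

    import numpy as np
    from aetros.utils.image import get_color_map  # assumed import path

    def apply_color_map(gray, name='jet'):
        # gray: float array normalized to [0, 1]
        channels = []
        for cmap in get_color_map(name):
            xs = np.linspace(0.0, 1.0, len(cmap))
            channels.append(np.interp(gray, xs, cmap))
        return np.stack(channels, axis=-1).astype(np.uint8)

    heat = apply_color_map(np.random.rand(64, 64))  # (64, 64, 3) uint8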
aetros/aetros-cli | aetros/git.py | Git.prepare_index_file | def prepare_index_file(self):
"""
Makes sure that GIT index file we use per job (by modifying environment variable GIT_INDEX_FILE)
is not locked and empty. Git.fetch_job uses `git read-tree` to updates this index. For new jobs, we start
with an empty index - that's why we delete it every time.
"""
if os.getenv('AETROS_GIT_INDEX_FILE'):
self.index_path = os.getenv('AETROS_GIT_INDEX_FILE')
return
import tempfile
h, path = tempfile.mkstemp('aetros-git', '', self.temp_path)
self.index_path = path
# we give git a unique file path for that index. However, git expects it to be non-existent for empty indexes.
# empty file would lead to "fatal: index file smaller than expected"
os.close(h)
os.unlink(self.index_path)
self.logger.debug('GIT_INDEX_FILE created at ' + self.index_path) | python | def prepare_index_file(self):
"""
Makes sure that GIT index file we use per job (by modifying environment variable GIT_INDEX_FILE)
is not locked and empty. Git.fetch_job uses `git read-tree` to updates this index. For new jobs, we start
with an empty index - that's why we delete it every time.
"""
if os.getenv('AETROS_GIT_INDEX_FILE'):
self.index_path = os.getenv('AETROS_GIT_INDEX_FILE')
return
import tempfile
h, path = tempfile.mkstemp('aetros-git', '', self.temp_path)
self.index_path = path
# we give git a unique file path for that index. However, git expects it to be non-existent for empty indexes.
# empty file would lead to "fatal: index file smaller than expected"
os.close(h)
os.unlink(self.index_path)
self.logger.debug('GIT_INDEX_FILE created at ' + self.index_path) | Makes sure that GIT index file we use per job (by modifying environment variable GIT_INDEX_FILE)
is not locked and empty. Git.fetch_job uses `git read-tree` to update this index. For new jobs, we start
with an empty index - that's why we delete it every time. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L266-L286 |
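A rough illustration of the per-job index mechanism that prepare_index_file's docstring describes, using only standard git plumbing; the bare repository path and the ref name are placeholders:

    import os
    import subprocess
    import tempfile

    env = dict(os.environ)
    handle, index_path = tempfile.mkstemp('aetros-git')
    os.close(handle)
    os.unlink(index_path)               # git wants the file missing for an empty index
    env['GIT_INDEX_FILE'] = index_path  # every git call below now uses this private index
    subprocess.check_call(['git', '--git-dir', '/path/to/model.git',
                           'read-tree', 'refs/aetros/job/<job-id>'], env=env)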
aetros/aetros-cli | aetros/git.py | Git.fetch_job | def fetch_job(self, job_id, checkout=False):
"""
Fetch the current job reference (refs/aetros/job/<id>) from origin and (when checkout=True) read its tree into
the current git index and check it out into the working directory.
"""
self.job_id = job_id
self.logger.debug("Git fetch job reference %s" % (self.ref_head, ))
out, code, err = self.command_exec(['ls-remote', 'origin', self.ref_head])
if code:
self.logger.error('Could not find the job ' + job_id + ' on the server. Are you online and does the job exist?')
sys.exit(1)
try:
self.command_exec(['fetch', '-f', '-n', 'origin', self.ref_head+':'+self.ref_head])
except Exception:
self.logger.error("Could not load job information for " + job_id + '. You need to be online to start pre-configured jobs.')
raise
self.read_job(job_id, checkout) | python | def fetch_job(self, job_id, checkout=False):
"""
Fetch the current job reference (refs/aetros/job/<id>) from origin and (when checkout=True) read its tree into
the current git index and check it out into the working directory.
"""
self.job_id = job_id
self.logger.debug("Git fetch job reference %s" % (self.ref_head, ))
out, code, err = self.command_exec(['ls-remote', 'origin', self.ref_head])
if code:
self.logger.error('Could not find the job ' + job_id + ' on the server. Are you online and does the job exist?')
sys.exit(1)
try:
self.command_exec(['fetch', '-f', '-n', 'origin', self.ref_head+':'+self.ref_head])
except Exception:
self.logger.error("Could not load job information for " + job_id + '. You need to be online to start pre-configured jobs.')
raise
self.read_job(job_id, checkout) | Fetch the current job reference (refs/aetros/job/<id>) from origin and (when checkout=True) read its tree into
the current git index and check it out into the working directory. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L288-L308 |
aetros/aetros-cli | aetros/git.py | Git.read_job | def read_job(self, job_id, checkout=False):
"""
Reads the head commit and reads the tree into the index,
and checks out the work-tree when checkout=True.
This does not fetch the job from the actual server. It needs to be in the local git already.
"""
self.job_id = job_id
commit = self.get_head_commit()
self.logger.debug('Job ref points to ' + commit)
self.command_exec(['read-tree', self.ref_head])
if checkout:
self.logger.debug('Working directory in ' + self.work_tree)
# make sure we have checked out all files we have added until now. Important for simple models,
# so we have the actual model.py and dataset scripts.
if os.path.exists(self.work_tree):
shutil.rmtree(self.work_tree)
os.makedirs(self.work_tree)
# make the working tree reflect exactly the tree of ref_head.
# since we removed the dir before, we have exactly the tree of the reference
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.']) | python | def read_job(self, job_id, checkout=False):
"""
Reads the head commit and reads the tree into the index,
and checks out the work-tree when checkout=True.
This does not fetch the job from the actual server. It needs to be in the local git already.
"""
self.job_id = job_id
commit = self.get_head_commit()
self.logger.debug('Job ref points to ' + commit)
self.command_exec(['read-tree', self.ref_head])
if checkout:
self.logger.debug('Working directory in ' + self.work_tree)
# make sure we have checked out all files we have added until now. Important for simple models,
# so we have the actual model.py and dataset scripts.
if os.path.exists(self.work_tree):
shutil.rmtree(self.work_tree)
os.makedirs(self.work_tree)
# make the working tree reflect exactly the tree of ref_head.
# since we removed the dir before, we have exactly the tree of the reference
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.']) | Reads the head commit and reads the tree into the index,
and checks out the work-tree when checkout=True.
This does not fetch the job from the actual server. It needs to be in the local git already. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L317-L343 |
aetros/aetros-cli | aetros/git.py | Git.create_job_id | def create_job_id(self, data):
"""
Create a new job id and reference (refs/aetros/job/<id>) by creating a new commit with an empty tree. That
root commit is the actual job id. A reference is then created to the newest (head) commit of this commit history.
The reference will always be updated once a new commit is added.
"""
self.add_file('aetros/job.json', simplejson.dumps(data, indent=4))
tree_id = self.write_tree()
self.job_id = self.command_exec(['commit-tree', '-m', "JOB_CREATED", tree_id])[0].decode('utf-8').strip()
out, code, err = self.command_exec(['show-ref', self.ref_head], allowed_to_fail=True)
if not code:
self.logger.warning("Generated job id already exists, because exact same experiment values given. Ref " + self.ref_head)
self.command_exec(['update-ref', self.ref_head, self.job_id])
# make sure we have checked out all files we have added until now. Important for simple models, so we have the
# actual model.py and dataset scripts.
if not os.path.exists(self.work_tree):
os.makedirs(self.work_tree)
# updates index and working tree
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
# every caller needs to make sure to call git.push
return self.job_id | python | def create_job_id(self, data):
"""
Create a new job id and reference (refs/aetros/job/<id>) by creating a new commit with an empty tree. That
root commit is the actual job id. A reference is then created to the newest (head) commit of this commit history.
The reference will always be updated once a new commit is added.
"""
self.add_file('aetros/job.json', simplejson.dumps(data, indent=4))
tree_id = self.write_tree()
self.job_id = self.command_exec(['commit-tree', '-m', "JOB_CREATED", tree_id])[0].decode('utf-8').strip()
out, code, err = self.command_exec(['show-ref', self.ref_head], allowed_to_fail=True)
if not code:
self.logger.warning("Generated job id already exists, because exact same experiment values given. Ref " + self.ref_head)
self.command_exec(['update-ref', self.ref_head, self.job_id])
# make sure we have checked out all files we have added until now. Important for simple models, so we have the
# actual model.py and dataset scripts.
if not os.path.exists(self.work_tree):
os.makedirs(self.work_tree)
# updates index and working tree
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
# every caller needs to make sure to call git.push
return self.job_id | Create a new job id and reference (refs/aetros/job/<id>) by creating a new commit with an empty tree. That
root commit is the actual job id. A reference is then created to the newest (head) commit of this commit history.
The reference will always be updated once a new commit is added. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L388-L415 |
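The same create_job_id flow sketched with raw git plumbing (repository path and file content are placeholders); it only illustrates why the root commit sha doubles as the job id:

    import subprocess

    GIT = ['git', '--git-dir', '/path/to/model.git']  # placeholder bare repository

    def git(args, data=None):
        return subprocess.run(GIT + args, input=data, check=True,
                              capture_output=True).stdout.decode().strip()

    blob = git(['hash-object', '-w', '--stdin'], data=b'{"created": true}')
    git(['update-index', '--add', '--cacheinfo', '100644', blob, 'aetros/job.json'])
    tree = git(['write-tree'])
    job_id = git(['commit-tree', '-m', 'JOB_CREATED', tree])  # root commit == job id
    git(['update-ref', 'refs/aetros/job/' + job_id, job_id])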
aetros/aetros-cli | aetros/git.py | Git.start_push_sync | def start_push_sync(self):
"""
Starts the detection of unsynced Git data.
"""
self.active_thread = True
self.active_push = True
self.thread_push_instance = Thread(target=self.thread_push)
self.thread_push_instance.daemon = True
self.thread_push_instance.start() | python | def start_push_sync(self):
"""
Starts the detection of unsynced Git data.
"""
self.active_thread = True
self.active_push = True
self.thread_push_instance = Thread(target=self.thread_push)
self.thread_push_instance.daemon = True
self.thread_push_instance.start() | Starts the detection of unsynced Git data. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L417-L426 |
aetros/aetros-cli | aetros/git.py | Git.stop | def stop(self):
"""
Stops the `git push` thread and commits all streamed files (Git.store_file and Git.stream_file), followed
by a final git push.
You cannot start the process again.
"""
self.active_thread = False
if self.thread_push_instance and self.thread_push_instance.isAlive():
self.thread_push_instance.join()
with self.batch_commit('STREAM_END'):
for path, handle in six.iteritems(self.streamed_files.copy()):
# open again and read full content
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
self.logger.debug('Git stream end for file: ' + full_path)
del self.streamed_files[path]
# make sure it's written to the disk
try:
self.stream_files_lock.acquire()
if not handle.closed:
handle.flush()
handle.close()
finally:
self.stream_files_lock.release()
with open(full_path, 'r') as f:
self.commit_file(path, path, f.read())
if not self.keep_stream_files:
os.unlink(full_path)
with self.batch_commit('STORE_END'):
for path, bar in six.iteritems(self.store_files.copy()):
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
self.logger.debug('Git store end for file: ' + full_path)
del self.store_files[path]
try:
self.stream_files_lock.acquire()
self.commit_file(path, path, open(full_path, 'r').read())
finally:
self.stream_files_lock.release()
if not self.keep_stream_files:
os.unlink(full_path) | python | def stop(self):
"""
Stops the `git push` thread and commits all streamed files (Git.store_file and Git.stream_file), followed
by a final git push.
You cannot start the process again.
"""
self.active_thread = False
if self.thread_push_instance and self.thread_push_instance.isAlive():
self.thread_push_instance.join()
with self.batch_commit('STREAM_END'):
for path, handle in six.iteritems(self.streamed_files.copy()):
# open again and read full content
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
self.logger.debug('Git stream end for file: ' + full_path)
del self.streamed_files[path]
# make sure it's written to the disk
try:
self.stream_files_lock.acquire()
if not handle.closed:
handle.flush()
handle.close()
finally:
self.stream_files_lock.release()
with open(full_path, 'r') as f:
self.commit_file(path, path, f.read())
if not self.keep_stream_files:
os.unlink(full_path)
with self.batch_commit('STORE_END'):
for path, bar in six.iteritems(self.store_files.copy()):
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
self.logger.debug('Git store end for file: ' + full_path)
del self.store_files[path]
try:
self.stream_files_lock.acquire()
self.commit_file(path, path, open(full_path, 'r').read())
finally:
self.stream_files_lock.release()
if not self.keep_stream_files:
os.unlink(full_path) | Stops the `git push` thread and commits all streamed files (Git.store_file and Git.stream_file), followed
by a final git push.
You cannot start the process again. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L428-L477 |
aetros/aetros-cli | aetros/git.py | Git.batch_commit | def batch_commit(self, message):
"""
Instead of committing a lot of small commits you can batch it together using this controller.
Example:
with git.batch_commit('BATCHED'):
git.commit_file('my commit 1', 'path/to/file', 'content from file')
git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array')
Within the `with` block you can group the method calls of `commit_file` and `commit_json_file`, and every other
method calling these two methods.
:type message: str
:return: with controller to be used with Python's `with git.batch_commit():`
"""
class controlled_execution:
def __init__(self, git, message):
self.git = git
self.message = message
def __enter__(self):
self.git.git_batch_commit = True
if self.git.job_id:
# make sure we're always on the tip tree
self.git.read_tree(self.git.ref_head)
def __exit__(self, type, value, traceback):
self.git.git_batch_commit = False
# if nothing committed, we return early
if not self.git.git_batch_commit_messages: return
commit_message = self.message
if self.git.git_batch_commit_messages:
commit_message = commit_message + "\n\n" + "\n".join(self.git.git_batch_commit_messages)
self.git.git_batch_commit_messages = []
self.git.commit_index(commit_message)
return controlled_execution(self, message) | python | def batch_commit(self, message):
"""
Instead of committing a lot of small commits you can batch it together using this controller.
Example:
with git.batch_commit('BATCHED'):
git.commit_file('my commit 1', 'path/to/file', 'content from file')
git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array')
Within the `with` block you can group the method calls of `commit_file` and `commit_json_file`, and every other
method calling these two methods.
:type message: str
:return: with controller to be used with Python's `with git.batch_commit():`
"""
class controlled_execution:
def __init__(self, git, message):
self.git = git
self.message = message
def __enter__(self):
self.git.git_batch_commit = True
if self.git.job_id:
# make sure we're always on the tip tree
self.git.read_tree(self.git.ref_head)
def __exit__(self, type, value, traceback):
self.git.git_batch_commit = False
# if nothing committed, we return early
if not self.git.git_batch_commit_messages: return
commit_message = self.message
if self.git.git_batch_commit_messages:
commit_message = commit_message + "\n\n" + "\n".join(self.git.git_batch_commit_messages)
self.git.git_batch_commit_messages = []
self.git.commit_index(commit_message)
return controlled_execution(self, message) | Instead of committing a lot of small commits you can batch it together using this controller.
Example:
with git.batch_commit('BATCHED'):
git.commit_file('my commit 1', 'path/to/file', 'content from file')
git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array')
Within the `with` block you can group the method calls of `commit_file` and `commit_json_file`, and every other
method calling these two methods.
:type message: str
:return: with controller to be used with Python's `with git.batch_commit():` | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L489-L530 |
aetros/aetros-cli | aetros/git.py | Git.store_file | def store_file(self, path, data, fast_lane=True):
"""
Store the file in temp folder and stream it to server if online.
This makes sure that we have all newest data of this file on the server directly.
This method always overwrites the content of path. If you always want to append content,
use Git.stream_file() instead.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again.
"""
self.stream_files_lock.acquire()
try:
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
already_set = path in self.store_files and self.store_files[path] == data
if is_debug3():
sys.__stderr__.write('git:store_file(%s, %s, %s), already_set=%s\n'
% (str(path), str(data)[0:180], str(fast_lane), str(already_set)))
if already_set:
return
open(full_path, 'wb').write(data)
self.store_files[path] = data
if self.client.online is not False:
self.client.send({'type': 'store-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
finally:
self.stream_files_lock.release() | python | def store_file(self, path, data, fast_lane=True):
"""
Store the file in temp folder and stream it to server if online.
This makes sure that we have all newest data of this file on the server directly.
This method always overwrites the content of path. If you want to append always the content,
use Git.stream_file() instead.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again.
"""
self.stream_files_lock.acquire()
try:
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
already_set = path in self.store_files and self.store_files[path] == data
if is_debug3():
sys.__stderr__.write('git:store_file(%s, %s, %s), already_set=%s\n'
% (str(path), str(data)[0:180], str(fast_lane), str(already_set)))
if already_set:
return
open(full_path, 'wb').write(data)
self.store_files[path] = data
if self.client.online is not False:
self.client.send({'type': 'store-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
finally:
self.stream_files_lock.release() | Store the file in temp folder and stream it to server if online.
This makes sure that we have all newest data of this file on the server directly.
This method always overwrites the content of path. If you always want to append content,
use Git.stream_file() instead.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L541-L580 |
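A brief usage sketch contrasting store_file and stream_file, assuming an already constructed Git instance named git; the paths are illustrative only:

    # store_file: whole-content replacement, committed as a blob at job end
    git.store_file('aetros/custom/config.json', '{"lr": 0.01}')

    # stream_file: append-only handle for files that grow over time (logs, channel data)
    log = git.stream_file('aetros/job/log.txt')
    log.write('epoch 1 done\n')
    log.write('epoch 2 done\n')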
aetros/aetros-cli | aetros/git.py | Git.stream_file | def stream_file(self, path, fast_lane=True):
"""
Create a temp file, stream it to the server if online and append its content using the write() method.
This makes sure that we have all newest data of this file on the server directly.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again. Very handy for rather large files that will append over time (like channel data, logs)
Example:
self.log_stream = git.stream_file('log.txt')
self.log_stream.write("new line\n");
self.log_stream.write("another line\n");
"""
# create temp file
# open temp file
# register stream file and write locally
# on end() git_commit that file locally
# create socket connection to server
# stream file to server
# on end() send server end signal, so it can store its content in git as a blob as well.
# A git push would detect that both sides have the same content already,
# except when server connection broke between start() and end().
# Result -> already transmitted logs/channel data (probably many MBs) won't transfered twice
# when doing a git-push.
# return handler to write to this file
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
handle = open(full_path, 'wb')
self.streamed_files[path] = handle
class Stream():
def __init__(self, git):
self.git = git
def write(self, data):
if path not in self.git.streamed_files:
# already committed to server
return
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
try:
self.git.stream_files_lock.acquire()
if not handle.closed:
handle.write(data)
handle.flush()
except IOError as e:
handle.close()
if 'No space left' in e.__str__():
sys.stderr.write(traceback.format_exc() + '\n')
self.git.logger.error(e.__str__())
finally:
self.git.stream_files_lock.release()
if self.git.client.online is not False:
self.git.client.send({'type': 'stream-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
return Stream(self) | python | def stream_file(self, path, fast_lane=True):
"""
Create a temp file, stream it to the server if online and append its content using the write() method.
This makes sure that we have all newest data of this file on the server directly.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again. Very handy for rather large files that will append over time (like channel data, logs)
Example:
self.log_stream = git.stream_file('log.txt')
self.log_stream.write("new line\n");
self.log_stream.write("another line\n");
"""
# create temp file
# open temp file
# register stream file and write locally
# on end() git_commit that file locally
# create socket connection to server
# stream file to server
# on end() send server end signal, so it can store its content in git as a blob as well.
# A git push would detect that both sides have the same content already,
# except when server connection broke between start() and end().
# Result -> already transmitted logs/channel data (probably many MBs) won't be transferred twice
# when doing a git-push.
# return handler to write to this file
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
handle = open(full_path, 'wb')
self.streamed_files[path] = handle
class Stream():
def __init__(self, git):
self.git = git
def write(self, data):
if path not in self.git.streamed_files:
# already committed to server
return
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
try:
self.git.stream_files_lock.acquire()
if not handle.closed:
handle.write(data)
handle.flush()
except IOError as e:
handle.close()
if 'No space left' in e.__str__():
sys.stderr.write(traceback.format_exc() + '\n')
self.git.logger.error(e.__str__())
finally:
self.git.stream_files_lock.release()
if self.git.client.online is not False:
self.git.client.send({'type': 'stream-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
return Stream(self) | Create a temp file, stream it to the server if online and append its content using the write() method.
This makes sure that we have all newest data of this file on the server directly.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again. Very handy for rather large files that will append over time (like channel data, logs)
Example:
self.log_stream = git.stream_file('log.txt')
self.log_stream.write("new line\n");
self.log_stream.write("another line\n"); | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L582-L651 |
aetros/aetros-cli | aetros/git.py | Git.add_index | def add_index(self, mode, blob_id, path):
"""
Add new entry to the current index
:param mode: str
:param blob_id: str
:param path: str
:return:
"""
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path]) | python | def add_index(self, mode, blob_id, path):
"""
Add new entry to the current index
:param mode: str
:param blob_id: str
:param path: str
:return:
"""
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path]) | Add new entry to the current index
:param mode: str
:param blob_id: str
:param path: str
:return: | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L656-L662 |
aetros/aetros-cli | aetros/git.py | Git.add_file | def add_file(self, git_path, content):
"""
Add a new file as blob in the storage and add its tree entry into the index.
:param git_path: str
:param content: str
"""
blob_id = self.write_blob(content)
self.add_index('100644', blob_id, git_path) | python | def add_file(self, git_path, content):
"""
Add a new file as blob in the storage and add its tree entry into the index.
:param git_path: str
:param content: str
"""
blob_id = self.write_blob(content)
self.add_index('100644', blob_id, git_path) | Add a new file as blob in the storage and add its tree entry into the index.
:param git_path: str
:param content: str | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L691-L699 |
aetros/aetros-cli | aetros/git.py | Git.add_file_path_in_work_tree | def add_file_path_in_work_tree(self, path, work_tree, verbose=True):
"""
Add a new file as blob in the storage and add its tree entry into the index.
"""
args = ['--work-tree', work_tree, 'add', '-f']
if verbose:
args.append('--verbose')
args.append(path)
self.command_exec(args, show_output=verbose) | python | def add_file_path_in_work_tree(self, path, work_tree, verbose=True):
"""
Add a new file as blob in the storage and add its tree entry into the index.
"""
args = ['--work-tree', work_tree, 'add', '-f']
if verbose:
args.append('--verbose')
args.append(path)
self.command_exec(args, show_output=verbose) | Add a new file as blob in the storage and add its tree entry into the index. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L705-L713 |
aetros/aetros-cli | aetros/git.py | Git.commit_file | def commit_file(self, message, path, content):
"""
Add a new file as blob in the storage, add its tree entry into the index and commit the index.
:param message: str
:param path: str
:param content: str
:return:
"""
if self.git_batch_commit:
self.add_file(path, content)
self.git_batch_commit_messages.append(message)
else:
with self.lock_write():
if self.job_id:
self.read_tree(self.ref_head)
self.add_file(path, content)
return self.commit_index(message) | python | def commit_file(self, message, path, content):
"""
Add a new file as blob in the storage, add its tree entry into the index and commit the index.
:param message: str
:param path: str
:param content: str
:return:
"""
if self.git_batch_commit:
self.add_file(path, content)
self.git_batch_commit_messages.append(message)
else:
with self.lock_write():
if self.job_id:
self.read_tree(self.ref_head)
self.add_file(path, content)
return self.commit_index(message) | Add a new file as blob in the storage, add its tree entry into the index and commit the index.
:param message: str
:param path: str
:param content: str
:return: | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L719-L738 |
aetros/aetros-cli | aetros/git.py | Git.diff_objects | def diff_objects(self, latest_commit_sha):
"""
Push all changes to origin, based on objects, not on commits.
Important: Call this push after every new commit, or we lose commits.
"""
base = ['git', '--bare', '--git-dir', self.git_path]
object_shas = []
summary = {'commits': [], 'trees': [], 'files': []}
def read_parents_and_tree_from(commit):
if commit in self.synced_object_shas or commit in object_shas:
# this commit has already been synced or read
return None, None
self.synced_object_shas[commit] = True
summary['commits'].append(commit)
object_shas.append(commit)
object_content = subprocess.check_output(base + ['cat-file', '-p', commit]).decode('utf-8').strip()
parents = []
tree = ''
for line in object_content.splitlines():
if line.startswith('tree '):
tree = line[len('tree '):]
if line.startswith('parent '):
parents.append(line[len('parent '):])
return parents, tree
def collect_files_from_tree(tree):
if tree in self.synced_object_shas or tree in object_shas:
# we have exactly this tree already synced or read, meaning all its objects as well
return
self.synced_object_shas[tree] = True
summary['trees'].append(tree)
object_shas.append(tree)
object_content = subprocess.check_output(base + ['ls-tree', '-r', '-t', tree]).decode('utf-8').strip()
for line in object_content.splitlines():
exploded = line.split(' ')
if len(exploded) < 3:
sys.stderr.write("Error: Wrong line format of ls-tree for %s: %s\n" % (tree, line,))
sys.exit(1)
object_to_add = str(exploded[2][:40])
path = str(exploded[2][41:])
if object_to_add in self.synced_object_shas or object_to_add in object_shas:
# have it already in the list or already synced
continue
object_shas.append(object_to_add)
self.synced_object_shas[object_to_add] = True
summary['files'].append([object_to_add, path])
commits_to_check = [latest_commit_sha]
while len(commits_to_check):
sha = commits_to_check.pop(0)
parents, tree = read_parents_and_tree_from(sha)
if parents:
for parent in parents:
if parent not in commits_to_check:
commits_to_check.append(parent)
if tree:
collect_files_from_tree(tree)
is_debug2() and self.logger.debug("shas_to_check %d: %s " % (len(object_shas), str(object_shas),))
if not object_shas:
return [], summary
try:
is_debug2() and self.logger.debug("Do git-cat-file-check.sh")
ssh_stream = create_ssh_stream(read_home_config(), exit_on_failure=False)
channel = ssh_stream.get_transport().open_session()
channel.exec_command('git-cat-file-check.sh "%s"' % (self.model_name + '.git',))
channel.sendall('\n'.join(object_shas))
channel.shutdown_write()
def readall(c):
content = b''
while True:
try:
chunk = c.recv(1024)
if chunk == b'':
break
content += chunk
except (KeyboardInterrupt, SystemExit):
return
return content
missing_objects = readall(channel).decode('utf-8').splitlines()
channel.close()
ssh_stream.close()
# make sure we have in summary only SHAs we actually will sync
for stype in six.iterkeys(summary):
ids = summary[stype][:]
for sha in ids:
if stype == 'files':
if sha[0] not in missing_objects:
summary[stype].remove(sha)
else:
if sha not in missing_objects:
summary[stype].remove(sha)
return missing_objects, summary
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
self.logger.error("Failed to generate diff_objects: %s" % (str(e),))
for sha in object_shas:
if sha in self.synced_object_shas:
del self.synced_object_shas[sha]
return None, None | python | def diff_objects(self, latest_commit_sha):
"""
Push all changes to origin, based on objects, not on commits.
Important: Call this push after every new commit, or we lose commits.
"""
base = ['git', '--bare', '--git-dir', self.git_path]
object_shas = []
summary = {'commits': [], 'trees': [], 'files': []}
def read_parents_and_tree_from(commit):
if commit in self.synced_object_shas or commit in object_shas:
# this commit has already been synced or read
return None, None
self.synced_object_shas[commit] = True
summary['commits'].append(commit)
object_shas.append(commit)
object_content = subprocess.check_output(base + ['cat-file', '-p', commit]).decode('utf-8').strip()
parents = []
tree = ''
for line in object_content.splitlines():
if line.startswith('tree '):
tree = line[len('tree '):]
if line.startswith('parent '):
parents.append(line[len('parent '):])
return parents, tree
def collect_files_from_tree(tree):
if tree in self.synced_object_shas or tree in object_shas:
# we have exactly this tree already synced or read, meaning all its objects as well
return
self.synced_object_shas[tree] = True
summary['trees'].append(tree)
object_shas.append(tree)
object_content = subprocess.check_output(base + ['ls-tree', '-r', '-t', tree]).decode('utf-8').strip()
for line in object_content.splitlines():
exploded = line.split(' ')
if len(exploded) < 3:
sys.stderr.write("Error: Wrong line format of ls-tree for %s: %s\n" % (tree, line,))
sys.exit(1)
object_to_add = str(exploded[2][:40])
path = str(exploded[2][41:])
if object_to_add in self.synced_object_shas or object_to_add in object_shas:
# have it already in the list or already synced
continue
object_shas.append(object_to_add)
self.synced_object_shas[object_to_add] = True
summary['files'].append([object_to_add, path])
commits_to_check = [latest_commit_sha]
while len(commits_to_check):
sha = commits_to_check.pop(0)
parents, tree = read_parents_and_tree_from(sha)
if parents:
for parent in parents:
if parent not in commits_to_check:
commits_to_check.append(parent)
if tree:
collect_files_from_tree(tree)
is_debug2() and self.logger.debug("shas_to_check %d: %s " % (len(object_shas), str(object_shas),))
if not object_shas:
return [], summary
try:
is_debug2() and self.logger.debug("Do git-cat-file-check.sh")
ssh_stream = create_ssh_stream(read_home_config(), exit_on_failure=False)
channel = ssh_stream.get_transport().open_session()
channel.exec_command('git-cat-file-check.sh "%s"' % (self.model_name + '.git',))
channel.sendall('\n'.join(object_shas))
channel.shutdown_write()
def readall(c):
content = b''
while True:
try:
chunk = c.recv(1024)
if chunk == b'':
break
content += chunk
except (KeyboardInterrupt, SystemExit):
return
return content
missing_objects = readall(channel).decode('utf-8').splitlines()
channel.close()
ssh_stream.close()
# make sure we have in summary only SHAs we actually will sync
for stype in six.iterkeys(summary):
ids = summary[stype][:]
for sha in ids:
if stype == 'files':
if sha[0] not in missing_objects:
summary[stype].remove(sha)
else:
if sha not in missing_objects:
summary[stype].remove(sha)
return missing_objects, summary
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
self.logger.error("Failed to generate diff_objects: %s" % (str(e),))
for sha in object_shas:
if sha in self.synced_object_shas:
del self.synced_object_shas[sha]
return None, None | Push all changes to origin, based on objects, not on commits.
Important: Call this push after every new commit, or we lose commits. | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L740-L863 |
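A minimal standalone sketch of the commit-object parsing that read_parents_and_tree_from() performs above on "git cat-file -p <commit>" output; parse_commit_object and the sample data are illustrative only, not aetros-cli API.

def parse_commit_object(object_content):
    # split a raw commit object dump into (parent shas, tree sha)
    parents, tree = [], ''
    for line in object_content.splitlines():
        if line.startswith('tree '):
            tree = line[len('tree '):]
        if line.startswith('parent '):
            parents.append(line[len('parent '):])
    return parents, tree

sample = 'tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\nparent ' + 'a' * 40 + '\nauthor A <a@example.org> 0 +0000'
print(parse_commit_object(sample))  # (['aaaa...'], '4b825dc6...')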
aetros/aetros-cli | aetros/git.py | Git.commit_index | def commit_index(self, message):
"""
Commit the current index.
:param message: str
:return: str the generated commit sha
"""
tree_id = self.write_tree()
args = ['commit-tree', tree_id, '-p', self.ref_head]
# todo, this can end in a race-condition with other processes adding commits
commit = self.command_exec(args, message)[0].decode('utf-8').strip()
self.command_exec(['update-ref', self.ref_head, commit])
return commit | python | def commit_index(self, message):
"""
Commit the current index.
:param message: str
:return: str the generated commit sha
"""
tree_id = self.write_tree()
args = ['commit-tree', tree_id, '-p', self.ref_head]
# todo, this can end in a race-condition with other processes adding commits
commit = self.command_exec(args, message)[0].decode('utf-8').strip()
self.command_exec(['update-ref', self.ref_head, commit])
return commit | Commit the current index.
:param message: str
:return: str the generated commit sha | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L941-L955 |
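A rough sketch of the same write-tree / commit-tree / update-ref plumbing using plain subprocess calls against a hypothetical bare repository; commit_index_plumbing, git_dir, and the -m message flag are illustrative assumptions, not the aetros-cli command_exec API.

import subprocess

def commit_index_plumbing(git_dir, ref, message):
    base = ['git', '--bare', '--git-dir', git_dir]
    # write the current index as a tree object
    tree = subprocess.check_output(base + ['write-tree']).decode('utf-8').strip()
    # create a commit for that tree with the current ref as parent
    commit = subprocess.check_output(
        base + ['commit-tree', tree, '-p', ref, '-m', message]).decode('utf-8').strip()
    # advance the ref to the new commit
    subprocess.check_call(base + ['update-ref', ref, commit])
    return commit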
aetros/aetros-cli | aetros/git.py | Git.contents | def contents(self, path):
"""
Reads the given path of current ref_head and returns its content as utf-8
"""
try:
out, code, err = self.command_exec(['cat-file', '-p', self.ref_head+':'+path])
if not code:
return out.decode('utf-8')
except Exception:
pass
return None | python | def contents(self, path):
"""
Reads the given path of current ref_head and returns its content as utf-8
"""
try:
out, code, err = self.command_exec(['cat-file', '-p', self.ref_head+':'+path])
if not code:
return out.decode('utf-8')
except Exception:
pass
return None | Reads the given path of current ref_head and returns its content as utf-8 | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L965-L976 |
aetros/aetros-cli | aetros/keras_model_utils.py | job_start | def job_start(job_backend, trainer, keras_callback):
"""
Starts the training of a job. Needs job_prepare() first.
:type job_backend: JobBackend
:type trainer: Trainer
:return:
"""
job_backend.set_status('STARTING')
job_model = job_backend.get_job_model()
model_provider = job_model.get_model_provider()
job_backend.set_status('LOAD DATA')
datasets = job_model.get_datasets(trainer)
print('trainer.input_shape = %s\n' % (simplejson.dumps(trainer.input_shape, default=invalid_json_values),))
print('trainer.classes = %s\n' % (simplejson.dumps(trainer.classes, default=invalid_json_values),))
multiple_inputs = len(datasets) > 1
insights_x = [] if multiple_inputs else None
for dataset_name in job_model.get_input_dataset_names():
dataset = datasets[dataset_name]
if is_generator(dataset['X_train']):
batch_x, batch_y = dataset['X_train'].next()
if multiple_inputs:
insights_x.append(batch_x[0])
else:
insights_x = batch_x[0]
else:
if multiple_inputs:
insights_x.append(dataset['X_train'][0])
else:
insights_x = dataset['X_train'][0]
keras_callback.insights_x = insights_x
print('Insights sample shape', keras_callback.insights_x.shape)
keras_callback.write("Possible data keys '%s'\n" % "','".join(list(datasets.keys())))
data_train = model_provider.get_training_data(trainer, datasets)
data_validation = model_provider.get_validation_data(trainer, datasets)
keras_callback.set_validation_data(data_validation, trainer.nb_val_samples)
trainer.set_status('CONSTRUCT')
model = model_provider.get_model(trainer)
trainer.set_model(model)
trainer.set_status('COMPILING')
loss = model_provider.get_loss(trainer)
optimizer = model_provider.get_optimizer(trainer)
model_provider.compile(trainer, model, loss, optimizer)
model.summary()
trainer.callbacks.append(keras_callback)
model_provider.train(trainer, model, data_train, data_validation) | python | def job_start(job_backend, trainer, keras_callback):
"""
Starts the training of a job. Needs job_prepare() first.
:type job_backend: JobBackend
:type trainer: Trainer
:return:
"""
job_backend.set_status('STARTING')
job_model = job_backend.get_job_model()
model_provider = job_model.get_model_provider()
job_backend.set_status('LOAD DATA')
datasets = job_model.get_datasets(trainer)
print('trainer.input_shape = %s\n' % (simplejson.dumps(trainer.input_shape, default=invalid_json_values),))
print('trainer.classes = %s\n' % (simplejson.dumps(trainer.classes, default=invalid_json_values),))
multiple_inputs = len(datasets) > 1
insights_x = [] if multiple_inputs else None
for dataset_name in job_model.get_input_dataset_names():
dataset = datasets[dataset_name]
if is_generator(dataset['X_train']):
batch_x, batch_y = dataset['X_train'].next()
if multiple_inputs:
insights_x.append(batch_x[0])
else:
insights_x = batch_x[0]
else:
if multiple_inputs:
insights_x.append(dataset['X_train'][0])
else:
insights_x = dataset['X_train'][0]
keras_callback.insights_x = insights_x
print('Insights sample shape', keras_callback.insights_x.shape)
keras_callback.write("Possible data keys '%s'\n" % "','".join(list(datasets.keys())))
data_train = model_provider.get_training_data(trainer, datasets)
data_validation = model_provider.get_validation_data(trainer, datasets)
keras_callback.set_validation_data(data_validation, trainer.nb_val_samples)
trainer.set_status('CONSTRUCT')
model = model_provider.get_model(trainer)
trainer.set_model(model)
trainer.set_status('COMPILING')
loss = model_provider.get_loss(trainer)
optimizer = model_provider.get_optimizer(trainer)
model_provider.compile(trainer, model, loss, optimizer)
model.summary()
trainer.callbacks.append(keras_callback)
model_provider.train(trainer, model, data_train, data_validation) | Starts the training of a job. Needs job_prepare() first.
:type job_backend: JobBackend
:type trainer: Trainer
:return: | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/keras_model_utils.py#L34-L93 |
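A minimal sketch of the insights_x selection above: take the first training sample whether the input is a generator yielding (batch_x, batch_y) tuples or an in-memory array; first_sample and the toy data are assumptions for illustration.

def first_sample(x_train):
    # generator-style input: pull one batch and take its first sample
    if hasattr(x_train, '__next__') or hasattr(x_train, 'next'):
        batch_x, _batch_y = next(x_train) if hasattr(x_train, '__next__') else x_train.next()
        return batch_x[0]
    # in-memory array/list input: take the first sample directly
    return x_train[0]

def gen():
    while True:
        yield [[0.1, 0.2], [0.3, 0.4]], [0, 1]  # (batch_x, batch_y)

print(first_sample(gen()))     # [0.1, 0.2]
print(first_sample([[5, 6]]))  # [5, 6]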
aetros/aetros-cli | aetros/auto_dataset.py | read_images_in_memory | def read_images_in_memory(job_model, dataset, node, trainer):
"""
Reads all images into memory and applies augmentation if enabled
"""
concurrent = psutil.cpu_count()
dataset_config = dataset['config']
controller = {'running': True}
q = Queue(concurrent)
result = {
'X_train': [],
'Y_train': [],
'X_test': [],
'Y_test': []
}
images = []
max = 0
path = job_model.get_dataset_downloads_dir(dataset)
if 'path' in dataset['config']:
path = dataset['config']['path']
classes_count = 0
category_map = {}
classes = []
trainer.set_status('LOAD IMAGES INTO MEMORY')
try:
for i in range(concurrent):
t = ImageReadWorker(q, job_model, node, path, images, controller)
t.daemon = True
t.start()
for validation_or_training in ['validation', 'training']:
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training)):
for category_name in os.listdir(os.path.normpath(path + '/' + validation_or_training)):
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
if category_name not in category_map:
category_map[category_name] = classes_count
if 'classes' in dataset_config and 'category_' in category_name:
category_idx = int(category_name.replace('category_', ''))
category_map[category_name] = category_idx
target_category = dataset_config['classes'][category_idx]
classes.append(target_category['title'] or 'Class %s' % (category_idx, ))
else:
classes.append(category_name)
classes_count += 1
for id in os.listdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
file_path = os.path.join(path, validation_or_training, category_name, id)
q.put([file_path, validation_or_training == 'validation', category_name])
max += 1
q.join()
controller['running'] = False
train_images = []
test_images = []
for v in images:
image, validation, category_dir = v
if validation is True:
test_images.append([image, category_map[category_dir]])
else:
train_images.append([image, category_map[category_dir]])
train_datagen = None
augmentation = bool(get_option(dataset_config, 'augmentation', False))
if augmentation:
train_datagen = get_image_data_augmentor_from_dataset(dataset)
train = InMemoryDataGenerator(train_datagen, train_images, classes_count, job_model.job['config']['batchSize'])
test = InMemoryDataGenerator(None, test_images, classes_count, job_model.job['config']['batchSize'])
nb_sample = len(train_images)
trainer.set_info('Dataset size', {'training': nb_sample, 'validation': len(test_images)})
trainer.set_generator_training_nb(nb_sample)
trainer.set_generator_validation_nb(len(test_images))
trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation). Read all images into memory from %s" %
(classes_count, max, len(train_images), 'not ' if augmentation is False else '', len(test_images), path)))
if classes_count == 0:
trainer.logger.warning("Could not find any classes. Does the directory contain images?")
sys.exit(1)
trainer.output_size = classes_count
trainer.set_info('classes', classes)
trainer.classes = classes
result['X_train'] = train
result['Y_train'] = train
result['X_test'] = test
result['Y_test'] = test
return result
except KeyboardInterrupt:
controller['running'] = False
sys.exit(1) | python | def read_images_in_memory(job_model, dataset, node, trainer):
"""
Reads all images into memory and applies augmentation if enabled
"""
concurrent = psutil.cpu_count()
dataset_config = dataset['config']
controller = {'running': True}
q = Queue(concurrent)
result = {
'X_train': [],
'Y_train': [],
'X_test': [],
'Y_test': []
}
images = []
max = 0
path = job_model.get_dataset_downloads_dir(dataset)
if 'path' in dataset['config']:
path = dataset['config']['path']
classes_count = 0
category_map = {}
classes = []
trainer.set_status('LOAD IMAGES INTO MEMORY')
try:
for i in range(concurrent):
t = ImageReadWorker(q, job_model, node, path, images, controller)
t.daemon = True
t.start()
for validation_or_training in ['validation', 'training']:
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training)):
for category_name in os.listdir(os.path.normpath(path + '/' + validation_or_training)):
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
if category_name not in category_map:
category_map[category_name] = classes_count
if 'classes' in dataset_config and 'category_' in category_name:
category_idx = int(category_name.replace('category_', ''))
category_map[category_name] = category_idx
target_category = dataset_config['classes'][category_idx]
classes.append(target_category['title'] or 'Class %s' % (category_idx, ))
else:
classes.append(category_name)
classes_count += 1
for id in os.listdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
file_path = os.path.join(path, validation_or_training, category_name, id)
q.put([file_path, validation_or_training == 'validation', category_name])
max += 1
q.join()
controller['running'] = False
train_images = []
test_images = []
for v in images:
image, validation, category_dir = v
if validation is True:
test_images.append([image, category_map[category_dir]])
else:
train_images.append([image, category_map[category_dir]])
train_datagen = None
augmentation = bool(get_option(dataset_config, 'augmentation', False))
if augmentation:
train_datagen = get_image_data_augmentor_from_dataset(dataset)
train = InMemoryDataGenerator(train_datagen, train_images, classes_count, job_model.job['config']['batchSize'])
test = InMemoryDataGenerator(None, test_images, classes_count, job_model.job['config']['batchSize'])
nb_sample = len(train_images)
trainer.set_info('Dataset size', {'training': nb_sample, 'validation': len(test_images)})
trainer.set_generator_training_nb(nb_sample)
trainer.set_generator_validation_nb(len(test_images))
trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation). Read all images into memory from %s" %
(classes_count, max, len(train_images), 'not ' if augmentation is False else '', len(test_images), path)))
if classes_count == 0:
trainer.logger.warning("Could not find any classes. Does the directory contain images?")
sys.exit(1)
trainer.output_size = classes_count
trainer.set_info('classes', classes)
trainer.classes = classes
result['X_train'] = train
result['Y_train'] = train
result['X_test'] = test
result['Y_test'] = test
return result
except KeyboardInterrupt:
controller['running'] = False
sys.exit(1) | Reads all images into memory and applies augmentation if enabled | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/auto_dataset.py#L205-L310 |
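The reader above expects a <path>/{training,validation}/<category>/<image> directory layout. A simplified sketch of the category-name-to-index mapping it builds follows; build_category_map and the directory names are illustrative, and the real code also resolves titles from dataset_config['classes'].

def build_category_map(category_names):
    category_map = {}
    for name in sorted(category_names):
        if name.startswith('category_'):
            # explicit index encoded in the folder name
            category_map[name] = int(name.replace('category_', ''))
        else:
            # otherwise assign indices in encounter order
            category_map[name] = len(category_map)
    return category_map

print(build_category_map(['category_1', 'category_0']))  # {'category_0': 0, 'category_1': 1}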
aetros/aetros-cli | aetros/cuda_gpu.py | get_ordered_devices | def get_ordered_devices():
"""
Default CUDA_DEVICE_ORDER is not compatible with nvidia-docker.
Nvidia-Docker is using CUDA_DEVICE_ORDER=PCI_BUS_ID.
https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation
"""
libcudart = get_libcudart()
devices = {}
for i in range(0, get_installed_devices()):
gpu = get_device_properties(i)
pciBusId = ctypes.create_string_buffer(64)
libcudart.cudaDeviceGetPCIBusId(ctypes.byref(pciBusId), 64, i)
full_id = pciBusId.value.decode('utf-8')
gpu['fullId'] = full_id
devices[full_id] = gpu
ordered = []
i = 0
for key in sorted(devices):
devices[key]['id'] = i
ordered.append(devices[key])
i += 1
del libcudart
return ordered | python | def get_ordered_devices():
"""
Default CUDA_DEVICE_ORDER is not compatible with nvidia-docker.
Nvidia-Docker is using CUDA_DEVICE_ORDER=PCI_BUS_ID.
https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation
"""
libcudart = get_libcudart()
devices = {}
for i in range(0, get_installed_devices()):
gpu = get_device_properties(i)
pciBusId = ctypes.create_string_buffer(64)
libcudart.cudaDeviceGetPCIBusId(ctypes.byref(pciBusId), 64, i)
full_id = pciBusId.value.decode('utf-8')
gpu['fullId'] = full_id
devices[full_id] = gpu
ordered = []
i = 0
for key in sorted(devices):
devices[key]['id'] = i
ordered.append(devices[key])
i += 1
del libcudart
return ordered | Default CUDA_DEVICE_ORDER is not compatible with nvidia-docker.
Nvidia-Docker is using CUDA_DEVICE_ORDER=PCI_BUS_ID.
https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/cuda_gpu.py#L113-L143 |
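An illustration with fabricated PCI bus ids of the re-numbering that get_ordered_devices() applies: sort devices by bus id, then assign sequential ids, matching CUDA_DEVICE_ORDER=PCI_BUS_ID.

devices = {
    '0000:82:00.0': {'name': 'GPU B'},
    '0000:03:00.0': {'name': 'GPU A'},
}
ordered = []
for i, bus_id in enumerate(sorted(devices)):
    devices[bus_id]['id'] = i
    ordered.append(devices[bus_id])
print([(d['id'], d['name']) for d in ordered])  # [(0, 'GPU A'), (1, 'GPU B')]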
23andMe/seqseek | seqseek/lib.py | sorted_nicely | def sorted_nicely(l):
"""
Sort the given iterable in the way that humans expect.
http://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key) | python | def sorted_nicely(l):
"""
Sort the given iterable in the way that humans expect.
http://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key) | Sort the given iterable in the way that humans expect.
http://blog.codinghorror.com/sorting-for-humans-natural-sort-order/ | https://github.com/23andMe/seqseek/blob/773659ed280144d4fd62f313f783fc102e85458f/seqseek/lib.py#L168-L175 |
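A usage sketch, assuming the sorted_nicely() defined above (and its import re) is in scope; the file names are made up.

chromosomes = ['chr10.fa', 'chr2.fa', 'chr1.fa']
print(sorted(chromosomes))         # ['chr1.fa', 'chr10.fa', 'chr2.fa']  (lexicographic)
print(sorted_nicely(chromosomes))  # ['chr1.fa', 'chr2.fa', 'chr10.fa']  (natural order)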
slightlynybbled/engineering_notation | engineering_notation/engineering_notation.py | EngNumber.to_pn | def to_pn(self, sub_letter=None):
"""
Returns the part number equivalent. For instance,
a '1k' would still be '1k', but a
'1.2k' would, instead, be a '1k2'
:return:
"""
string = str(self)
if '.' not in string:
return string
# take care of the case of when there is no scaling unit
if not string[-1].isalpha():
if sub_letter is not None:
return string.replace('.', sub_letter)
return string
letter = string[-1]
return string.replace('.', letter)[:-1] | python | def to_pn(self, sub_letter=None):
"""
Returns the part number equivalent. For instance,
a '1k' would still be '1k', but a
'1.2k' would, instead, be a '1k2'
:return:
"""
string = str(self)
if '.' not in string:
return string
# take care of the case of when there is no scaling unit
if not string[-1].isalpha():
if sub_letter is not None:
return string.replace('.', sub_letter)
return string
letter = string[-1]
return string.replace('.', letter)[:-1] | Returns the part number equivalent. For instance,
a '1k' would still be '1k', but a
'1.2k' would, instead, be a '1k2'
:return: | https://github.com/slightlynybbled/engineering_notation/blob/fcd930b777d506b6385d84cf3af491bf5c29a42e/engineering_notation/engineering_notation.py#L298-L317 |
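A standalone sketch of the dot-to-letter substitution that to_pn() performs on the stringified value; to_part_number is an illustrative stand-in, not part of the engineering_notation package.

def to_part_number(string, sub_letter=None):
    if '.' not in string:
        return string
    # no scaling unit: only substitute when a sub_letter is given
    if not string[-1].isalpha():
        return string.replace('.', sub_letter) if sub_letter is not None else string
    # scaling unit present: move it into the decimal position
    letter = string[-1]
    return string.replace('.', letter)[:-1]

print(to_part_number('1.2k'))      # 1k2
print(to_part_number('4.7', 'R'))  # 4R7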
praekelt/django-ultracache | ultracache/utils.py | reduce_list_size | def reduce_list_size(li):
"""Return two lists
- the last N items of li whose total size is less than MAX_SIZE
- the rest of the original list li
"""
# sys.getsizeof is nearly useless. All our data is stringable so rather
# use that as a measure of size.
size = len(repr(li))
keep = li
toss = []
n = len(li)
decrement_by = max(n / 10, 10)
while (size >= MAX_SIZE) and (n > 0):
n -= decrement_by
toss = li[:-n]
keep = li[-n:]
size = len(repr(keep))
return keep, toss | python | def reduce_list_size(li):
"""Return two lists
- the last N items of li whose total size is less than MAX_SIZE
- the rest of the original list li
"""
# sys.getsizeof is nearly useless. All our data is stringable so rather
# use that as a measure of size.
size = len(repr(li))
keep = li
toss = []
n = len(li)
decrement_by = max(n / 10, 10)
while (size >= MAX_SIZE) and (n > 0):
n -= decrement_by
toss = li[:-n]
keep = li[-n:]
size = len(repr(keep))
return keep, toss | Return two lists
- the last N items of li whose total size is less than MAX_SIZE
- the rest of the original list li | https://github.com/praekelt/django-ultracache/blob/8898f10e50fc8f8d0a4cb7d3fe4d945bf257bd9f/ultracache/utils.py#L43-L60 |
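A standalone sketch of the same size-capping idea; cap_list and MAX_SIZE = 200 are assumptions for illustration, and integer division keeps the slice index an int under Python 3.

MAX_SIZE = 200  # assumed cap for this illustration

def cap_list(li):
    keep, toss = li, []
    n = len(li)
    step = max(n // 10, 10)
    # drop items from the front until the repr of the kept tail is small enough
    while len(repr(keep)) >= MAX_SIZE and n > step:
        n -= step
        toss, keep = li[:-n], li[-n:]
    return keep, toss

keep, toss = cap_list(['entry-%03d' % i for i in range(40)])
print(len(keep), len(toss))  # 10 30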
praekelt/django-ultracache | ultracache/utils.py | cache_meta | def cache_meta(request, cache_key, start_index=0):
"""Inspect request for objects in _ultracache and set appropriate entries
in Django's cache."""
path = request.get_full_path()
# todo: cache headers on the request since they never change during the
# request.
# Reduce headers to the subset as defined by the settings
headers = OrderedDict()
for k, v in sorted(request.META.items()):
if (k == "HTTP_COOKIE") and CONSIDER_COOKIES:
cookie = SimpleCookie()
cookie.load(v)
headers["cookie"] = "; ".join([
"%s=%s" % (k, morsel.value) for k, morsel \
in sorted(cookie.items()) if k in CONSIDER_COOKIES
])
elif k.startswith("HTTP_"):
k = k[5:].replace("_", "-").lower()
if k in CONSIDER_HEADERS:
headers[k] = v
# Lists needed for cache.get_many
to_set_get_keys = []
to_set_paths_get_keys = []
to_set_content_types_get_keys = []
to_set_content_types_paths_get_keys = []
# Dictionaries needed for cache.set_many
to_set = {}
to_set_paths = {}
to_set_content_types = {}
to_set_content_types_paths = {}
to_delete = []
to_set_objects = []
for ctid, obj_pk in request._ultracache[start_index:]:
# The object appears in these cache entries. If the object is modified
# then these cache entries are deleted.
key = "ucache-%s-%s" % (ctid, obj_pk)
if key not in to_set_get_keys:
to_set_get_keys.append(key)
# The object appears in these paths. If the object is modified then any
# caches that are read from when browsing to this path are cleared.
key = "ucache-pth-%s-%s" % (ctid, obj_pk)
if key not in to_set_paths_get_keys:
to_set_paths_get_keys.append(key)
# The content type appears in these cache entries. If an object of this
# content type is created then these cache entries are cleared.
key = "ucache-ct-%s" % ctid
if key not in to_set_content_types_get_keys:
to_set_content_types_get_keys.append(key)
# The content type appears in these paths. If an object of this content
# type is created then any caches that are read from when browsing to
# this path are cleared.
key = "ucache-ct-pth-%s" % ctid
if key not in to_set_content_types_paths_get_keys:
to_set_content_types_paths_get_keys.append(key)
# A list of objects that contribute to a cache entry
tu = (ctid, obj_pk)
if tu not in to_set_objects:
to_set_objects.append(tu)
# todo: rewrite to handle absence of get_many
di = cache.get_many(to_set_get_keys)
for key in to_set_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set[key] = keep
to_delete.extend(toss)
if cache_key not in keep:
if key not in to_set:
to_set[key] = keep
to_set[key] = to_set[key] + [cache_key]
if to_set == di:
to_set = {}
di = cache.get_many(to_set_paths_get_keys)
for key in to_set_paths_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set_paths[key] = keep
if [path, headers] not in keep:
if key not in to_set_paths:
to_set_paths[key] = keep
to_set_paths[key] = to_set_paths[key] + [[path, headers]]
if to_set_paths == di:
to_set_paths = {}
di = cache.get_many(to_set_content_types_get_keys)
for key in to_set_content_types_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set_content_types[key] = keep
to_delete.extend(toss)
if cache_key not in keep:
if key not in to_set_content_types:
to_set_content_types[key] = keep
to_set_content_types[key] = to_set_content_types[key] + [cache_key]
if to_set_content_types == di:
to_set_content_types = {}
di = cache.get_many(to_set_content_types_paths_get_keys)
for key in to_set_content_types_paths_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set_content_types_paths[key] = keep
if [path, headers] not in keep:
if key not in to_set_content_types_paths:
to_set_content_types_paths[key] = keep
to_set_content_types_paths[key] = to_set_content_types_paths[key] \
+ [[path, headers]]
if to_set_content_types_paths == di:
to_set_content_types_paths = {}
# Deletion must happen first because set may set some of these keys
if to_delete:
try:
cache.delete_many(to_delete)
except NotImplementedError:
for k in to_delete:
cache.delete(k)
# Do one set_many
di = {}
di.update(to_set)
del to_set
di.update(to_set_paths)
del to_set_paths
di.update(to_set_content_types)
del to_set_content_types
di.update(to_set_content_types_paths)
del to_set_content_types_paths
if to_set_objects:
di[cache_key + "-objs"] = to_set_objects
if di:
try:
cache.set_many(di, 86400)
except NotImplementedError:
for k, v in di.items():
cache.set(k, v, 86400) | python | def cache_meta(request, cache_key, start_index=0):
"""Inspect request for objects in _ultracache and set appropriate entries
in Django's cache."""
path = request.get_full_path()
# todo: cache headers on the request since they never change during the
# request.
# Reduce headers to the subset as defined by the settings
headers = OrderedDict()
for k, v in sorted(request.META.items()):
if (k == "HTTP_COOKIE") and CONSIDER_COOKIES:
cookie = SimpleCookie()
cookie.load(v)
headers["cookie"] = "; ".join([
"%s=%s" % (k, morsel.value) for k, morsel \
in sorted(cookie.items()) if k in CONSIDER_COOKIES
])
elif k.startswith("HTTP_"):
k = k[5:].replace("_", "-").lower()
if k in CONSIDER_HEADERS:
headers[k] = v
# Lists needed for cache.get_many
to_set_get_keys = []
to_set_paths_get_keys = []
to_set_content_types_get_keys = []
to_set_content_types_paths_get_keys = []
# Dictionaries needed for cache.set_many
to_set = {}
to_set_paths = {}
to_set_content_types = {}
to_set_content_types_paths = {}
to_delete = []
to_set_objects = []
for ctid, obj_pk in request._ultracache[start_index:]:
# The object appears in these cache entries. If the object is modified
# then these cache entries are deleted.
key = "ucache-%s-%s" % (ctid, obj_pk)
if key not in to_set_get_keys:
to_set_get_keys.append(key)
# The object appears in these paths. If the object is modified then any
# caches that are read from when browsing to this path are cleared.
key = "ucache-pth-%s-%s" % (ctid, obj_pk)
if key not in to_set_paths_get_keys:
to_set_paths_get_keys.append(key)
# The content type appears in these cache entries. If an object of this
# content type is created then these cache entries are cleared.
key = "ucache-ct-%s" % ctid
if key not in to_set_content_types_get_keys:
to_set_content_types_get_keys.append(key)
# The content type appears in these paths. If an object of this content
# type is created then any caches that are read from when browsing to
# this path are cleared.
key = "ucache-ct-pth-%s" % ctid
if key not in to_set_content_types_paths_get_keys:
to_set_content_types_paths_get_keys.append(key)
# A list of objects that contribute to a cache entry
tu = (ctid, obj_pk)
if tu not in to_set_objects:
to_set_objects.append(tu)
# todo: rewrite to handle absence of get_many
di = cache.get_many(to_set_get_keys)
for key in to_set_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set[key] = keep
to_delete.extend(toss)
if cache_key not in keep:
if key not in to_set:
to_set[key] = keep
to_set[key] = to_set[key] + [cache_key]
if to_set == di:
to_set = {}
di = cache.get_many(to_set_paths_get_keys)
for key in to_set_paths_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set_paths[key] = keep
if [path, headers] not in keep:
if key not in to_set_paths:
to_set_paths[key] = keep
to_set_paths[key] = to_set_paths[key] + [[path, headers]]
if to_set_paths == di:
to_set_paths = {}
di = cache.get_many(to_set_content_types_get_keys)
for key in to_set_content_types_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set_content_types[key] = keep
to_delete.extend(toss)
if cache_key not in keep:
if key not in to_set_content_types:
to_set_content_types[key] = keep
to_set_content_types[key] = to_set_content_types[key] + [cache_key]
if to_set_content_types == di:
to_set_content_types = {}
di = cache.get_many(to_set_content_types_paths_get_keys)
for key in to_set_content_types_paths_get_keys:
v = di.get(key, None)
keep = []
if v is not None:
keep, toss = reduce_list_size(v)
if toss:
to_set_content_types_paths[key] = keep
if [path, headers] not in keep:
if key not in to_set_content_types_paths:
to_set_content_types_paths[key] = keep
to_set_content_types_paths[key] = to_set_content_types_paths[key] \
+ [[path, headers]]
if to_set_content_types_paths == di:
to_set_content_types_paths = {}
# Deletion must happen first because set may set some of these keys
if to_delete:
try:
cache.delete_many(to_delete)
except NotImplementedError:
for k in to_delete:
cache.delete(k)
# Do one set_many
di = {}
di.update(to_set)
del to_set
di.update(to_set_paths)
del to_set_paths
di.update(to_set_content_types)
del to_set_content_types
di.update(to_set_content_types_paths)
del to_set_content_types_paths
if to_set_objects:
di[cache_key + "-objs"] = to_set_objects
if di:
try:
cache.set_many(di, 86400)
except NotImplementedError:
for k, v in di.items():
cache.set(k, v, 86400) | Inspect request for objects in _ultracache and set appropriate entries
in Django's cache. | https://github.com/praekelt/django-ultracache/blob/8898f10e50fc8f8d0a4cb7d3fe4d945bf257bd9f/ultracache/utils.py#L63-L223 |
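The four cache-key families that cache_meta() maintains, shown with fabricated content-type id and primary key values.

ctid, obj_pk = 12, 345  # fabricated content-type id and object primary key
print("ucache-%s-%s" % (ctid, obj_pk))      # cache entries that contain this object
print("ucache-pth-%s-%s" % (ctid, obj_pk))  # paths on which this object appears
print("ucache-ct-%s" % ctid)                # cache entries that contain this content type
print("ucache-ct-pth-%s" % ctid)            # paths on which this content type appears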
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/smith.py | AnchorSmith.least_role | def least_role() -> Role:
"""
Return the TRUSTEE indy-sdk role for an anchor acting in an AnchorSmith capacity.
:return: TRUSTEE role
"""
LOGGER.debug('AnchorSmith.least_role >>>')
rv = Role.TRUSTEE.token()
LOGGER.debug('AnchorSmith.least_role <<< %s', rv)
return rv | python | def least_role() -> Role:
"""
Return the TRUSTEE indy-sdk role for an anchor acting in an AnchorSmith capacity.
:return: TRUSTEE role
"""
LOGGER.debug('AnchorSmith.least_role >>>')
rv = Role.TRUSTEE.token()
LOGGER.debug('AnchorSmith.least_role <<< %s', rv)
return rv | Return the TRUSTEE indy-sdk role for an anchor acting in an AnchorSmith capacity.
:return: TRUSTEE role | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/smith.py#L37-L49 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/smith.py | AnchorSmith.send_nym | async def send_nym(self, did: str, verkey: str = None, alias: str = None, role: Role = None) -> None:
"""
Send input anchor's cryptonym (including DID, verification key, plus optional alias and role)
to the distributed ledger.
Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole for bad role.
:param did: anchor DID to send to ledger
:param verkey: optional anchor verification key
:param alias: optional alias
:param role: anchor role on the ledger (default value of USER)
"""
LOGGER.debug(
'AnchorSmith.send_nym >>> did: %s, verkey: %s, alias: %s, role: %s', did, verkey, alias, role)
if not ok_did(did):
LOGGER.debug('AnchorSmith.send_nym <!< Bad DID %s', did)
raise BadIdentifier('Bad DID {}'.format(did))
req_json = await ledger.build_nym_request(self.did, did, verkey, alias, (role or Role.USER).token())
await self._sign_submit(req_json)
LOGGER.debug('AnchorSmith.send_nym <<<') | python | async def send_nym(self, did: str, verkey: str = None, alias: str = None, role: Role = None) -> None:
"""
Send input anchor's cryptonym (including DID, verification key, plus optional alias and role)
to the distributed ledger.
Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole for bad role.
:param did: anchor DID to send to ledger
:param verkey: optional anchor verification key
:param alias: optional alias
:param role: anchor role on the ledger (default value of USER)
"""
LOGGER.debug(
'AnchorSmith.send_nym >>> did: %s, verkey: %s, alias: %s, role: %s', did, verkey, alias, role)
if not ok_did(did):
LOGGER.debug('AnchorSmith.send_nym <!< Bad DID %s', did)
raise BadIdentifier('Bad DID {}'.format(did))
req_json = await ledger.build_nym_request(self.did, did, verkey, alias, (role or Role.USER).token())
await self._sign_submit(req_json)
LOGGER.debug('AnchorSmith.send_nym <<<') | Send input anchor's cryptonym (including DID, verification key, plus optional alias and role)
to the distributed ledger.
Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole for bad role.
:param did: anchor DID to send to ledger
:param verkey: optional anchor verification key
:param alias: optional alias
:param role: anchor role on the ledger (default value of USER) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/smith.py#L51-L74 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/op/setnym.py | usage | def usage() -> None:
"""
Print usage advice.
"""
print()
print('Usage: setnym.py <config-ini>')
print()
print('where <config-ini> represents the path to the configuration file.')
print()
print('The operation submits a nym to a trustee anchor to send to the ledger,')
print('if the ledger does not have it already as configured.')
print()
print('The configuration file has sections and entries as follows:')
print(' * section [Node Pool]:')
print(' - name: the name of the node pool to which the operation applies')
print(' - genesis.txn.path: the path to the genesis transaction file')
print(' for the node pool (may omit if node pool already exists)')
print(' * section [Trustee Anchor]:')
print(" - name: the trustee anchor's (wallet) name")
print(" - wallet.type: (default blank) the trustee anchor's wallet type")
print(" - wallet.access: (default blank) the trustee anchor's")
print(' wallet access credential (password) value')
print(' * section [VON Anchor]:')
print(' - role: the role to request in the send-nym transaction; specify:')
print(' - (default) empty value for user with no additional write privileges')
print(' - TRUST_ANCHOR for VON anchor with write privileges for indy artifacts')
print(' - TRUSTEE for VON anchor sending further cryptonyms to the ledger')
print(" - name: the VON anchor's (wallet) name")
print(" - seed: the VON anchor's seed (optional, for wallet creation only)")
print(" - did: the VON anchor's DID (optional, for wallet creation only)")
print(' - wallet.create: whether create the wallet if it does not yet exist')
print(' - wallet.create: whether to create the wallet if it does not yet exist')
print(" - wallet.type: (default blank) the VON anchor's wallet type")
print(" - wallet.access: (default blank) the VON anchor's")
print(' wallet access credential (password) value.')
print() | python | def usage() -> None:
"""
Print usage advice.
"""
print()
print('Usage: setnym.py <config-ini>')
print()
print('where <config-ini> represents the path to the configuration file.')
print()
print('The operation submits a nym to a trustee anchor to send to the ledger,')
print('if the ledger does not have it already as configured.')
print()
print('The configuration file has sections and entries as follows:')
print(' * section [Node Pool]:')
print(' - name: the name of the node pool to which the operation applies')
print(' - genesis.txn.path: the path to the genesis transaction file')
print(' for the node pool (may omit if node pool already exists)')
print(' * section [Trustee Anchor]:')
print(" - name: the trustee anchor's (wallet) name")
print(" - wallet.type: (default blank) the trustee anchor's wallet type")
print(" - wallet.access: (default blank) the trustee anchor's")
print(' wallet access credential (password) value')
print(' * section [VON Anchor]:')
print(' - role: the role to request in the send-nym transaction; specify:')
print(' - (default) empty value for user with no additional write privileges')
print(' - TRUST_ANCHOR for VON anchor with write privileges for indy artifacts')
print(' - TRUSTEE for VON anchor sending further cryptonyms to the ledger')
print(" - name: the VON anchor's (wallet) name")
print(" - seed: the VON anchor's seed (optional, for wallet creation only)")
print(" - did: the VON anchor's DID (optional, for wallet creation only)")
print(' - wallet.create: whether create the wallet if it does not yet exist')
print(' - wallet.create: whether to create the wallet if it does not yet exist')
print(" - wallet.type: (default blank) the VON anchor's wallet type")
print(" - wallet.access: (default blank) the VON anchor's")
print(' wallet access credential (password) value.')
print() | Print usage advice. | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/op/setnym.py#L43-L79 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/op/setnym.py | _set_wallets | async def _set_wallets(an_data: dict) -> dict:
"""
Set wallets as configured for setnym operation.
:param an_data: dict mapping profiles to anchor data
:return: dict mapping anchor names to wallet objects
"""
w_mgr = WalletManager()
rv = {}
for profile in an_data:
w_cfg = {'id': an_data[profile].name}
if an_data[profile].wallet_type:
w_cfg['storage_type'] = an_data[profile].wallet_type
if an_data[profile].seed:
w_cfg['seed'] = an_data[profile].seed
if an_data[profile].did:
w_cfg['did'] = an_data[profile].did
if an_data[profile].wallet_create:
try:
await w_mgr.create(w_cfg, access=an_data[profile].wallet_access)
except ExtantWallet:
pass
rv[profile] = w_mgr.get(w_cfg, access=an_data[profile].wallet_access)
return rv | python | async def _set_wallets(an_data: dict) -> dict:
"""
Set wallets as configured for setnym operation.
:param an_data: dict mapping profiles to anchor data
:return: dict mapping anchor names to wallet objects
"""
w_mgr = WalletManager()
rv = {}
for profile in an_data:
w_cfg = {'id': an_data[profile].name}
if an_data[profile].wallet_type:
w_cfg['storage_type'] = an_data[profile].wallet_type
if an_data[profile].seed:
w_cfg['seed'] = an_data[profile].seed
if an_data[profile].did:
w_cfg['did'] = an_data[profile].did
if an_data[profile].wallet_create:
try:
await w_mgr.create(w_cfg, access=an_data[profile].wallet_access)
except ExtantWallet:
pass
rv[profile] = w_mgr.get(w_cfg, access=an_data[profile].wallet_access)
return rv | Set wallets as configured for setnym operation.
:param an_data: dict mapping profiles to anchor data
:return: dict mapping anchor names to wallet objects | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/op/setnym.py#L82-L107 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/op/setnym.py | setnym | async def setnym(ini_path: str) -> int:
"""
Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure
"""
config = inis2dict(ini_path)
if config['Trustee Anchor']['name'] == config['VON Anchor']['name']:
raise ExtantWallet('Wallet names must differ between VON Anchor and Trustee Anchor')
cfg_van_role = config['VON Anchor'].get('role', None) or None # nudge empty value from '' to None
if not ok_role(cfg_van_role):
raise BadRole('Configured role {} is not valid'.format(cfg_van_role))
pool_data = NodePoolData(
config['Node Pool']['name'],
config['Node Pool'].get('genesis.txn.path', None) or None)
an_data = {
'tan': AnchorData(
Role.TRUSTEE,
config['Trustee Anchor']['name'],
config['Trustee Anchor'].get('seed', None) or None,
config['Trustee Anchor'].get('did', None) or None,
config['Trustee Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['Trustee Anchor'].get('wallet.type', None) or None,
config['Trustee Anchor'].get('wallet.access', None) or None),
'van': AnchorData(
Role.get(cfg_van_role),
config['VON Anchor']['name'],
config['VON Anchor'].get('seed', None) or None,
config['VON Anchor'].get('did', None) or None,
config['VON Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['VON Anchor'].get('wallet.type', None) or None,
config['VON Anchor'].get('wallet.access', None) or None)
}
an_wallet = await _set_wallets(an_data)
p_mgr = NodePoolManager()
if pool_data.name not in await p_mgr.list():
if pool_data.genesis_txn_path:
await p_mgr.add_config(pool_data.name, pool_data.genesis_txn_path)
else:
raise AbsentPool('Node pool {} has no ledger configuration, but {} specifies no genesis txn path'.format(
pool_data.name,
ini_path))
async with an_wallet['tan'] as w_tan, (
an_wallet['van']) as w_van, (
p_mgr.get(pool_data.name)) as pool, (
TrusteeAnchor(w_tan, pool)) as tan, (
NominalAnchor(w_van, pool)) as van:
send_verkey = van.verkey
try:
nym_role = await tan.get_nym_role(van.did)
if an_data['van'].role == nym_role:
return 0 # ledger is as per configuration
send_verkey = None # only owner can touch verkey
if nym_role != Role.USER: # only remove role when it is not already None on the ledger
await tan.send_nym(van.did, send_verkey, van.wallet.name, Role.ROLE_REMOVE)
except AbsentNym:
pass # cryptonym not there yet, fall through
await tan.send_nym(van.did, send_verkey, van.wallet.name, an_data['van'].role)
return 0 | python | async def setnym(ini_path: str) -> int:
"""
Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure
"""
config = inis2dict(ini_path)
if config['Trustee Anchor']['name'] == config['VON Anchor']['name']:
raise ExtantWallet('Wallet names must differ between VON Anchor and Trustee Anchor')
cfg_van_role = config['VON Anchor'].get('role', None) or None # nudge empty value from '' to None
if not ok_role(cfg_van_role):
raise BadRole('Configured role {} is not valid'.format(cfg_van_role))
pool_data = NodePoolData(
config['Node Pool']['name'],
config['Node Pool'].get('genesis.txn.path', None) or None)
an_data = {
'tan': AnchorData(
Role.TRUSTEE,
config['Trustee Anchor']['name'],
config['Trustee Anchor'].get('seed', None) or None,
config['Trustee Anchor'].get('did', None) or None,
config['Trustee Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['Trustee Anchor'].get('wallet.type', None) or None,
config['Trustee Anchor'].get('wallet.access', None) or None),
'van': AnchorData(
Role.get(cfg_van_role),
config['VON Anchor']['name'],
config['VON Anchor'].get('seed', None) or None,
config['VON Anchor'].get('did', None) or None,
config['VON Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
config['VON Anchor'].get('wallet.type', None) or None,
config['VON Anchor'].get('wallet.access', None) or None)
}
an_wallet = await _set_wallets(an_data)
p_mgr = NodePoolManager()
if pool_data.name not in await p_mgr.list():
if pool_data.genesis_txn_path:
await p_mgr.add_config(pool_data.name, pool_data.genesis_txn_path)
else:
raise AbsentPool('Node pool {} has no ledger configuration, but {} specifies no genesis txn path'.format(
pool_data.name,
ini_path))
async with an_wallet['tan'] as w_tan, (
an_wallet['van']) as w_van, (
p_mgr.get(pool_data.name)) as pool, (
TrusteeAnchor(w_tan, pool)) as tan, (
NominalAnchor(w_van, pool)) as van:
send_verkey = van.verkey
try:
nym_role = await tan.get_nym_role(van.did)
if an_data['van'].role == nym_role:
return 0 # ledger is as per configuration
send_verkey = None # only owner can touch verkey
if nym_role != Role.USER: # only remove role when it is not already None on the ledger
await tan.send_nym(van.did, send_verkey, van.wallet.name, Role.ROLE_REMOVE)
except AbsentNym:
pass # cryptonym not there yet, fall through
await tan.send_nym(van.did, send_verkey, van.wallet.name, an_data['van'].role)
return 0 | Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send.
Register exit hooks to close pool and trustee anchor.
Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration.
:param ini_path: path to configuration file
:return: 0 for OK, 1 for failure | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/op/setnym.py#L110-L182 |
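The shape of the configuration that setnym() reads via inis2dict(), sketched as the equivalent dict; the pool, wallet names, and paths below are placeholders.

config = {
    'Node Pool': {'name': 'pool1', 'genesis.txn.path': '/path/to/genesis.txn'},
    'Trustee Anchor': {'name': 'trustee-wallet', 'wallet.create': '1'},
    'VON Anchor': {'name': 'von-wallet', 'role': 'TRUST_ANCHOR', 'wallet.create': '1'},
}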
PSPC-SPAC-buyandsell/von_anchor | von_anchor/op/setnym.py | main | def main(args: Sequence[str] = None) -> int:
"""
Main line for script: check arguments and dispatch operation to set nym.
:param args: command-line arguments
:return: 0 for OK, 1 for failure
"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)-15s | %(levelname)-8s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger('von_anchor').setLevel(logging.WARNING)
logging.getLogger('indy').setLevel(logging.ERROR)
if args is None:
args = sys.argv[1:]
if len(sys.argv) == 2:
try:
return do_wait(setnym(sys.argv[1]))
except VonAnchorError as vax:
print(str(vax))
return 1
else:
usage()
return 1 | python | def main(args: Sequence[str] = None) -> int:
"""
Main line for script: check arguments and dispatch operation to set nym.
:param args: command-line arguments
:return: 0 for OK, 1 for failure
"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)-15s | %(levelname)-8s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger('von_anchor').setLevel(logging.WARNING)
logging.getLogger('indy').setLevel(logging.ERROR)
if args is None:
args = sys.argv[1:]
if len(sys.argv) == 2:
try:
return do_wait(setnym(sys.argv[1]))
except VonAnchorError as vax:
print(str(vax))
return 1
else:
usage()
return 1 | Main line for script: check arguments and dispatch operation to set nym.
:param args: command-line arguments
:return: 0 for OK, 1 for failure | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/op/setnym.py#L185-L211 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | schema_id | def schema_id(origin_did: str, name: str, version: str) -> str:
"""
Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier
"""
return '{}:2:{}:{}'.format(origin_did, name, version) | python | def schema_id(origin_did: str, name: str, version: str) -> str:
"""
Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier
"""
return '{}:2:{}:{}'.format(origin_did, name, version) | Return schema identifier for input origin DID, schema name, and schema version.
:param origin_did: DID of schema originator
:param name: schema name
:param version: schema version
:return: schema identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L34-L44 |
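A usage sketch with a placeholder DID, assuming schema_id() above is in scope.

# placeholder 22-character base58 DID, made-up schema name and version
print(schema_id('Q4zqM7aXqm7gDQkUVLng9h', 'drinks-preferences', '1.0'))
# Q4zqM7aXqm7gDQkUVLng9h:2:drinks-preferences:1.0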
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | ok_did | def ok_did(token: str) -> bool:
"""
Whether input token looks like a valid distributed identifier.
:param token: candidate string
:return: whether input token looks like a valid distributed identifier
"""
return bool(re.match('[{}]{{21,22}}$'.format(B58), token or '')) | python | def ok_did(token: str) -> bool:
"""
Whether input token looks like a valid distributed identifier.
:param token: candidate string
:return: whether input token looks like a valid distributed identifier
"""
return bool(re.match('[{}]{{21,22}}$'.format(B58), token or '')) | Whether input token looks like a valid distributed identifier.
:param token: candidate string
:return: whether input token looks like a valid distributed identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L83-L91 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | ok_schema_id | def ok_schema_id(token: str) -> bool:
"""
Whether input token looks like a valid schema identifier;
i.e., <issuer-did>:2:<name>:<version>.
:param token: candidate string
:return: whether input token looks like a valid schema identifier
"""
return bool(re.match('[{}]{{21,22}}:2:.+:[0-9.]+$'.format(B58), token or '')) | python | def ok_schema_id(token: str) -> bool:
"""
Whether input token looks like a valid schema identifier;
i.e., <issuer-did>:2:<name>:<version>.
:param token: candidate string
:return: whether input token looks like a valid schema identifier
"""
return bool(re.match('[{}]{{21,22}}:2:.+:[0-9.]+$'.format(B58), token or '')) | Whether input token looks like a valid schema identifier;
i.e., <issuer-did>:2:<name>:<version>.
:param token: candidate string
:return: whether input token looks like a valid schema identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L94-L103 |
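A validation sketch with placeholder identifiers, assuming ok_did() and ok_schema_id() above are in scope.

did = 'Q4zqM7aXqm7gDQkUVLng9h'             # placeholder: 22 base58 characters
print(ok_did(did))                          # True
print(ok_did('not-a-did'))                  # False
print(ok_schema_id(did + ':2:prefs:1.0'))   # True
print(ok_schema_id(did + ':3:prefs:1.0'))   # False: the 2 marker is required for schema ids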
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | schema_key | def schema_key(s_id: str) -> SchemaKey:
"""
Return schema key (namedtuple) convenience for schema identifier components.
:param s_id: schema identifier
:return: schema key (namedtuple) object
"""
s_key = s_id.split(':')
s_key.pop(1) # take out indy-sdk schema marker: 2 marks indy-sdk schema id
return SchemaKey(*s_key) | python | def schema_key(s_id: str) -> SchemaKey:
"""
Return schema key (namedtuple) convenience for schema identifier components.
:param s_id: schema identifier
:return: schema key (namedtuple) object
"""
s_key = s_id.split(':')
s_key.pop(1) # take out indy-sdk schema marker: 2 marks indy-sdk schema id
return SchemaKey(*s_key) | Return schema key (namedtuple) convenience for schema identifier components.
:param s_id: schema identifier
:return: schema key (namedtuple) object | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L106-L117 |
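A usage sketch, assuming schema_key() above is in scope; tuple unpacking avoids relying on the SchemaKey field names, which are defined elsewhere in von_anchor.

origin_did, name, version = schema_key('Q4zqM7aXqm7gDQkUVLng9h:2:drinks-preferences:1.0')
print(origin_did, name, version)  # Q4zqM7aXqm7gDQkUVLng9h drinks-preferences 1.0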
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | cred_def_id | def cred_def_id(issuer_did: str, schema_seq_no: int, protocol: Protocol = None) -> str:
"""
Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier
"""
return (protocol or Protocol.DEFAULT).cred_def_id(issuer_did, schema_seq_no) | python | def cred_def_id(issuer_did: str, schema_seq_no: int, protocol: Protocol = None) -> str:
"""
Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier
"""
return (protocol or Protocol.DEFAULT).cred_def_id(issuer_did, schema_seq_no) | Return credential definition identifier for input issuer DID and schema sequence number.
Implementation passes to NodePool Protocol.
:param issuer_did: DID of credential definition issuer
:param schema_seq_no: schema sequence number
:param protocol: indy protocol version
:return: credential definition identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L120-L132 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | ok_cred_def_id | def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
"""
cd_id_m = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) | python | def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
"""
cd_id_m = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) | Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L135-L147 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | cred_def_id2seq_no | def cred_def_id2seq_no(cd_id: str) -> int:
"""
Given a credential definition identifier, return its schema sequence number.
Raise BadIdentifier on input that is not a credential definition identifier.
:param cd_id: credential definition identifier
:return: sequence number
"""
if ok_cred_def_id(cd_id):
return int(cd_id.split(':')[3]) # sequence number is token at 0-based position 3
raise BadIdentifier('Bad credential definition identifier {}'.format(cd_id)) | python | def cred_def_id2seq_no(cd_id: str) -> int:
"""
Given a credential definition identifier, return its schema sequence number.
Raise BadIdentifier on input that is not a credential definition identifier.
:param cd_id: credential definition identifier
:return: sequence number
"""
if ok_cred_def_id(cd_id):
return int(cd_id.split(':')[3]) # sequence number is token at 0-based position 3
raise BadIdentifier('Bad credential definition identifier {}'.format(cd_id)) | Given a credential definition identifier, return its schema sequence number.
Raise BadIdentifier on input that is not a credential definition identifier.
:param cd_id: credential definition identifier
:return: sequence number | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L150-L161 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | rev_reg_id | def rev_reg_id(cd_id: str, tag: Union[str, int]) -> str:
"""
Given a credential definition identifier and a tag, return the corresponding
revocation registry identifier, repeating the issuer DID from the
input identifier.
:param cd_id: credential definition identifier
:param tag: tag to use
:return: revocation registry identifier
"""
return '{}:4:{}:CL_ACCUM:{}'.format(cd_id.split(":", 1)[0], cd_id, tag) | python | def rev_reg_id(cd_id: str, tag: Union[str, int]) -> str:
"""
Given a credential definition identifier and a tag, return the corresponding
revocation registry identifier, repeating the issuer DID from the
input identifier.
:param cd_id: credential definition identifier
:param tag: tag to use
:return: revocation registry identifier
"""
return '{}:4:{}:CL_ACCUM:{}'.format(cd_id.split(":", 1)[0], cd_id, tag) | Given a credential definition identifier and a tag, return the corresponding
revocation registry identifier, repeating the issuer DID from the
input identifier.
:param cd_id: credential definition identifier
:param tag: tag to use
:return: revocation registry identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L164-L175 |
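A usage sketch with placeholder identifiers, assuming rev_reg_id() above is in scope.

cd_id = 'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag'  # placeholder credential definition id
print(rev_reg_id(cd_id, 0))
# Q4zqM7aXqm7gDQkUVLng9h:4:Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag:CL_ACCUM:0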
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | ok_rev_reg_id | def ok_rev_reg_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e.,
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid revocation registry identifier
"""
rr_id_m = re.match(
'([{0}]{{21,22}}):4:([{0}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?:CL_ACCUM:.+$'.format(B58),
token or '')
return bool(rr_id_m) and ((not issuer_did) or (rr_id_m.group(1) == issuer_did and rr_id_m.group(2) == issuer_did)) | python | def ok_rev_reg_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e.,
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid revocation registry identifier
"""
rr_id_m = re.match(
'([{0}]{{21,22}}):4:([{0}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?:CL_ACCUM:.+$'.format(B58),
token or '')
return bool(rr_id_m) and ((not issuer_did) or (rr_id_m.group(1) == issuer_did and rr_id_m.group(2) == issuer_did)) | Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e.,
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid revocation registry identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L178-L192 |
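A quick validation sketch (identifiers are illustrative; assumes von_anchor.util is importable):
from von_anchor.util import ok_rev_reg_id

rr_id = 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:18:tag:CL_ACCUM:0'
assert ok_rev_reg_id(rr_id)                                # well-formed, any issuer DID
assert ok_rev_reg_id(rr_id, 'WgWxqztrNooG92RXvxSTWv')      # issuer DID matches both positions
assert not ok_rev_reg_id(rr_id, 'Q4zqM7aXqm7gDQkUVLng9h')  # different issuer DID
assert not ok_rev_reg_id('not-a-rev-reg-id')               # malformed token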
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | rev_reg_id2cred_def_id | def rev_reg_id2cred_def_id(rr_id: str) -> str:
"""
Given a revocation registry identifier, return its corresponding credential definition identifier.
Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier
"""
if ok_rev_reg_id(rr_id):
return ':'.join(rr_id.split(':')[2:-2]) # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id)) | python | def rev_reg_id2cred_def_id(rr_id: str) -> str:
"""
Given a revocation registry identifier, return its corresponding credential definition identifier.
Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier
"""
if ok_rev_reg_id(rr_id):
return ':'.join(rr_id.split(':')[2:-2]) # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id)) | Given a revocation registry identifier, return its corresponding credential definition identifier.
Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L195-L206 |
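Usage sketch recovering the cred def id embedded in a rev reg id (illustrative identifiers):
from von_anchor.util import rev_reg_id2cred_def_id

rr_id = 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0'
assert rev_reg_id2cred_def_id(rr_id) == 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag'  # tokens [2:-2]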
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | rev_reg_id2cred_def_id_tag | def rev_reg_id2cred_def_id_tag(rr_id: str) -> (str, str):
"""
Given a revocation registry identifier, return its corresponding credential definition identifier and
(stringified int) tag. Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier and tag
"""
if ok_rev_reg_id(rr_id):
return (
':'.join(rr_id.split(':')[2:-2]), # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
str(rr_id.split(':')[-1]) # tag is last token
)
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id)) | python | def rev_reg_id2cred_def_id_tag(rr_id: str) -> (str, str):
"""
Given a revocation registry identifier, return its corresponding credential definition identifier and
(stringified int) tag. Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier and tag
"""
if ok_rev_reg_id(rr_id):
return (
':'.join(rr_id.split(':')[2:-2]), # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
str(rr_id.split(':')[-1]) # tag is last token
)
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id)) | Given a revocation registry identifier, return its corresponding credential definition identifier and
(stringified int) tag. Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier and tag | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L220-L234 |
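Usage sketch returning both the embedded cred def id and the rev reg tag (illustrative identifiers):
from von_anchor.util import rev_reg_id2cred_def_id_tag

rr_id = 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0'
assert rev_reg_id2cred_def_id_tag(rr_id) == ('WgWxqztrNooG92RXvxSTWv:3:CL:17:tag', '0')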
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | iter_briefs | def iter_briefs(briefs: Union[dict, Sequence[dict]]) -> tuple:
"""
Given a cred-brief/cred-info, a sequence thereof, or cred-brief-dict
(as HolderProver.get_cred_briefs_by_proof_req_q() returns), return tuple with
all contained cred-briefs.
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:return: tuple of cred-briefs
"""
if isinstance(briefs, dict):
if all(ok_wallet_reft(k) for k in briefs):
return tuple(briefs.values())
return (briefs,)
return tuple(briefs) | python | def iter_briefs(briefs: Union[dict, Sequence[dict]]) -> tuple:
"""
Given a cred-brief/cred-info, a sequence thereof, or cred-brief-dict
(as HolderProver.get_cred_briefs_by_proof_req_q() returns), return tuple with
all contained cred-briefs.
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:return: tuple of cred-briefs
"""
if isinstance(briefs, dict):
if all(ok_wallet_reft(k) for k in briefs):
return tuple(briefs.values())
return (briefs,)
return tuple(briefs) | Given a cred-brief/cred-info, a sequence thereof, or cred-brief-dict
(as HolderProver.get_cred_briefs_by_proof_req_q() returns), return tuple with
all contained cred-briefs.
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:return: tuple of cred-briefs | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L237-L251 |
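A sketch of the three accepted shapes (assumes wallet referents are the UUID-style strings that ok_wallet_reft recognizes; values are illustrative):
from von_anchor.util import iter_briefs

brief = {
    'cred_info': {'referent': '43f8dc18-ac00-4b72-8a96-56f47dba77ca', 'attrs': {'legalName': 'Tart City'}},
    'interval': None
}
assert iter_briefs([brief]) == (brief,)  # sequence of briefs -> tuple
assert iter_briefs({'43f8dc18-ac00-4b72-8a96-56f47dba77ca': brief}) == (brief,)  # cred-brief-dict -> tuple of values
assert iter_briefs(brief) == (brief,)  # single brief: its keys are not wallet referents, so it wraps as a 1-tuple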
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | box_ids | def box_ids(briefs: Union[dict, Sequence[dict]], cred_ids: Union[Sequence[str], str] = None) -> dict:
"""
Given one or more cred-briefs/cred-infos, and an optional sequence of credential identifiers
(aka wallet cred ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:param cred_ids: credential identifier or sequence thereof for which to find corresponding
schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present)
"""
rv = {}
for brief in iter_briefs(briefs):
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
cred_id = cred_info['referent']
if ((cred_id not in rv) and (not cred_ids or cred_id in [cred_ids, [cred_ids]][isinstance(cred_ids, str)])):
rv[cred_id] = {
'schema_id': cred_info['schema_id'],
'cred_def_id': cred_info['cred_def_id'],
'rev_reg_id': cred_info['rev_reg_id']
}
return rv | python | def box_ids(briefs: Union[dict, Sequence[dict]], cred_ids: Union[Sequence[str], str] = None) -> dict:
"""
Given one or more cred-briefs/cred-infos, and an optional sequence of credential identifiers
(aka wallet cred ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:param cred_ids: credential identifier or sequence thereof for which to find corresponding
schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present)
"""
rv = {}
for brief in iter_briefs(briefs):
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
cred_id = cred_info['referent']
if ((cred_id not in rv) and (not cred_ids or cred_id in [cred_ids, [cred_ids]][isinstance(cred_ids, str)])):
rv[cred_id] = {
'schema_id': cred_info['schema_id'],
'cred_def_id': cred_info['cred_def_id'],
'rev_reg_id': cred_info['rev_reg_id']
}
return rv | Given one or more cred-briefs/cred-infos, and an optional sequence of credential identifiers
(aka wallet cred ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param briefs: cred-brief/cred-info, sequence thereof, or cred-brief-dict
:param cred_ids: credential identifier or sequence thereof for which to find corresponding
schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L254-L280 |
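Usage sketch over a single cred-info (illustrative values; assumes von_anchor.util is importable):
from von_anchor.util import box_ids

info = {
    'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
    'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0',
    'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
    'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0',
    'cred_rev_id': '48',
    'attrs': {'legalName': 'Tart City'}
}
assert box_ids([info]) == {
    'c15674a9-7321-440d-bbed-e1ac9273abd5': {
        'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0',
        'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
        'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0'
    }
}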
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | prune_creds_json | def prune_creds_json(creds: dict, cred_ids: set) -> str:
"""
Strip all creds out of the input json structure that do not match any of the input credential identifiers.
:param creds: indy-sdk creds structure
:param cred_ids: the set of credential identifiers of interest
:return: the reduced creds json
"""
rv = deepcopy(creds)
for key in ('attrs', 'predicates'):
for attr_uuid, creds_by_uuid in rv[key].items():
rv[key][attr_uuid] = [cred for cred in creds_by_uuid if cred['cred_info']['referent'] in cred_ids]
empties = [attr_uuid for attr_uuid in rv[key] if not rv[key][attr_uuid]]
for attr_uuid in empties:
del rv[key][attr_uuid]
return json.dumps(rv) | python | def prune_creds_json(creds: dict, cred_ids: set) -> str:
"""
Strip all creds out of the input json structure that do not match any of the input credential identifiers.
:param creds: indy-sdk creds structure
:param cred_ids: the set of credential identifiers of interest
:return: the reduced creds json
"""
rv = deepcopy(creds)
for key in ('attrs', 'predicates'):
for attr_uuid, creds_by_uuid in rv[key].items():
rv[key][attr_uuid] = [cred for cred in creds_by_uuid if cred['cred_info']['referent'] in cred_ids]
empties = [attr_uuid for attr_uuid in rv[key] if not rv[key][attr_uuid]]
for attr_uuid in empties:
del rv[key][attr_uuid]
return json.dumps(rv) | Strip all creds out of the input json structure that do not match any of the input credential identifiers.
:param creds: indy-sdk creds structure
:param cred_ids: the set of credential identifiers of interest
:return: the reduced creds json | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L283-L301 |
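A small sketch with a toy creds structure (referents shortened for legibility; assumes von_anchor.util is importable):
import json
from von_anchor.util import prune_creds_json

creds = {
    'attrs': {
        'attr0_uuid': [
            {'interval': None, 'cred_info': {'referent': 'cred-id-0', 'attrs': {'attr0': '2'}}},
            {'interval': None, 'cred_info': {'referent': 'cred-id-1', 'attrs': {'attr0': '1'}}}
        ]
    },
    'predicates': {}
}
pruned = json.loads(prune_creds_json(creds, {'cred-id-1'}))
assert [c['cred_info']['referent'] for c in pruned['attrs']['attr0_uuid']] == ['cred-id-1']  # only matching cred kept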
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | proof_req_infos2briefs | def proof_req_infos2briefs(proof_req: dict, infos: Union[dict, Sequence[dict]]) -> list:
"""
Given a proof request and corresponding cred-info(s), return a list of cred-briefs
(i.e., cred-info plus interval).
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param infos: cred-info or sequence thereof; e.g.,
::
[
{
'attrs': {
'auditDate': '2018-07-30',
'greenLevel': 'Silver',
'legalName': 'Tart City'
},
'cred_rev_id': '48',
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0',
'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0'
},
...
]
:return: list of cred-briefs
"""
rv = []
refts = proof_req_attr_referents(proof_req)
for info in iter_briefs(infos):
if info['cred_def_id'] not in refts:
continue
brief = {
'cred_info': info,
'interval': {}
}
fro = None
to = None
for uuid in refts[info['cred_def_id']].values():
interval = proof_req['requested_attributes'][uuid].get('non_revoked', {})
if 'from' in interval:
fro = min(fro or interval['from'], interval['from'])
if 'to' in interval:
to = max(to or interval['to'], interval['to'])
if to:
brief['interval']['to'] = to
if fro:
brief['interval']['from'] = fro
if not brief['interval']:
brief['interval'] = None
rv.append(brief)
return rv | python | def proof_req_infos2briefs(proof_req: dict, infos: Union[dict, Sequence[dict]]) -> list:
"""
Given a proof request and corresponding cred-info(s), return a list of cred-briefs
(i.e., cred-info plus interval).
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param infos: cred-info or sequence thereof; e.g.,
::
[
{
'attrs': {
'auditDate': '2018-07-30',
'greenLevel': 'Silver',
'legalName': 'Tart City'
},
'cred_rev_id': '48',
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0',
'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0'
},
...
]
:return: list of cred-briefs
"""
rv = []
refts = proof_req_attr_referents(proof_req)
for info in iter_briefs(infos):
if info['cred_def_id'] not in refts:
continue
brief = {
'cred_info': info,
'interval': {}
}
fro = None
to = None
for uuid in refts[info['cred_def_id']].values():
interval = proof_req['requested_attributes'][uuid].get('non_revoked', {})
if 'from' in interval:
fro = min(fro or interval['from'], interval['from'])
if 'to' in interval:
to = max(to or interval['to'], interval['to'])
if to:
brief['interval']['to'] = to
if fro:
brief['interval']['from'] = fro
if not brief['interval']:
brief['interval'] = None
rv.append(brief)
return rv | Given a proof request and corresponding cred-info(s), return a list of cred-briefs
(i.e., cred-info plus interval).
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param infos: cred-info or sequence thereof; e.g.,
::
[
{
'attrs': {
'auditDate': '2018-07-30',
'greenLevel': 'Silver',
'legalName': 'Tart City'
},
'cred_rev_id': '48',
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0',
'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0'
},
...
]
:return: list of cred-briefs | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L304-L362 |
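Sketch turning one cred-info into a cred-brief using the attribute's non-revocation interval (illustrative proof request and identifiers):
from von_anchor.util import proof_req_infos2briefs

proof_req = {
    'name': 'proof_req',
    'version': '0.0',
    'requested_attributes': {
        '17_legalName_uuid': {
            'name': 'legalName',
            'restrictions': [{'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag'}],
            'non_revoked': {'from': 1532367957, 'to': 1532367957}
        }
    },
    'requested_predicates': {}
}
info = {
    'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
    'schema_id': 'WgWxqztrNooG92RXvxSTWv:2:green:1.0',
    'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag',
    'rev_reg_id': 'WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:17:tag:CL_ACCUM:0',
    'cred_rev_id': '48',
    'attrs': {'legalName': 'Tart City'}
}
briefs = proof_req_infos2briefs(proof_req, [info])
assert briefs == [{'cred_info': info, 'interval': {'to': 1532367957, 'from': 1532367957}}]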
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | proof_req_briefs2req_creds | def proof_req_briefs2req_creds(proof_req: dict, briefs: Union[dict, Sequence[dict]]) -> dict:
"""
Given a proof request and cred-brief(s), return a requested-creds structure.
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param briefs: credential brief, sequence thereof (as indy-sdk wallet credential search returns),
or cred-brief-dict (as HolderProver.get_cred_briefs_for_proof_req_q() returns); e.g.,
::
[
{
"cred_info": {
"cred_rev_id": "149",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag",
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0",
"rev_reg_id": "LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag:CL_ACCUM:1",
"referent": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"attrs": {
"busId": "11144444",
"endDate": "",
"id": "3",
"effectiveDate": "2012-12-01",
"jurisdictionId": "1",
"orgTypeId": "2",
"legalName": "Tart City"
}
},
"interval": {
"to": 1532448939,
"from": 1234567890
}
},
...
]
:return: indy-sdk requested creds json to pass to proof creation request; e.g.,
::
{
"requested_attributes": {
"15_endDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_id_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_effectiveDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_busId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_orgTypeId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": false
},
"15_jurisdictionId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_legalName_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
}
},
"requested_predicates": {
"15_orgTypeId_GE_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
}
},
"self_attested_attributes": {}
}
"""
rv = {
'self_attested_attributes': {},
'requested_attributes': {},
'requested_predicates': {}
}
attr_refts = proof_req_attr_referents(proof_req)
pred_refts = proof_req_pred_referents(proof_req)
for brief in iter_briefs(briefs):
cred_info = brief['cred_info']
timestamp = (brief['interval'] or {}).get('to', None)
for attr in cred_info['attrs']:
if attr in attr_refts.get(cred_info['cred_def_id'], {}):
req_attr = {
'cred_id': cred_info['referent'],
'revealed': attr not in pred_refts.get(cred_info['cred_def_id'], {}),
'timestamp': timestamp
}
if not timestamp:
req_attr.pop('timestamp')
rv['requested_attributes'][attr_refts[cred_info['cred_def_id']][attr]] = req_attr
if attr in pred_refts.get(cred_info['cred_def_id'], {}):
for uuid in pred_refts[cred_info['cred_def_id']][attr]:
req_pred = {
'cred_id': cred_info['referent'],
'timestamp': timestamp
}
if not timestamp:
req_pred.pop('timestamp')
rv['requested_predicates'][uuid] = req_pred
return rv | python | def proof_req_briefs2req_creds(proof_req: dict, briefs: Union[dict, Sequence[dict]]) -> dict:
"""
Given a proof request and cred-brief(s), return a requested-creds structure.
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param briefs: credential brief, sequence thereof (as indy-sdk wallet credential search returns),
or cred-brief-dict (as HolderProver.get_cred_briefs_for_proof_req_q() returns); e.g.,
::
[
{
"cred_info": {
"cred_rev_id": "149",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag",
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0",
"rev_reg_id": "LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag:CL_ACCUM:1",
"referent": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"attrs": {
"busId": "11144444",
"endDate": "",
"id": "3",
"effectiveDate": "2012-12-01",
"jurisdictionId": "1",
"orgTypeId": "2",
"legalName": "Tart City"
}
},
"interval": {
"to": 1532448939,
"from": 1234567890
}
},
...
]
:return: indy-sdk requested creds json to pass to proof creation request; e.g.,
::
{
"requested_attributes": {
"15_endDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_id_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_effectiveDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_busId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_orgTypeId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": false
},
"15_jurisdictionId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_legalName_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
}
},
"requested_predicates": {
"15_orgTypeId_GE_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
}
},
"self_attested_attributes": {}
}
"""
rv = {
'self_attested_attributes': {},
'requested_attributes': {},
'requested_predicates': {}
}
attr_refts = proof_req_attr_referents(proof_req)
pred_refts = proof_req_pred_referents(proof_req)
for brief in iter_briefs(briefs):
cred_info = brief['cred_info']
timestamp = (brief['interval'] or {}).get('to', None)
for attr in cred_info['attrs']:
if attr in attr_refts.get(cred_info['cred_def_id'], {}):
req_attr = {
'cred_id': cred_info['referent'],
'revealed': attr not in pred_refts.get(cred_info['cred_def_id'], {}),
'timestamp': timestamp
}
if not timestamp:
req_attr.pop('timestamp')
rv['requested_attributes'][attr_refts[cred_info['cred_def_id']][attr]] = req_attr
if attr in pred_refts.get(cred_info['cred_def_id'], {}):
for uuid in pred_refts[cred_info['cred_def_id']][attr]:
req_pred = {
'cred_id': cred_info['referent'],
'timestamp': timestamp
}
if not timestamp:
req_pred.pop('timestamp')
rv['requested_predicates'][uuid] = req_pred
return rv | Given a proof request and cred-brief(s), return a requested-creds structure.
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param briefs: credential brief, sequence thereof (as indy-sdk wallet credential search returns),
or cred-brief-dict (as HolderProver.get_cred_briefs_for_proof_req_q() returns); e.g.,
::
[
{
"cred_info": {
"cred_rev_id": "149",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag",
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0",
"rev_reg_id": "LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag:CL_ACCUM:1",
"referent": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"attrs": {
"busId": "11144444",
"endDate": "",
"id": "3",
"effectiveDate": "2012-12-01",
"jurisdictionId": "1",
"orgTypeId": "2",
"legalName": "Tart City"
}
},
"interval": {
"to": 1532448939,
"from": 1234567890
}
},
...
]
:return: indy-sdk requested creds json to pass to proof creation request; e.g.,
::
{
"requested_attributes": {
"15_endDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_id_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_effectiveDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_busId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_orgTypeId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": false
},
"15_jurisdictionId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_legalName_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
}
},
"requested_predicates": {
"15_orgTypeId_GE_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
}
},
"self_attested_attributes": {}
} | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L364-L485 |
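A compact sketch with one non-revocable credential (no interval, so no timestamps appear in the result); identifiers are illustrative:
from von_anchor.util import proof_req_briefs2req_creds

proof_req = {
    'name': 'proof_req',
    'version': '0.0',
    'requested_attributes': {
        '15_legalName_uuid': {
            'name': 'legalName',
            'restrictions': [{'cred_def_id': 'LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag'}]
        }
    },
    'requested_predicates': {}
}
brief = {
    'cred_info': {
        'referent': '43f8dc18-ac00-4b72-8a96-56f47dba77ca',
        'schema_id': 'LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0',
        'cred_def_id': 'LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag',
        'rev_reg_id': None,
        'cred_rev_id': None,
        'attrs': {'legalName': 'Tart City'}
    },
    'interval': None
}
assert proof_req_briefs2req_creds(proof_req, [brief]) == {
    'self_attested_attributes': {},
    'requested_attributes': {
        '15_legalName_uuid': {'cred_id': '43f8dc18-ac00-4b72-8a96-56f47dba77ca', 'revealed': True}
    },
    'requested_predicates': {}
}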
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | creds_display | def creds_display(creds: Union[dict, Sequence[dict]], filt: dict = None, filt_dflt_incl: bool = False) -> dict:
"""
Find indy-sdk creds matching input filter from within input creds structure,
sequence of cred-briefs/cred-infos, or cred-brief-dict. Return human-legible summary.
:param creds: creds structure, cred-brief/cred-info or sequence thereof,
or cred-brief-dict; e.g., creds
::
{
"attrs": {
"attr0_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr1_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr2_uuid": [
...
]
}
}
:param filt: filter for matching attributes and values; dict (None or empty for no filter, matching all)
mapping each cred def identifier to dict mapping attributes to values to match; e.g.,
::
{
'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag': {
'attr0': 1, # operation stringifies en passant
'attr1': 'Nice'
},
...
}
:param filt_dflt_incl: whether to include (True) all attributes for cred defs that the filter does not identify
or to exclude (False) all such attributes
:return: human-legible dict mapping credential identifiers to human-readable credential briefs
(not proper indy-sdk creds structures) for creds matching input filter
"""
def _add(briefs):
nonlocal rv, filt
for brief in briefs:
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
if cred_info['referent'] in rv:
continue
cred_cd_id = cred_info['cred_def_id']
if (not filt) or (filt_dflt_incl and cred_cd_id not in filt):
rv[cred_info['referent']] = cred_info
continue
if filt and cred_cd_id in filt:
if ({k: str(filt[cred_cd_id][k]) for k in filt[cred_cd_id]}.items() <= cred_info['attrs'].items()):
rv[cred_info['referent']] = cred_info
rv = {}
if filt is None:
filt = {}
if isinstance(creds, dict):
if all(ok_wallet_reft(k) for k in creds):
_add(creds.values())
else:
for uuid2briefs in (creds.get('attrs', {}), creds.get('predicates', {})):
for briefs in uuid2briefs.values():
_add(briefs)
else:
_add(creds)
return rv | python | def creds_display(creds: Union[dict, Sequence[dict]], filt: dict = None, filt_dflt_incl: bool = False) -> dict:
"""
Find indy-sdk creds matching input filter from within input creds structure,
sequence of cred-briefs/cred-infos, or cred-brief-dict. Return human-legible summary.
:param creds: creds structure, cred-brief/cred-info or sequence thereof,
or cred-brief-dict; e.g., creds
::
{
"attrs": {
"attr0_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr1_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr2_uuid": [
...
]
}
}
:param filt: filter for matching attributes and values; dict (None or empty for no filter, matching all)
mapping each cred def identifier to dict mapping attributes to values to match; e.g.,
::
{
'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag': {
'attr0': 1, # operation stringifies en passant
'attr1': 'Nice'
},
...
}
:param filt_dflt_incl: whether to include (True) all attributes for cred defs that the filter does not identify
or to exclude (False) all such attributes
:return: human-legible dict mapping credential identifiers to human-readable credential briefs
(not proper indy-sdk creds structures) for creds matching input filter
"""
def _add(briefs):
nonlocal rv, filt
for brief in briefs:
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
if cred_info['referent'] in rv:
continue
cred_cd_id = cred_info['cred_def_id']
if (not filt) or (filt_dflt_incl and cred_cd_id not in filt):
rv[cred_info['referent']] = cred_info
continue
if filt and cred_cd_id in filt:
if ({k: str(filt[cred_cd_id][k]) for k in filt[cred_cd_id]}.items() <= cred_info['attrs'].items()):
rv[cred_info['referent']] = cred_info
rv = {}
if filt is None:
filt = {}
if isinstance(creds, dict):
if all(ok_wallet_reft(k) for k in creds):
_add(creds.values())
else:
for uuid2briefs in (creds.get('attrs', {}), creds.get('predicates', {})):
for briefs in uuid2briefs.values():
_add(briefs)
else:
_add(creds)
return rv | Find indy-sdk creds matching input filter from within input creds structure,
sequence of cred-briefs/cred-infos, or cred-brief-dict. Return human-legible summary.
:param creds: creds structure, cred-brief/cred-info or sequence thereof,
or cred-brief-dict; e.g., creds
::
{
"attrs": {
"attr0_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr1_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr2_uuid": [
...
]
}
}
:param filt: filter for matching attributes and values; dict (None or empty for no filter, matching all)
mapping each cred def identifier to dict mapping attributes to values to match; e.g.,
::
{
'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag': {
'attr0': 1, # operation stringifies en passant
'attr1': 'Nice'
},
...
}
:param filt_dflt_incl: whether to include (True) all attributes for cred defs that the filter does not identify
or to exclude (False) all such attributes
:return: human-legible dict mapping credential identifiers to human-readable credential briefs
(not proper indy-sdk creds structures) for creds matching input filter | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L488-L615 |
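Filtering sketch over two cred-infos (illustrative values; note the filter value 1 is stringified before matching):
from von_anchor.util import creds_display

infos = [
    {
        'referent': '00000000-0000-0000-0000-000000000000',
        'schema_id': 'Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0',
        'cred_def_id': 'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag',
        'rev_reg_id': None,
        'cred_rev_id': None,
        'attrs': {'attr0': '2', 'attr1': 'Hello'}
    },
    {
        'referent': '00000000-0000-0000-0000-111111111111',
        'schema_id': 'Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0',
        'cred_def_id': 'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag',
        'rev_reg_id': None,
        'cred_rev_id': None,
        'attrs': {'attr0': '1', 'attr1': 'Nice'}
    }
]
display = creds_display(infos, filt={'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:tag': {'attr0': 1}})
assert list(display) == ['00000000-0000-0000-0000-111111111111']  # only the cred with attr0 == '1' matches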
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | proof_req2wql_all | def proof_req2wql_all(proof_req: dict, x_cd_ids: Union[str, Sequence[str]] = None) -> dict:
"""
Given a proof request and a list of cred def ids to omit, return an extra WQL query dict
that will find all corresponding credentials in search.
The proof request must have cred def id restrictions on all requested attribute specifications.
At present, the utility does not support predicates.
:param proof_req: proof request
:param x_cd_ids: cred def identifier or sequence thereof to omit
:return: extra WQL dict to fetch all corresponding credentials in search.
"""
rv = {}
attr_refts = proof_req_attr_referents(proof_req)
for cd_id in [k for k in attr_refts if k not in ([x_cd_ids] if isinstance(x_cd_ids, str) else x_cd_ids or [])]:
rv[set(attr_refts[cd_id].values()).pop()] = {"cred_def_id": cd_id}
return rv | python | def proof_req2wql_all(proof_req: dict, x_cd_ids: Union[str, Sequence[str]] = None) -> dict:
"""
Given a proof request and a list of cred def ids to omit, return an extra WQL query dict
that will find all corresponding credentials in search.
The proof request must have cred def id restrictions on all requested attribute specifications.
At present, the utility does not support predicates.
:param proof_req: proof request
:param x_cd_ids: cred def identifier or sequence thereof to omit
:return: extra WQL dict to fetch all corresponding credentials in search.
"""
rv = {}
attr_refts = proof_req_attr_referents(proof_req)
for cd_id in [k for k in attr_refts if k not in ([x_cd_ids] if isinstance(x_cd_ids, str) else x_cd_ids or [])]:
rv[set(attr_refts[cd_id].values()).pop()] = {"cred_def_id": cd_id}
return rv | Given a proof request and a list of cred def ids to omit, return an extra WQL query dict
that will find all corresponding credentials in search.
The proof request must have cred def id restrictions on all requested attribute specifications.
At present, the utility does not support predicates.
:param proof_req: proof request
:param x_cd_ids: cred def identifier or sequence thereof to omit
:return: extra WQL dict to fetch all corresponding credentials in search. | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L618-L636 |
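Sketch of building the extra-WQL dict while omitting one cred def (illustrative identifiers):
from von_anchor.util import proof_req2wql_all

proof_req = {
    'name': 'proof_req',
    'version': '0.0',
    'requested_attributes': {
        '17_legalName_uuid': {
            'name': 'legalName',
            'restrictions': [{'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag'}]
        },
        '18_greenLevel_uuid': {
            'name': 'greenLevel',
            'restrictions': [{'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'}]
        }
    },
    'requested_predicates': {}
}
extra_wql = proof_req2wql_all(proof_req, 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag')  # omit the :18: cred def
assert extra_wql == {'17_legalName_uuid': {'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag'}}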
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | proof_req_attr_referents | def proof_req_attr_referents(proof_req: dict) -> dict:
"""
Given a proof request with all requested attributes having cred def id restrictions,
return its attribute referents by cred def id and attribute.
The returned structure can be useful in populating the extra WQL query parameter
in the credential search API.
:param proof_req: proof request with all requested attribute specifications having cred def id restriction; e.g.,
::
{
'name": 'proof_req',
'version': '0.0',
'requested_attributes': {
'18_greenLevel_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'greenLevel',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'18_legalName_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'legalName',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'15_id_uuid': { # this specification will not show up in response: no cred def id restriction :-(
'name': 'id',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
}
}
'requested_predicates': {
}
}
:return: nested dict mapping cred def id to name to proof request referent; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': {
'legalName': '18_legalName_uuid',
'greenLevel': '18_greenLevel_uuid'
}
}
"""
rv = {}
for uuid, spec in proof_req['requested_attributes'].items():
cd_id = None
for restriction in spec.get('restrictions', []):
cd_id = restriction.get('cred_def_id', None)
if cd_id:
break
if not cd_id:
continue
if cd_id not in rv: # cd_id of None is not OK
rv[cd_id] = {}
rv[cd_id][spec['name']] = uuid
return rv | python | def proof_req_attr_referents(proof_req: dict) -> dict:
"""
Given a proof request with all requested attributes having cred def id restrictions,
return its attribute referents by cred def id and attribute.
The returned structure can be useful in populating the extra WQL query parameter
in the credential search API.
:param proof_req: proof request with all requested attribute specifications having cred def id restriction; e.g.,
::
{
'name": 'proof_req',
'version': '0.0',
'requested_attributes': {
'18_greenLevel_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'greenLevel',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'18_legalName_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'legalName',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'15_id_uuid': { # this specification will not show up in response: no cred def id restriction :-(
'name': 'id',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
}
}
'requested_predicates': {
}
}
:return: nested dict mapping cred def id to name to proof request referent; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': {
'legalName': '18_legalName_uuid',
'greenLevel': '18_greenLevel_uuid'
}
}
"""
rv = {}
for uuid, spec in proof_req['requested_attributes'].items():
cd_id = None
for restriction in spec.get('restrictions', []):
cd_id = restriction.get('cred_def_id', None)
if cd_id:
break
if not cd_id:
continue
if cd_id not in rv: # cd_id of None is not OK
rv[cd_id] = {}
rv[cd_id][spec['name']] = uuid
return rv | Given a proof request with all requested attributes having cred def id restrictions,
return its attribute referents by cred def id and attribute.
The returned structure can be useful in populating the extra WQL query parameter
in the credential search API.
:param proof_req: proof request with all requested attribute specifications having cred def id restriction; e.g.,
::
{
'name": 'proof_req',
'version': '0.0',
'requested_attributes': {
'18_greenLevel_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'greenLevel',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'18_legalName_uuid': {
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'
}
],
'name': 'legalName',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
},
'15_id_uuid': { # this specification will not show up in response: no cred def id restriction :-(
'name': 'id',
'non_revoked': {
'to': 1532367957,
'from': 1532367957
}
}
}
'requested_predicates': {
}
}
:return: nested dict mapping cred def id to name to proof request referent; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': {
'legalName': '18_legalName_uuid',
'greenLevel': '18_greenLevel_uuid'
}
} | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L639-L717 |
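Sketch mirroring the docstring example in miniature (illustrative identifiers; assumes von_anchor.util is importable):
from von_anchor.util import proof_req_attr_referents

proof_req = {
    'name': 'proof_req',
    'version': '0.0',
    'requested_attributes': {
        '18_legalName_uuid': {
            'name': 'legalName',
            'restrictions': [{'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag'}]
        },
        '15_id_uuid': {'name': 'id'}  # no cred def id restriction: dropped from the result
    },
    'requested_predicates': {}
}
assert proof_req_attr_referents(proof_req) == {
    'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': {'legalName': '18_legalName_uuid'}
}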
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | proof_req_pred_referents | def proof_req_pred_referents(proof_req: dict) -> dict:
"""
Given a proof request with all requested predicates having cred def id restrictions,
return its predicate referents by cred def id and attribute, mapping a predicate and a limit.
The returned structure can be useful in downstream processing to filter cred-infos for predicates.
:param proof_req: proof request with all requested predicate specifications having cred def id restriction; e.g.,
::
{
'name': 'proof_req',
'version': '0.0',
'requested_attributes': {
...
}
'requested_predicates': {
'194_highscore_GE_uuid': {
'name': 'highscore',
'p_type': '>=',
'p_value': '100000',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_level_GE_uuid': {
'name': 'level',
'p_type': '>=',
'p_value': '10',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_attempts_LE_uuid': {
'name': 'attempts',
'p_type': '<=',
'p_value': '3',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_LT_uuid': {
'name': 'employees',
'p_type': '<',
'p_value': '100',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_GE_uuid': {
'name': 'employees',
'p_type': '>=',
'p_value': '50',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
}
}
:return: nested dict mapping cred def id to name to proof request referent to predicate and limit; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag': {
'highscore': {
'194_highscore_GE_uuid': ['>=', 100000]
},
'level': {
'194_level_GE_uuid': ['>=', 10]
},
'attempts': {
'194_attempts_LE_uuid': ['<=', 3]
}
},
'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag': {
'employees': { # may have many preds per attr, but always 1 uuid and 1 relation per pred
'198_employees_LT_uuid': ['<', 100],
'198_employees_GE_uuid': ['>=', 50]
}
}
}
"""
rv = {}
for uuid, spec in proof_req['requested_predicates'].items():
cd_id = None
for restriction in spec.get('restrictions', []):
cd_id = restriction.get('cred_def_id', None)
if cd_id:
break
if not cd_id:
continue
if cd_id not in rv: # cd_id of None is not OK
rv[cd_id] = {}
if spec['name'] not in rv[cd_id]:
rv[cd_id][spec['name']] = {}
rv[cd_id][spec['name']][uuid] = [spec['p_type'], Predicate.to_int(spec['p_value'])]
return rv | python | def proof_req_pred_referents(proof_req: dict) -> dict:
"""
Given a proof request with all requested predicates having cred def id restrictions,
return its predicate referents by cred def id and attribute, mapping a predicate and a limit.
The returned structure can be useful in downstream processing to filter cred-infos for predicates.
:param proof_req: proof request with all requested predicate specifications having cred def id restriction; e.g.,
::
{
'name': 'proof_req',
'version': '0.0',
'requested_attributes': {
...
}
'requested_predicates': {
'194_highscore_GE_uuid': {
'name': 'highscore',
'p_type': '>=',
'p_value': '100000',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_level_GE_uuid': {
'name': 'level',
'p_type': '>=',
'p_value': '10',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_attempts_LE_uuid': {
'name': 'attempts',
'p_type': '<=',
'p_value': '3',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_LT_uuid': {
'name': 'employees',
'p_type': '<',
'p_value': '100',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_GE_uuid': {
'name': 'employees',
'p_type': '>=',
'p_value': '50',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
}
}
:return: nested dict mapping cred def id to name to proof request referent to predicate and limit; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag': {
'highscore': {
'194_highscore_GE_uuid': ['>=', 100000]
},
'level': {
'194_level_GE_uuid': ['>=', 10]
},
'attempts': {
'194_attempts_LE_uuid': ['<=', 3]
}
},
'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag': {
'employees': { # may have many preds per attr, but always 1 uuid and 1 relation per pred
'198_employees_LT_uuid': ['<', 100],
'198_employees_GE_uuid': ['>=', 50]
}
}
}
"""
rv = {}
for uuid, spec in proof_req['requested_predicates'].items():
cd_id = None
for restriction in spec.get('restrictions', []):
cd_id = restriction.get('cred_def_id', None)
if cd_id:
break
if not cd_id:
continue
if cd_id not in rv: # cd_id of None is not OK
rv[cd_id] = {}
if spec['name'] not in rv[cd_id]:
rv[cd_id][spec['name']] = {}
rv[cd_id][spec['name']][uuid] = [spec['p_type'], Predicate.to_int(spec['p_value'])]
return rv | Given a proof request with all requested predicates having cred def id restrictions,
return its predicate referents by cred def id and attribute, mapping a predicate and a limit.
The returned structure can be useful in downstream processing to filter cred-infos for predicates.
:param proof_req: proof request with all requested predicate specifications having cred def id restriction; e.g.,
::
{
'name': 'proof_req',
'version': '0.0',
'requested_attributes': {
...
}
'requested_predicates': {
'194_highscore_GE_uuid': {
'name': 'highscore',
'p_type': '>=',
'p_value': '100000',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_level_GE_uuid': {
'name': 'level',
'p_type': '>=',
'p_value': '10',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_attempts_LE_uuid': {
'name': 'attempts',
'p_type': '<=',
'p_value': '3',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_LT_uuid': {
'name': 'employees',
'p_type': '<',
'p_value': '100',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_GE_uuid': {
'name': 'employees',
'p_type': '>=',
'p_value': '50',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
}
}
:return: nested dict mapping cred def id to name to proof request referent to predicate and limit; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag': {
'highscore': {
'194_highscore_GE_uuid': ['>=', 100000]
},
'level': {
'194_level_GE_uuid': ['>=', 10]
},
'attempts': {
'194_attempts_LE_uuid': ['<=', 3]
}
},
'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag': {
'employees': { # may have many preds per attr, but always 1 uuid and 1 relation per pred
'198_employees_LT_uuid': ['<', 100],
'198_employees_GE_uuid': ['>=', 50]
}
}
} | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L720-L847 |
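Sketch with a single predicate (assumes Predicate.to_int converts the string bound '100000' to the int 100000, as the library's docstrings elsewhere indicate; identifiers are illustrative):
from von_anchor.util import proof_req_pred_referents

proof_req = {
    'name': 'proof_req',
    'version': '0.0',
    'requested_attributes': {},
    'requested_predicates': {
        '194_highscore_GE_uuid': {
            'name': 'highscore',
            'p_type': '>=',
            'p_value': '100000',
            'restrictions': [{'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'}]
        }
    }
}
assert proof_req_pred_referents(proof_req) == {
    'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag': {'highscore': {'194_highscore_GE_uuid': ['>=', 100000]}}
}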
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | revoc_info | def revoc_info(briefs: Union[dict, Sequence[dict]], filt: dict = None) -> dict:
"""
Given a cred-brief, cred-info or sequence of either, return a dict mapping pairs
(revocation registry identifier, credential revocation identifier)
to attribute name: (raw) value dicts.
If the caller includes a filter of attribute:value pairs, retain only matching attributes.
:param briefs: cred-brief/cred-info, or sequence thereof
:param filt: dict mapping attributes to values of interest; e.g.,
::
{
'legalName': 'Flan Nebula',
'effectiveDate': '2018-01-01',
'endDate': None
}
:return: dict mapping (rev_reg_id, cred_rev_id) pairs to (raw) attributes; e.g.,
::
{
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '2'): {
'busId': '11121398',
'effectiveDate': '2010-10-10',
'endDate': '',
'id': '1',
'jurisdictionId': '1',
'legalName': 'The Original House of Pies',
'orgTypeId': '2'},
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '3'): {
'busId': '11133333',
'effectiveDate': '2011-10-01',
'endDate': '',
'id': '2',
'jurisdictionId': '1',
'legalName': 'Planet Cake',
'orgTypeId': '1'}
}
"""
rv = {}
for brief in iter_briefs(briefs):
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
(rr_id, cr_id) = (cred_info['rev_reg_id'], cred_info['cred_rev_id'])
if (rr_id, cr_id) in rv or rr_id is None or cr_id is None:
continue
if not filt:
rv[(rr_id, cr_id)] = cred_info['attrs']
continue
if ({attr: str(filt[attr]) for attr in filt}.items() <= cred_info['attrs'].items()):
rv[(rr_id, cr_id)] = cred_info['attrs']
return rv | python | def revoc_info(briefs: Union[dict, Sequence[dict]], filt: dict = None) -> dict:
"""
Given a cred-brief, cred-info or sequence of either, return a dict mapping pairs
(revocation registry identifier, credential revocation identifier)
to attribute name: (raw) value dicts.
If the caller includes a filter of attribute:value pairs, retain only matching attributes.
:param briefs: cred-brief/cred-info, or sequence thereof
:param filt: dict mapping attributes to values of interest; e.g.,
::
{
'legalName': 'Flan Nebula',
'effectiveDate': '2018-01-01',
'endDate': None
}
:return: dict mapping (rev_reg_id, cred_rev_id) pairs to (raw) attributes; e.g.,
::
{
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '2'): {
'busId': '11121398',
'effectiveDate': '2010-10-10',
'endDate': '',
'id': '1',
'jurisdictionId': '1',
'legalName': 'The Original House of Pies',
'orgTypeId': '2'},
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '3'): {
'busId': '11133333',
'effectiveDate': '2011-10-01',
'endDate': '',
'id': '2',
'jurisdictionId': '1',
'legalName': 'Planet Cake',
'orgTypeId': '1'}
}
"""
rv = {}
for brief in iter_briefs(briefs):
cred_info = brief.get('cred_info', {}) or brief # briefs could be cred-infos or cred-briefs
(rr_id, cr_id) = (cred_info['rev_reg_id'], cred_info['cred_rev_id'])
if (rr_id, cr_id) in rv or rr_id is None or cr_id is None:
continue
if not filt:
rv[(rr_id, cr_id)] = cred_info['attrs']
continue
if ({attr: str(filt[attr]) for attr in filt}.items() <= cred_info['attrs'].items()):
rv[(rr_id, cr_id)] = cred_info['attrs']
return rv | Given a cred-brief, cred-info or sequence of either, return a dict mapping pairs
(revocation registry identifier, credential revocation identifier)
to attribute name: (raw) value dicts.
If the caller includes a filter of attribute:value pairs, retain only matching attributes.
:param briefs: cred-brief/cred-info, or sequence thereof
:param filt: dict mapping attributes to values of interest; e.g.,
::
{
'legalName': 'Flan Nebula',
'effectiveDate': '2018-01-01',
'endDate': None
}
:return: dict mapping (rev_reg_id, cred_rev_id) pairs to (raw) attributes; e.g.,
::
{
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '2'): {
'busId': '11121398',
'effectiveDate': '2010-10-10',
'endDate': '',
'id': '1',
'jurisdictionId': '1',
'legalName': 'The Original House of Pies',
'orgTypeId': '2'},
('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '3'): {
'busId': '11133333',
'effectiveDate': '2011-10-01',
'endDate': '',
'id': '2',
'jurisdictionId': '1',
'legalName': 'Planet Cake',
'orgTypeId': '1'}
} | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L850-L906 |
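Sketch on one revocable cred-info, with and without a filter (illustrative values):
from von_anchor.util import revoc_info

info = {
    'referent': 'c15674a9-7321-440d-bbed-e1ac9273abd5',
    'schema_id': 'LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0',
    'cred_def_id': 'LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag',
    'rev_reg_id': 'LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1',
    'cred_rev_id': '2',
    'attrs': {'legalName': 'The Original House of Pies', 'busId': '11121398'}
}
rr_cr = ('LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:17:tag:CL_ACCUM:1', '2')
assert revoc_info([info]) == {rr_cr: info['attrs']}
assert revoc_info([info], filt={'legalName': 'Flan Nebula'}) == {}  # attrs do not match the filter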
PSPC-SPAC-buyandsell/von_anchor | von_anchor/util.py | revealed_attrs | def revealed_attrs(proof: dict) -> dict:
"""
Fetch revealed attributes from input proof and return dict mapping credential definition identifiers
to dicts, each dict mapping attribute names to (raw) values, for processing in further creds downstream.
:param proof: indy-sdk proof as dict
:return: dict mapping cred def ids to dicts, each mapping revealed attribute names to (raw) values
"""
rv = {}
for sub_index in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][sub_index]['cred_def_id']
rv[cd_id] = ({ # uses von_anchor convention for uuid (referent) construction: will break on foreign anchor's
'_'.join(uuid.split('_')[1:-1]): proof['requested_proof']['revealed_attrs'][uuid]['raw']
for uuid in proof['requested_proof']['revealed_attrs']
if proof['requested_proof']['revealed_attrs'][uuid]['sub_proof_index'] == sub_index})
return rv | python | def revealed_attrs(proof: dict) -> dict:
"""
Fetch revealed attributes from input proof and return dict mapping credential definition identifiers
to dicts, each dict mapping attribute names to (raw) values, for processing in further creds downstream.
:param proof: indy-sdk proof as dict
:return: dict mapping cred def ids to dicts, each mapping revealed attribute names to (raw) values
"""
rv = {}
for sub_index in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][sub_index]['cred_def_id']
rv[cd_id] = ({ # uses von_anchor convention for uuid (referent) construction: will break on foreign anchor's
'_'.join(uuid.split('_')[1:-1]): proof['requested_proof']['revealed_attrs'][uuid]['raw']
for uuid in proof['requested_proof']['revealed_attrs']
if proof['requested_proof']['revealed_attrs'][uuid]['sub_proof_index'] == sub_index})
return rv | Fetch revealed attributes from input proof and return dict mapping credential definition identifiers
to dicts, each dict mapping attribute names to (raw) values, for processing in further creds downstream.
:param proof: indy-sdk proof as dict
:return: dict mapping cred def ids to dicts, each mapping revealed attribute names to (raw) values | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/util.py#L909-L926 |
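Sketch over a pared-down proof containing only the fields this helper reads (assumes the von_anchor referent convention '<seq-no>_<attr>_uuid'; values are illustrative):
from von_anchor.util import revealed_attrs

proof = {
    'identifiers': [{'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag'}],
    'requested_proof': {
        'revealed_attrs': {
            '17_legalName_uuid': {'sub_proof_index': 0, 'raw': 'Tart City'}
        }
    }
}
assert revealed_attrs(proof) == {'WgWxqztrNooG92RXvxSTWv:3:CL:17:tag': {'legalName': 'Tart City'}}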
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.config | def config(self, value: dict) -> None:
"""
Set configuration dict
:param value: configuration dict
"""
self._config = value or {}
validate_config('verifier', self._config) | python | def config(self, value: dict) -> None:
"""
Set configuration dict
:param value: configuration dict
"""
self._config = value or {}
validate_config('verifier', self._config) | Set configuration dict
:param value: configuration dict | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L129-L137 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier._build_rr_state_json | async def _build_rr_state_json(self, rr_id: str, timestamp: int) -> (str, int):
"""
Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds)
"""
LOGGER.debug('_Verifier._build_rr_state_json >>> rr_id: %s, timestamp: %s', rr_id, timestamp)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier._build_rr_state_json <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_json = None
ledger_timestamp = None
get_rr_req_json = await ledger.build_get_revoc_reg_request(self.did, rr_id, timestamp)
resp_json = await self._submit(get_rr_req_json)
resp = json.loads(resp_json)
if resp.get('result', {}).get('data', None) and resp['result']['data'].get('value', None):
# timestamp at or beyond rev reg creation, carry on
try:
(_, rr_json, ledger_timestamp) = await ledger.parse_get_revoc_reg_response(resp_json)
except IndyError: # ledger replied, but there is no such rev reg available
LOGGER.debug('Verifier._build_rr_state_json <!< no rev reg exists on %s', rr_id)
raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
else:
LOGGER.debug(
'_Verifier._build_rr_state_json <!< Rev reg %s created after asked-for time %s',
rr_id,
timestamp)
raise BadRevStateTime('Rev reg {} created after asked-for time {}'.format(rr_id, timestamp))
rv = (rr_json, ledger_timestamp)
LOGGER.debug('_Verifier._build_rr_state_json <<< %s', rv)
return rv | python | async def _build_rr_state_json(self, rr_id: str, timestamp: int) -> (str, int):
"""
Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds)
"""
LOGGER.debug('_Verifier._build_rr_state_json >>> rr_id: %s, timestamp: %s', rr_id, timestamp)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier._build_rr_state_json <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_json = None
ledger_timestamp = None
get_rr_req_json = await ledger.build_get_revoc_reg_request(self.did, rr_id, timestamp)
resp_json = await self._submit(get_rr_req_json)
resp = json.loads(resp_json)
if resp.get('result', {}).get('data', None) and resp['result']['data'].get('value', None):
# timestamp at or beyond rev reg creation, carry on
try:
(_, rr_json, ledger_timestamp) = await ledger.parse_get_revoc_reg_response(resp_json)
except IndyError: # ledger replied, but there is no such rev reg available
LOGGER.debug('Verifier._build_rr_state_json <!< no rev reg exists on %s', rr_id)
raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
else:
LOGGER.debug(
'_Verifier._build_rr_state_json <!< Rev reg %s created after asked-for time %s',
rr_id,
timestamp)
raise BadRevStateTime('Rev reg {} created after asked-for time {}'.format(rr_id, timestamp))
rv = (rr_json, ledger_timestamp)
LOGGER.debug('_Verifier._build_rr_state_json <<< %s', rv)
return rv | Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L149-L192 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.build_proof_req_json | async def build_proof_req_json(self, cd_id2spec: dict) -> str:
"""
Build and return indy-sdk proof request for input attributes and non-revocation intervals by cred def id.
:param cd_id2spec: dict mapping cred def ids to:
- (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none)
- (optionally) '>=': (pred) inclusive int lower-bounds of interest (omit, empty list, or None for none)
- (optionally) '>': (pred) exclusive int lower-bounds of interest (omit, empty list, or None for none)
- (optionally) '<=': (pred) inclusive int upper-bounds of interest (omit, empty list, or None for none)
- (optionally) '<': (pred) exclusive int upper-bounds of interest (omit, empty list, or None for none)
- (optionally), 'interval': either
- (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or
- | single epoch second count to set 'from' and 'to' the same; default
| (now, now) for cred defs supporting revocation or None otherwise; e.g.,
::
{
'Vx4E82R17q...:3:CL:16:tag': {
'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema
'name',
'favouriteDrink'
],
'>=': { # request predicate score>=80 from this cred def
'score': 80
}
'<=': { # request ranking <=10 from this cred def
'ranking': 10
}
'interval': 1528116008 # same instant for all attrs and preds of corresponding schema
},
'R17v42T4pk...:3:CL:19:tag': None, # request all attrs, no preds, default intervals on all attrs
'e3vc5K168n...:3:CL:23:tag': {}, # request all attrs, no preds, default intervals on all attrs
'Z9ccax812j...:3:CL:27:tag': { # request all attrs, no preds, this interval on all attrs
'interval': (1528112408, 1528116008)
},
'9cHbp54C8n...:3:CL:37:tag': { # request no attrs and some predicates; specify interval
'attrs': [], # or equivalently, 'attrs': None
'>=': {
'employees': '50' # nicety: implementation converts to int for caller
},
'>=': {
'revenue': '10000000' # nicety: implementation converts to int for caller
'ebitda': 0
}
'interval': (1528029608, 1528116008)
},
'6caBcmLi33...:3:CL:41:tag': { # all attrs, one pred, default intervals to now on attrs & pred
'>': {
'regEpoch': 1514782800
}
},
...
}
:return: indy-sdk proof request json
"""
LOGGER.debug('Verifier.build_proof_req_json >>> cd_id2spec: %s', cd_id2spec)
cd_id2schema = {}
now = int(time())
rv = {
'nonce': str(int(time())),
'name': 'proof_req',
'version': '0.0',
'requested_attributes': {},
'requested_predicates': {}
}
for cd_id in cd_id2spec:
if not ok_cred_def_id(cd_id):
LOGGER.debug('Verifier.build_proof_req_json <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
interval = None
cred_def = json.loads(await self.get_cred_def(cd_id))
seq_no = cred_def_id2seq_no(cd_id)
cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no))
if 'revocation' in cred_def['value']:
fro_to = cd_id2spec[cd_id].get('interval', (now, now)) if cd_id2spec[cd_id] else (now, now)
interval = {
'from': fro_to if isinstance(fro_to, int) else min(fro_to),
'to': fro_to if isinstance(fro_to, int) else max(fro_to)
}
for attr in (cd_id2spec[cd_id].get('attrs', cd_id2schema[cd_id]['attrNames']) or []
if cd_id2spec[cd_id] else cd_id2schema[cd_id]['attrNames']):
attr_uuid = '{}_{}_uuid'.format(seq_no, canon(attr))
rv['requested_attributes'][attr_uuid] = {
'name': attr,
'restrictions': [{
'cred_def_id': cd_id
}]
}
if interval:
rv['requested_attributes'][attr_uuid]['non_revoked'] = interval
for pred in Predicate:
for attr in (cd_id2spec[cd_id].get(pred.value.math, {}) or {} if cd_id2spec[cd_id] else {}):
pred_uuid = '{}_{}_{}_uuid'.format(seq_no, canon(attr), pred.value.fortran)
try:
rv['requested_predicates'][pred_uuid] = {
'name': attr,
'p_type': pred.value.math,
'p_value': Predicate.to_int(cd_id2spec[cd_id][pred.value.math][attr]),
'restrictions': [{
'cred_def_id': cd_id
}]
}
except ValueError:
LOGGER.info(
'cannot build %s predicate on non-int bound %s for %s',
pred.value.fortran,
cd_id2spec[cd_id][pred.value.math][attr],
attr)
continue # int conversion failed - reject candidate
if interval:
rv['requested_predicates'][pred_uuid]['non_revoked'] = interval
LOGGER.debug('Verifier.build_proof_req_json <<< %s', json.dumps(rv))
return json.dumps(rv) | python | async def build_proof_req_json(self, cd_id2spec: dict) -> str:
"""
Build and return indy-sdk proof request for input attributes and non-revocation intervals by cred def id.
:param cd_id2spec: dict mapping cred def ids to:
- (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none)
- (optionally) '>=': (pred) inclusive int lower-bounds of interest (omit, empty list, or None for none)
- (optionally) '>': (pred) exclusive int lower-bounds of interest (omit, empty list, or None for none)
- (optionally) '<=': (pred) inclusive int upper-bounds of interest (omit, empty list, or None for none)
- (optionally) '<': (pred) exclusive int upper-bounds of interest (omit, empty list, or None for none)
- (optionally), 'interval': either
- (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or
- | single epoch second count to set 'from' and 'to' the same; default
| (now, now) for cred defs supporting revocation or None otherwise; e.g.,
::
{
'Vx4E82R17q...:3:CL:16:tag': {
'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema
'name',
'favouriteDrink'
],
'>=': { # request predicate score>=80 from this cred def
'score': 80
}
'<=': { # request ranking <=10 from this cred def
'ranking': 10
}
'interval': 1528116008 # same instant for all attrs and preds of corresponding schema
},
'R17v42T4pk...:3:CL:19:tag': None, # request all attrs, no preds, default intervals on all attrs
'e3vc5K168n...:3:CL:23:tag': {}, # request all attrs, no preds, default intervals on all attrs
'Z9ccax812j...:3:CL:27:tag': { # request all attrs, no preds, this interval on all attrs
'interval': (1528112408, 1528116008)
},
'9cHbp54C8n...:3:CL:37:tag': { # request no attrs and some predicates; specify interval
'attrs': [], # or equivalently, 'attrs': None
'>=': {
'employees': '50' # nicety: implementation converts to int for caller
},
'>=': {
'revenue': '10000000' # nicety: implementation converts to int for caller
'ebitda': 0
}
'interval': (1528029608, 1528116008)
},
'6caBcmLi33...:3:CL:41:tag': { # all attrs, one pred, default intervals to now on attrs & pred
'>': {
'regEpoch': 1514782800
}
},
...
}
:return: indy-sdk proof request json
"""
LOGGER.debug('Verifier.build_proof_req_json >>> cd_id2spec: %s', cd_id2spec)
cd_id2schema = {}
now = int(time())
rv = {
'nonce': str(int(time())),
'name': 'proof_req',
'version': '0.0',
'requested_attributes': {},
'requested_predicates': {}
}
for cd_id in cd_id2spec:
if not ok_cred_def_id(cd_id):
LOGGER.debug('Verifier.build_proof_req_json <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
interval = None
cred_def = json.loads(await self.get_cred_def(cd_id))
seq_no = cred_def_id2seq_no(cd_id)
cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no))
if 'revocation' in cred_def['value']:
fro_to = cd_id2spec[cd_id].get('interval', (now, now)) if cd_id2spec[cd_id] else (now, now)
interval = {
'from': fro_to if isinstance(fro_to, int) else min(fro_to),
'to': fro_to if isinstance(fro_to, int) else max(fro_to)
}
for attr in (cd_id2spec[cd_id].get('attrs', cd_id2schema[cd_id]['attrNames']) or []
if cd_id2spec[cd_id] else cd_id2schema[cd_id]['attrNames']):
attr_uuid = '{}_{}_uuid'.format(seq_no, canon(attr))
rv['requested_attributes'][attr_uuid] = {
'name': attr,
'restrictions': [{
'cred_def_id': cd_id
}]
}
if interval:
rv['requested_attributes'][attr_uuid]['non_revoked'] = interval
for pred in Predicate:
for attr in (cd_id2spec[cd_id].get(pred.value.math, {}) or {} if cd_id2spec[cd_id] else {}):
pred_uuid = '{}_{}_{}_uuid'.format(seq_no, canon(attr), pred.value.fortran)
try:
rv['requested_predicates'][pred_uuid] = {
'name': attr,
'p_type': pred.value.math,
'p_value': Predicate.to_int(cd_id2spec[cd_id][pred.value.math][attr]),
'restrictions': [{
'cred_def_id': cd_id
}]
}
except ValueError:
LOGGER.info(
'cannot build %s predicate on non-int bound %s for %s',
pred.value.fortran,
cd_id2spec[cd_id][pred.value.math][attr],
attr)
continue # int conversion failed - reject candidate
if interval:
rv['requested_predicates'][pred_uuid]['non_revoked'] = interval
LOGGER.debug('Verifier.build_proof_req_json <<< %s', json.dumps(rv))
return json.dumps(rv) | Build and return indy-sdk proof request for input attributes and non-revocation intervals by cred def id.
:param cd_id2spec: dict mapping cred def ids to:
- (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none)
- (optionally) '>=': (pred) inclusive int lower-bounds of interest (omit, empty list, or None for none)
- (optionally) '>': (pred) exclusive int lower-bounds of interest (omit, empty list, or None for none)
- (optionally) '<=': (pred) inclusive int upper-bounds of interest (omit, empty list, or None for none)
- (optionally) '<': (pred) exclusive int upper-bounds of interest (omit, empty list, or None for none)
- (optionally), 'interval': either
- (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or
- | single epoch second count to set 'from' and 'to' the same; default
| (now, now) for cred defs supporting revocation or None otherwise; e.g.,
::
{
'Vx4E82R17q...:3:CL:16:tag': {
'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema
'name',
'favouriteDrink'
],
'>=': { # request predicate score>=80 from this cred def
'score': 80
}
'<=': { # request ranking <=10 from this cred def
'ranking': 10
}
'interval': 1528116008 # same instant for all attrs and preds of corresponding schema
},
'R17v42T4pk...:3:CL:19:tag': None, # request all attrs, no preds, default intervals on all attrs
'e3vc5K168n...:3:CL:23:tag': {}, # request all attrs, no preds, default intervals on all attrs
'Z9ccax812j...:3:CL:27:tag': { # request all attrs, no preds, this interval on all attrs
'interval': (1528112408, 1528116008)
},
'9cHbp54C8n...:3:CL:37:tag': { # request no attrs and some predicates; specify interval
'attrs': [], # or equivalently, 'attrs': None
'>=': {
'employees': '50' # nicety: implementation converts to int for caller
},
'>=': {
'revenue': '10000000' # nicety: implementation converts to int for caller
'ebitda': 0
}
'interval': (1528029608, 1528116008)
},
'6caBcmLi33...:3:CL:41:tag': { # all attrs, one pred, default intervals to now on attrs & pred
'>': {
'regEpoch': 1514782800
}
},
...
}
:return: indy-sdk proof request json | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L194-L317 |
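For concreteness, a hedged usage sketch of the specification dict documented above; the cred def id and attribute names are placeholders, and 'verifier' is assumed to be an open Verifier:

    import json

    async def request_proof_spec(verifier):
        cd_id = 'Vx4E82R17q...:3:CL:16:tag'  # placeholder cred def id
        proof_req_json = await verifier.build_proof_req_json({
            cd_id: {
                'attrs': ['name', 'favouriteDrink'],  # named attributes only
                '>=': {'score': 80},                  # one inclusive lower-bound predicate
                'interval': 1528116008                # single instant for 'from' and 'to'
            }
        })
        return json.loads(proof_req_json)  # dict with requested_attributes and requested_predicates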
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.load_cache_for_verification | async def load_cache_for_verification(self, archive: bool = False) -> int:
"""
Load schema, cred def, revocation caches; optionally archive enough to go
offline and be able to verify proof on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: True to archive now or False to demur (subclasses may still
need to augment archivable caches further)
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('Verifier.load_cache_for_verification >>> archive: %s', archive)
rv = int(time())
for s_id in self.config.get('archive-verifier-caches-on-close', {}).get('schema_id', {}):
if ok_schema_id(s_id):
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
else:
LOGGER.info('Not archiving schema for specified bad id %s', s_id)
for cd_id in self.config.get('archive-verifier-caches-on-close', {}).get('cred_def_id', {}):
if ok_cred_def_id(cd_id):
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
else:
LOGGER.info('Not archiving cred def for specified bad id %s', cd_id)
for rr_id in self.config.get('archive-verifier-caches-on-close', {}).get('rev_reg_id', {}):
if ok_rev_reg_id(rr_id):
await self.get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
self.name,
self.pool.name,
rr_id,
rv)
except AbsentPool:
LOGGER.warning(
'Verifier %s has no pool, cannot update revo cache reg state for %s to %s',
self.name,
rr_id,
rv)
else:
LOGGER.info('Not archiving rev reg for specified bad id %s', rr_id)
if archive:
ArchivableCaches.archive(self.dir_cache)
LOGGER.debug('Verifier.load_cache_for_verification <<< %s', rv)
return rv | python | async def load_cache_for_verification(self, archive: bool = False) -> int:
"""
Load schema, cred def, revocation caches; optionally archive enough to go
offline and be able to verify proof on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: True to archive now or False to demur (subclasses may still
need to augment archivable caches further)
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('Verifier.load_cache_for_verification >>> archive: %s', archive)
rv = int(time())
for s_id in self.config.get('archive-verifier-caches-on-close', {}).get('schema_id', {}):
if ok_schema_id(s_id):
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
else:
LOGGER.info('Not archiving schema for specified bad id %s', s_id)
for cd_id in self.config.get('archive-verifier-caches-on-close', {}).get('cred_def_id', {}):
if ok_cred_def_id(cd_id):
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
else:
LOGGER.info('Not archiving cred def for specified bad id %s', cd_id)
for rr_id in self.config.get('archive-verifier-caches-on-close', {}).get('rev_reg_id', {}):
if ok_rev_reg_id(rr_id):
await self.get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
self.name,
self.pool.name,
rr_id,
rv)
except AbsentPool:
LOGGER.warning(
'Verifier %s has no pool, cannot update revo cache reg state for %s to %s',
self.name,
rr_id,
rv)
else:
LOGGER.info('Not archiving rev reg for specified bad id %s', rr_id)
if archive:
ArchivableCaches.archive(self.dir_cache)
LOGGER.debug('Verifier.load_cache_for_verification <<< %s', rv)
return rv | Load schema, cred def, revocation caches; optionally archive enough to go
offline and be able to verify proof on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: True to archive now or False to demur (subclasses may still
need to augment archivable caches further)
:return: cache load event timestamp (epoch seconds) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L319-L374 |
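A sketch of the configuration block this method consults; the identifiers are placeholders, but the 'archive-verifier-caches-on-close' key and its sub-keys follow the lookups in the code above:

    verifier_config = {
        'parse-caches-on-open': True,
        'archive-verifier-caches-on-close': {
            'schema_id': ['Vx4E82R17q...:2:prefs:1.0'],    # placeholder schema id
            'cred_def_id': ['Vx4E82R17q...:3:CL:16:tag'],  # placeholder cred def id
            'rev_reg_id': ['Vx4E82R17q...:4:CL_ACCUM:0']   # placeholder rev reg id
        }
    }
    # typically called just before going offline:
    # timestamp = await verifier.load_cache_for_verification(archive=True)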
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.open | async def open(self) -> 'Verifier':
"""
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
"""
LOGGER.debug('Verifier.open >>>')
await super().open()
if self.config.get('parse-caches-on-open', False):
ArchivableCaches.parse(self.dir_cache)
LOGGER.debug('Verifier.open <<<')
return self | python | async def open(self) -> 'Verifier':
"""
Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object
"""
LOGGER.debug('Verifier.open >>>')
await super().open()
if self.config.get('parse-caches-on-open', False):
ArchivableCaches.parse(self.dir_cache)
LOGGER.debug('Verifier.open <<<')
return self | Explicit entry. Perform ancestor opening operations,
then parse cache from archive if so configured, and
synchronize revocation registry to tails tree content.
:return: current object | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L376-L392 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.close | async def close(self) -> None:
"""
Explicit exit. If so configured, populate cache to prove for any creds on schemata,
cred defs, and rev regs marked of interest in configuration at initialization,
archive cache, and purge prior cache archives.
"""
LOGGER.debug('Verifier.close >>>')
if self.config.get('archive-verifier-caches-on-close', {}):
await self.load_cache_for_verification(True)
ArchivableCaches.purge_archives(self.dir_cache, True)
await BaseAnchor.close(self)
LOGGER.debug('Verifier.close <<<') | python | async def close(self) -> None:
"""
Explicit exit. If so configured, populate cache to prove for any creds on schemata,
cred defs, and rev regs marked of interest in configuration at initialization,
archive cache, and purge prior cache archives.
"""
LOGGER.debug('Verifier.close >>>')
if self.config.get('archive-verifier-caches-on-close', {}):
await self.load_cache_for_verification(True)
ArchivableCaches.purge_archives(self.dir_cache, True)
await BaseAnchor.close(self)
LOGGER.debug('Verifier.close <<<') | Explicit exit. If so configured, populate cache to prove for any creds on schemata,
cred defs, and rev regs marked of interest in configuration at initialization,
archive cache, and purge prior cache archives. | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L394-L411 |
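Taken together, open() and close() suggest a lifecycle along the lines of the sketch below; the Verifier constructor arguments shown (wallet, pool, config) are an assumption about the wider anchor API, not confirmed by this excerpt:

    import json

    async def verify_once(wallet, pool, proof_req: dict, proof: dict) -> bool:
        verifier = Verifier(wallet, pool, config={'parse-caches-on-open': True})  # assumed signature
        await verifier.open()
        try:
            return json.loads(await verifier.verify_proof(proof_req, proof))
        finally:
            await verifier.close()  # archives caches if so configured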
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.check_encoding | def check_encoding(proof_req: dict, proof: dict) -> bool:
"""
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
        :param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
"""
LOGGER.debug('Verifier.check_encoding <<< proof_req: %s, proof: %s', proof_req, proof)
cd_id2proof_id = {} # invert proof['identifiers'] per cd_id
p_preds = {} # cd_id and attr to bound
for idx in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][idx]['cred_def_id']
cd_id2proof_id[cd_id] = idx # since at most 1 cred per cred def
p_preds[cd_id] = {
ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
}
for (uuid, req_attr) in proof_req['requested_attributes'].items(): # proof req xref proof per revealed attr
canon_attr = canon(req_attr['name'])
proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
canon_attr)
if not enco:
continue # requested but declined from revelation in proof: must appear in a predicate
if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
for (uuid, req_pred) in proof_req['requested_predicates'].items(): # proof req xref proof per pred
canon_attr = canon(req_pred['name'])
if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
LOGGER.debug('Verifier.check_proof_encoding <<< True')
return True | python | def check_encoding(proof_req: dict, proof: dict) -> bool:
"""
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
"""
LOGGER.debug('Verifier.check_encoding <<< proof_req: %s, proof: %s', proof_req, proof)
cd_id2proof_id = {} # invert proof['identifiers'] per cd_id
p_preds = {} # cd_id and attr to bound
for idx in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][idx]['cred_def_id']
cd_id2proof_id[cd_id] = idx # since at most 1 cred per cred def
p_preds[cd_id] = {
ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
}
for (uuid, req_attr) in proof_req['requested_attributes'].items(): # proof req xref proof per revealed attr
canon_attr = canon(req_attr['name'])
proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
canon_attr)
if not enco:
continue # requested but declined from revelation in proof: must appear in a predicate
if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
for (uuid, req_pred) in proof_req['requested_predicates'].items(): # proof req xref proof per pred
canon_attr = canon(req_pred['name'])
if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
LOGGER.debug('Verifier.check_proof_encoding <<< True')
return True | Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L414-L457 |
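Since check_encoding() is a pure, static check over the two dicts, it can serve as a cheap pre-verification gate before the slower cryptographic step; a hedged sketch:

    import json

    def precheck(proof_req_json: str, proof_json: str) -> bool:
        proof_req = json.loads(proof_req_json)
        proof = json.loads(proof_json)
        # revealed raw values must re-encode to the values actually proven
        return Verifier.check_encoding(proof_req, proof)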
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/verifier.py | Verifier.verify_proof | async def verify_proof(self, proof_req: dict, proof: dict) -> str:
"""
Verify proof as Verifier. Raise AbsentRevReg if a proof cites a revocation registry
that does not exist on the distributed ledger.
:param proof_req: proof request as Verifier creates, as per proof_req_json above
:param proof: proof as HolderProver creates
:return: json encoded True if proof is valid; False if not
"""
LOGGER.debug('Verifier.verify_proof >>> proof_req: %s, proof: %s', proof_req, proof)
if not Verifier.check_encoding(proof_req, proof):
LOGGER.info(
'Proof encoding does not cross-reference with proof request %s: failing verification',
proof_req.get('nonce', '(missing nonce)'))
LOGGER.debug('Verifier.verify_proof <<< "False"')
return json.dumps(False)
async def _set_schema(s_id: str) -> None:
nonlocal s_id2schema
if not ok_schema_id(s_id):
LOGGER.debug('Verifier.verify_proof <!< Bad schema id %s', s_id)
raise BadIdentifier('Bad schema id {}'.format(s_id))
if s_id not in s_id2schema:
schema = json.loads(await self.get_schema(s_id)) # add to cache en passant
if not schema:
LOGGER.debug(
'Verifier.verify_proof <!< absent schema %s, proof req may be for another ledger',
s_id)
raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))
s_id2schema[s_id] = schema
async def _set_cred_def(cd_id: str) -> None:
nonlocal cd_id2cred_def
if not ok_cred_def_id(cd_id):
LOGGER.debug('Verifier.verify_proof <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
if cd_id not in cd_id2cred_def:
cd_id2cred_def[cd_id] = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant
async def _set_rev_reg_def(rr_id: str) -> bool:
"""
Return true to continue to timestamp setting, false to short-circuit
"""
nonlocal rr_id2rr_def
if not rr_id:
return False
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier.verify_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
if rr_id not in rr_id2rr_def:
rr_id2rr_def[rr_id] = json.loads(await self.get_rev_reg_def(rr_id))
return True
async def _set_timestamp(rr_id: str, timestamp: int) -> None:
nonlocal rr_id2rr
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
(rr_json, _) = await revo_cache_entry.get_state_json(self._build_rr_state_json, timestamp, timestamp)
if rr_id not in rr_id2rr:
rr_id2rr[rr_id] = {}
rr_id2rr[rr_id][timestamp] = json.loads(rr_json)
s_id2schema = {}
cd_id2cred_def = {}
rr_id2rr_def = {}
rr_id2rr = {}
proof_ids = proof['identifiers']
for proof_id in proof_ids:
await _set_schema(proof_id['schema_id'])
await _set_cred_def(proof_id['cred_def_id'])
rr_id = proof_id['rev_reg_id']
if await _set_rev_reg_def(rr_id):
await _set_timestamp(rr_id, proof_id['timestamp'])
rv = json.dumps(await anoncreds.verifier_verify_proof(
json.dumps(proof_req),
json.dumps(proof),
json.dumps(s_id2schema),
json.dumps(cd_id2cred_def),
json.dumps(rr_id2rr_def),
json.dumps(rr_id2rr)))
LOGGER.debug('Verifier.verify_proof <<< %s', rv)
return rv | python | async def verify_proof(self, proof_req: dict, proof: dict) -> str:
"""
Verify proof as Verifier. Raise AbsentRevReg if a proof cites a revocation registry
that does not exist on the distributed ledger.
:param proof_req: proof request as Verifier creates, as per proof_req_json above
:param proof: proof as HolderProver creates
:return: json encoded True if proof is valid; False if not
"""
LOGGER.debug('Verifier.verify_proof >>> proof_req: %s, proof: %s', proof_req, proof)
if not Verifier.check_encoding(proof_req, proof):
LOGGER.info(
'Proof encoding does not cross-reference with proof request %s: failing verification',
proof_req.get('nonce', '(missing nonce)'))
LOGGER.debug('Verifier.verify_proof <<< "False"')
return json.dumps(False)
async def _set_schema(s_id: str) -> None:
nonlocal s_id2schema
if not ok_schema_id(s_id):
LOGGER.debug('Verifier.verify_proof <!< Bad schema id %s', s_id)
raise BadIdentifier('Bad schema id {}'.format(s_id))
if s_id not in s_id2schema:
schema = json.loads(await self.get_schema(s_id)) # add to cache en passant
if not schema:
LOGGER.debug(
'Verifier.verify_proof <!< absent schema %s, proof req may be for another ledger',
s_id)
raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))
s_id2schema[s_id] = schema
async def _set_cred_def(cd_id: str) -> None:
nonlocal cd_id2cred_def
if not ok_cred_def_id(cd_id):
LOGGER.debug('Verifier.verify_proof <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
if cd_id not in cd_id2cred_def:
cd_id2cred_def[cd_id] = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant
async def _set_rev_reg_def(rr_id: str) -> bool:
"""
Return true to continue to timestamp setting, false to short-circuit
"""
nonlocal rr_id2rr_def
if not rr_id:
return False
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier.verify_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
if rr_id not in rr_id2rr_def:
rr_id2rr_def[rr_id] = json.loads(await self.get_rev_reg_def(rr_id))
return True
async def _set_timestamp(rr_id: str, timestamp: int) -> None:
nonlocal rr_id2rr
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
(rr_json, _) = await revo_cache_entry.get_state_json(self._build_rr_state_json, timestamp, timestamp)
if rr_id not in rr_id2rr:
rr_id2rr[rr_id] = {}
rr_id2rr[rr_id][timestamp] = json.loads(rr_json)
s_id2schema = {}
cd_id2cred_def = {}
rr_id2rr_def = {}
rr_id2rr = {}
proof_ids = proof['identifiers']
for proof_id in proof_ids:
await _set_schema(proof_id['schema_id'])
await _set_cred_def(proof_id['cred_def_id'])
rr_id = proof_id['rev_reg_id']
if await _set_rev_reg_def(rr_id):
await _set_timestamp(rr_id, proof_id['timestamp'])
rv = json.dumps(await anoncreds.verifier_verify_proof(
json.dumps(proof_req),
json.dumps(proof),
json.dumps(s_id2schema),
json.dumps(cd_id2cred_def),
json.dumps(rr_id2rr_def),
json.dumps(rr_id2rr)))
LOGGER.debug('Verifier.verify_proof <<< %s', rv)
return rv | Verify proof as Verifier. Raise AbsentRevReg if a proof cites a revocation registry
that does not exist on the distributed ledger.
:param proof_req: proof request as Verifier creates, as per proof_req_json above
:param proof: proof as HolderProver creates
:return: json encoded True if proof is valid; False if not | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L459-L545 |
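End to end, a verification pass might look like the sketch below; the proof json is assumed to arrive from a holder-prover elsewhere:

    import json

    async def adjudicate(verifier, proof_req: dict, proof_json: str) -> bool:
        proof = json.loads(proof_json)
        ok_json = await verifier.verify_proof(proof_req, proof)  # json-encoded bool
        return json.loads(ok_json)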
PSPC-SPAC-buyandsell/von_anchor | von_anchor/a2a/publickey.py | PublicKeyType.get | def get(value: str) -> 'PublicKeyType':
"""
Return enum instance corresponding to input version value ('RsaVerificationKey2018' etc.)
"""
for pktype in PublicKeyType:
if value in (pktype.ver_type, pktype.authn_type):
return pktype
return None | python | def get(value: str) -> 'PublicKeyType':
"""
Return enum instance corresponding to input version value ('RsaVerificationKey2018' etc.)
"""
for pktype in PublicKeyType:
if value in (pktype.ver_type, pktype.authn_type):
return pktype
return None | Return enum instance corresponding to input version value ('RsaVerificationKey2018' etc.) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/a2a/publickey.py#L46-L54 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/a2a/publickey.py | PublicKey.to_dict | def to_dict(self):
"""
Return dict representation of public key to embed in DID document.
"""
return {
'id': self.id,
'type': str(self.type.ver_type),
'controller': canon_ref(self.did, self.controller),
**self.type.specification(self.value)
} | python | def to_dict(self):
"""
Return dict representation of public key to embed in DID document.
"""
return {
'id': self.id,
'type': str(self.type.ver_type),
'controller': canon_ref(self.did, self.controller),
**self.type.specification(self.value)
} | Return dict representation of public key to embed in DID document. | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/a2a/publickey.py#L195-L205 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | main | async def main(wallet_name: str) -> None:
"""
Main line for revocation registry builder operating in external process on behalf of issuer agent.
:param wallet_name: wallet name - must match that of issuer with existing wallet
"""
logging.basicConfig(level=logging.WARN, format='%(levelname)-8s | %(name)-12s | %(message)s')
logging.getLogger('indy').setLevel(logging.ERROR)
path_start = join(RevRegBuilder.dir_tails_sentinel(wallet_name), '.start')
with open(path_start, 'r') as fh_start:
start_data = json.loads(fh_start.read())
remove(path_start)
logging.getLogger(__name__).setLevel(start_data['logging']['level'])
for path_log in start_data['logging']['paths']:
logging.getLogger(__name__).addHandler(logging.FileHandler(path_log))
wallet = WalletManager().get(
{
'id': wallet_name,
'storage_type': start_data['wallet']['storage_type'],
**start_data['wallet']['config'],
},
access=start_data['wallet']['access_creds'].get('key', None))
async with wallet, RevRegBuilder(wallet, rrbx=True) as rrban:
await rrban.serve() | python | async def main(wallet_name: str) -> None:
"""
Main line for revocation registry builder operating in external process on behalf of issuer agent.
:param wallet_name: wallet name - must match that of issuer with existing wallet
"""
logging.basicConfig(level=logging.WARN, format='%(levelname)-8s | %(name)-12s | %(message)s')
logging.getLogger('indy').setLevel(logging.ERROR)
path_start = join(RevRegBuilder.dir_tails_sentinel(wallet_name), '.start')
with open(path_start, 'r') as fh_start:
start_data = json.loads(fh_start.read())
remove(path_start)
logging.getLogger(__name__).setLevel(start_data['logging']['level'])
for path_log in start_data['logging']['paths']:
logging.getLogger(__name__).addHandler(logging.FileHandler(path_log))
wallet = WalletManager().get(
{
'id': wallet_name,
'storage_type': start_data['wallet']['storage_type'],
**start_data['wallet']['config'],
},
access=start_data['wallet']['access_creds'].get('key', None))
async with wallet, RevRegBuilder(wallet, rrbx=True) as rrban:
await rrban.serve() | Main line for revocation registry builder operating in external process on behalf of issuer agent.
:param wallet_name: wallet name - must match that of issuer with existing wallet | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L400-L428 |
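main() expects a .start file in the sentinel directory whose shape mirrors _start_data_json() in the next entry; an illustrative (assumed) payload:

    start_data = {
        'logging': {
            'level': 30,                                  # logging.WARNING
            'paths': ['/var/log/von_anchor/rrb.log']      # placeholder log file path
        },
        'wallet': {
            'storage_type': None,
            'config': {'freshness_time': 0},              # placeholder wallet config
            'access_creds': {'key': 'wallet-access-key'}  # placeholder access credentials
        }
    }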
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder._start_data_json | def _start_data_json(self) -> str:
"""
Output json with start data to write for external revocation registry builder process pickup.
:return: logging and wallet init data json
"""
rv = {
'logging': {
'paths': []
},
'wallet': {
}
}
logger = LOGGER
while not logger.level:
logger = logger.parent
if logger is None:
break
rv['logging']['level'] = logger.level
logger = LOGGER
log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
while not log_paths:
logger = logger.parent
if logger is None:
break
log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
for log_path in log_paths:
rv['logging']['paths'].append(log_path)
rv['wallet']['storage_type'] = self.wallet.storage_type
rv['wallet']['config'] = self.wallet.config
rv['wallet']['access_creds'] = self.wallet.access_creds
return json.dumps(rv) | python | def _start_data_json(self) -> str:
"""
Output json with start data to write for external revocation registry builder process pickup.
:return: logging and wallet init data json
"""
rv = {
'logging': {
'paths': []
},
'wallet': {
}
}
logger = LOGGER
while not logger.level:
logger = logger.parent
if logger is None:
break
rv['logging']['level'] = logger.level
logger = LOGGER
log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
while not log_paths:
logger = logger.parent
if logger is None:
break
log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
for log_path in log_paths:
rv['logging']['paths'].append(log_path)
rv['wallet']['storage_type'] = self.wallet.storage_type
rv['wallet']['config'] = self.wallet.config
rv['wallet']['access_creds'] = self.wallet.access_creds
return json.dumps(rv) | Output json with start data to write for external revocation registry builder process pickup.
:return: logging and wallet init data json | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L143-L179 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder._get_state | def _get_state(wallet_name: str) -> _STATE:
"""
Return current state of revocation registry builder process.
:param wallet_name: name of wallet for corresponding Issuer
:return: current process state as _STATE enum
"""
dir_sentinel = RevRegBuilder.dir_tails_sentinel(wallet_name)
file_pid = join(dir_sentinel, '.pid')
file_start = join(dir_sentinel, '.start')
file_stop = join(dir_sentinel, '.stop')
if isfile(file_stop):
return _STATE.STOPPING
if isfile(file_start) or isfile(file_pid):
return _STATE.RUNNING
return _STATE.ABSENT | python | def _get_state(wallet_name: str) -> _STATE:
"""
Return current state of revocation registry builder process.
:param wallet_name: name of wallet for corresponding Issuer
:return: current process state as _STATE enum
"""
dir_sentinel = RevRegBuilder.dir_tails_sentinel(wallet_name)
file_pid = join(dir_sentinel, '.pid')
file_start = join(dir_sentinel, '.start')
file_stop = join(dir_sentinel, '.stop')
if isfile(file_stop):
return _STATE.STOPPING
if isfile(file_start) or isfile(file_pid):
return _STATE.RUNNING
return _STATE.ABSENT | Return current state of revocation registry builder process.
:param wallet_name: name of wallet for corresponding Issuer
:return: current process state as _STATE enum | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L182-L201 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder.dir_tails_top | def dir_tails_top(self, rr_id) -> str:
"""
Return top of tails tree for input rev reg id.
:param rr_id: revocation registry identifier
:return: top of tails tree
"""
return join(self.dir_tails_hopper, rr_id) if self.external else self.dir_tails | python | def dir_tails_top(self, rr_id) -> str:
"""
Return top of tails tree for input rev reg id.
:param rr_id: revocation registry identifier
:return: top of tails tree
"""
return join(self.dir_tails_hopper, rr_id) if self.external else self.dir_tails | Return top of tails tree for input rev reg id.
:param rr_id: revocation registry identifier
:return: top of tails tree | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L215-L223 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder.dir_tails_target | def dir_tails_target(self, rr_id) -> str:
"""
Return target directory for revocation registry and tails file production.
:param rr_id: revocation registry identifier
:return: tails target directory
"""
return join(self.dir_tails_top(rr_id), rev_reg_id2cred_def_id(rr_id)) | python | def dir_tails_target(self, rr_id) -> str:
"""
Return target directory for revocation registry and tails file production.
:param rr_id: revocation registry identifier
:return: tails target directory
"""
return join(self.dir_tails_top(rr_id), rev_reg_id2cred_def_id(rr_id)) | Return target directory for revocation registry and tails file production.
:param rr_id: revocation registry identifier
:return: tails target directory | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L225-L233 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder.mark_in_progress | def mark_in_progress(self, rr_id: str, rr_size: int) -> None:
"""
Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
        :param rr_size: size of revocation registry to build
"""
try:
makedirs(join(self._dir_tails_sentinel, rr_id), exist_ok=False)
except FileExistsError:
LOGGER.warning('Rev reg %s construction already in progress', rr_id)
else:
open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close() | python | def mark_in_progress(self, rr_id: str, rr_size: int) -> None:
"""
Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
:param rr_size: size of revocation registry to build
"""
try:
makedirs(join(self._dir_tails_sentinel, rr_id), exist_ok=False)
except FileExistsError:
LOGGER.warning('Rev reg %s construction already in progress', rr_id)
else:
open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close() | Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
:param rr_size: size of revocation registry to build | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L235-L247 |
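An issuer-side process would signal the external builder roughly as follows; serve() (next entry) later finds the sentinel, reads the size from the dot-file name, and builds the registry. The rev reg id and builder instance are placeholders:

    # assumption: 'rrban' is a RevRegBuilder for the issuer's wallet
    rr_id = 'Vx4E82R17q...:4:Vx4E82R17q...:3:CL:16:tag:CL_ACCUM:0'  # placeholder rev reg id
    rrban.mark_in_progress(rr_id, rr_size=4096)  # creates <sentinel>/<rr_id>/.4096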
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder.serve | async def serve(self) -> None:
"""
Write pidfile to sentinel directory if need be, and wait for sentinels
to shut down or build revocation registry and tails file.
"""
LOGGER.debug('RevRegBuilder.serve >>>')
assert self.external
file_pid = join(self._dir_tails_sentinel, '.pid')
if isfile(file_pid):
with open(file_pid, 'r') as fh_pid:
pid = int(fh_pid.read())
try:
kill(pid, 0)
except ProcessLookupError:
remove(file_pid)
LOGGER.info('RevRegBuilder removed derelict .pid file')
except PermissionError:
LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
LOGGER.debug('RevRegBuilder.serve <<<')
return
else:
LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
LOGGER.debug('RevRegBuilder.serve <<<')
return
pid = getpid()
with open(file_pid, 'w') as pid_fh:
print(str(pid), file=pid_fh)
file_stop = join(self._dir_tails_sentinel, '.stop')
while True:
if isfile(file_stop): # stop now, pick up any pending tasks next invocation
remove(file_stop)
remove(file_pid)
break
p_pending = [join(self._dir_tails_sentinel, d) for d in listdir(self._dir_tails_sentinel)
if isdir(join(self._dir_tails_sentinel, d))]
p_pending = [p for p in p_pending if [s for s in listdir(p) if s.startswith('.')]] # size marker
if p_pending:
pdir = basename(p_pending[0])
rr_id = pdir
rr_size = int([s for s in listdir(p_pending[0]) if s.startswith('.')][0][1:])
open(join(p_pending[0], '.in-progress'), 'w').close()
await self.create_rev_reg(rr_id, rr_size or None)
rmtree(p_pending[0])
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.serve <<<') | python | async def serve(self) -> None:
"""
Write pidfile to sentinel directory if need be, and wait for sentinels
to shut down or build revocation registry and tails file.
"""
LOGGER.debug('RevRegBuilder.serve >>>')
assert self.external
file_pid = join(self._dir_tails_sentinel, '.pid')
if isfile(file_pid):
with open(file_pid, 'r') as fh_pid:
pid = int(fh_pid.read())
try:
kill(pid, 0)
except ProcessLookupError:
remove(file_pid)
LOGGER.info('RevRegBuilder removed derelict .pid file')
except PermissionError:
LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
LOGGER.debug('RevRegBuilder.serve <<<')
return
else:
LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
LOGGER.debug('RevRegBuilder.serve <<<')
return
pid = getpid()
with open(file_pid, 'w') as pid_fh:
print(str(pid), file=pid_fh)
file_stop = join(self._dir_tails_sentinel, '.stop')
while True:
if isfile(file_stop): # stop now, pick up any pending tasks next invocation
remove(file_stop)
remove(file_pid)
break
p_pending = [join(self._dir_tails_sentinel, d) for d in listdir(self._dir_tails_sentinel)
if isdir(join(self._dir_tails_sentinel, d))]
p_pending = [p for p in p_pending if [s for s in listdir(p) if s.startswith('.')]] # size marker
if p_pending:
pdir = basename(p_pending[0])
rr_id = pdir
rr_size = int([s for s in listdir(p_pending[0]) if s.startswith('.')][0][1:])
open(join(p_pending[0], '.in-progress'), 'w').close()
await self.create_rev_reg(rr_id, rr_size or None)
rmtree(p_pending[0])
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.serve <<<') | Write pidfile to sentinel directory if need be, and wait for sentinels
to shut down or build revocation registry and tails file. | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L249-L301 |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder.stop | async def stop(wallet_name: str) -> None:
"""
        Gracefully stop an external revocation registry builder, waiting for any tails file
        builds currently in progress to complete.
        The indy-sdk toolkit uses a temporary directory for tails file creation,
        and shutting down the toolkit removes the directory, crashing the external
        tails file write. This method allows a graceful stop to wait for completion
        of such tasks already in progress.
        :param wallet_name: name of wallet for the external revocation registry builder to stop
"""
LOGGER.debug('RevRegBuilder.stop >>>')
dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
if isdir(dir_sentinel):
open(join(dir_sentinel, '.stop'), 'w').close() # touch
while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.stop <<<') | python | async def stop(wallet_name: str) -> None:
"""
Gracefully stop an external revocation registry builder, waiting for any tails file
builds currently in progress to complete.
The indy-sdk toolkit uses a temporary directory for tails file creation,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:param wallet_name: name of wallet for the external revocation registry builder to stop
"""
LOGGER.debug('RevRegBuilder.stop >>>')
dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
if isdir(dir_sentinel):
open(join(dir_sentinel, '.stop'), 'w').close() # touch
while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.stop <<<') | Gracefully stop an external revocation registry builder, waiting for any tails file
builds currently in progress to complete.
The indy-sdk toolkit uses a temporary directory for tails file creation,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:param wallet_name: name of wallet for the external revocation registry builder to stop | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L304-L327 |
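Shutting the external builder down gracefully is then a single awaited call; a short sketch:

    async def shutdown_builder(wallet_name: str) -> None:
        # touches <sentinel>/.stop, then waits for any in-progress tails builds to finish
        await RevRegBuilder.stop(wallet_name)

    # e.g. asyncio.get_event_loop().run_until_complete(shutdown_builder('issuer-wallet'))  # placeholder wallet name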
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/rrbuilder.py | RevRegBuilder.create_rev_reg | async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
"""
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
"""
LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
if not self.wallet.handle:
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if not ok_rev_reg_id(rr_id):
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_size = rr_size or 64
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
dir_tails = self.dir_tails_top(rr_id)
dir_target = self.dir_tails_target(rr_id)
if self.external:
try:
makedirs(dir_target, exist_ok=False)
except FileExistsError:
LOGGER.warning(
'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
dir_target,
rr_id)
rmtree(dir_target)
makedirs(dir_target, exist_ok=False)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer(
'default',
json.dumps({
'base_dir': dir_target,
'uri_pattern': ''
}))
(created_rr_id, rr_def_json, rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
self.wallet.handle,
self.did,
'CL_ACCUM',
tag,
cd_id,
json.dumps({
'max_cred_num': rr_size,
'issuance_type': 'ISSUANCE_BY_DEFAULT'
}),
tails_writer_handle)
tails_hash = basename(Tails.unlinked(dir_target).pop())
with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
print(rr_def_json, file=rr_def_fh)
with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
print(rr_ent_json, file=rr_ent_fh)
Tails.associate(dir_tails, created_rr_id, tails_hash) # associate last: symlink signals completion
LOGGER.debug('RevRegBuilder.create_rev_reg <<<') | python | async def create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
"""
Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64)
"""
LOGGER.debug('RevRegBuilder.create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
if not self.wallet.handle:
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if not ok_rev_reg_id(rr_id):
LOGGER.debug('RevRegBuilder.create_rev_reg <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_size = rr_size or 64
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
dir_tails = self.dir_tails_top(rr_id)
dir_target = self.dir_tails_target(rr_id)
if self.external:
try:
makedirs(dir_target, exist_ok=False)
except FileExistsError:
LOGGER.warning(
'RevRegBuilder.create_rev_reg found dir %s, but task not in progress: rebuilding rev reg %s',
dir_target,
rr_id)
rmtree(dir_target)
makedirs(dir_target, exist_ok=False)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer(
'default',
json.dumps({
'base_dir': dir_target,
'uri_pattern': ''
}))
(created_rr_id, rr_def_json, rr_ent_json) = await anoncreds.issuer_create_and_store_revoc_reg(
self.wallet.handle,
self.did,
'CL_ACCUM',
tag,
cd_id,
json.dumps({
'max_cred_num': rr_size,
'issuance_type': 'ISSUANCE_BY_DEFAULT'
}),
tails_writer_handle)
tails_hash = basename(Tails.unlinked(dir_target).pop())
with open(join(dir_target, 'rr_def.json'), 'w') as rr_def_fh:
print(rr_def_json, file=rr_def_fh)
with open(join(dir_target, 'rr_ent.json'), 'w') as rr_ent_fh:
print(rr_ent_json, file=rr_ent_fh)
Tails.associate(dir_tails, created_rr_id, tails_hash) # associate last: symlink signals completion
LOGGER.debug('RevRegBuilder.create_rev_reg <<<') | Create revocation registry artifacts and new tails file (with association to
corresponding revocation registry identifier via symbolic link name)
for input revocation registry identifier. Symbolic link presence signals completion.
If revocation registry builder operates in a process external to its Issuer's,
target directory is hopper directory.
Raise WalletState for closed wallet.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 64) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/rrbuilder.py#L329-L397 |
praekelt/django-ultracache | ultracache/templatetags/ultracache_tags.py | do_ultracache | def do_ultracache(parser, token):
"""Based on Django's default cache template tag"""
nodelist = parser.parse(("endultracache",))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError(""%r" tag requires at least 2 arguments." % tokens[0])
return UltraCacheNode(nodelist,
parser.compile_filter(tokens[1]),
        tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]]) | python | def do_ultracache(parser, token):
"""Based on Django's default cache template tag"""
nodelist = parser.parse(("endultracache",))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
return UltraCacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]]) | Based on Django's default cache template tag | https://github.com/praekelt/django-ultracache/blob/8898f10e50fc8f8d0a4cb7d3fe4d945bf257bd9f/ultracache/templatetags/ultracache_tags.py#L88-L98 |
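In a Django template the tag would be used along the lines of the snippet below (shown as a Python string for consistency); the load-name ultracache_tags and the argument order — timeout first, then a fixed fragment name, then any vary-on values — are inferred from the parser above and are best treated as assumptions:

    TEMPLATE = '''
    {% load ultracache_tags %}
    {% ultracache 300 product_detail product.pk request.user.pk %}
        {{ product.title }}
    {% endultracache %}
    '''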
PSPC-SPAC-buyandsell/von_anchor | von_anchor/frill.py | ppjson | def ppjson(dumpit: Any, elide_to: int = None) -> str:
"""
JSON pretty printer, whether already json-encoded or not
:param dumpit: object to pretty-print
:param elide_to: optional maximum length including ellipses ('...')
:return: json pretty-print
"""
if elide_to is not None:
elide_to = max(elide_to, 3) # make room for ellipses '...'
try:
rv = json.dumps(json.loads(dumpit) if isinstance(dumpit, str) else dumpit, indent=4)
except TypeError:
rv = '{}'.format(pformat(dumpit, indent=4, width=120))
return rv if elide_to is None or len(rv) <= elide_to else '{}...'.format(rv[0 : elide_to - 3]) | python | def ppjson(dumpit: Any, elide_to: int = None) -> str:
"""
JSON pretty printer, whether already json-encoded or not
:param dumpit: object to pretty-print
:param elide_to: optional maximum length including ellipses ('...')
:return: json pretty-print
"""
if elide_to is not None:
elide_to = max(elide_to, 3) # make room for ellipses '...'
try:
rv = json.dumps(json.loads(dumpit) if isinstance(dumpit, str) else dumpit, indent=4)
except TypeError:
rv = '{}'.format(pformat(dumpit, indent=4, width=120))
return rv if elide_to is None or len(rv) <= elide_to else '{}...'.format(rv[0 : elide_to - 3]) | JSON pretty printer, whether already json-encoded or not
:param dumpit: object to pretty-print
:param elide_to: optional maximum length including ellipses ('...')
:return: json pretty-print | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/frill.py#L30-L45 |
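Usage is symmetric for dicts and already-encoded JSON text; a short sketch assuming ppjson is in scope:

    print(ppjson({'a': 1, 'b': [2, 3]}))             # pretty-printed over multiple lines
    print(ppjson('{"a": 1}'))                        # json text is decoded first, then dumped
    print(ppjson({'key': 'x' * 500}, elide_to=40))   # truncated to 40 characters ending in '...'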
PSPC-SPAC-buyandsell/von_anchor | von_anchor/frill.py | do_wait | def do_wait(coro: Callable) -> Any:
"""
    Perform asynchronous operation; await then return the result.
:param coro: coroutine to await
:return: coroutine result
"""
event_loop = None
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop.run_until_complete(coro) | python | def do_wait(coro: Callable) -> Any:
"""
Perform asynchronous operation; await then return the result.
:param coro: coroutine to await
:return: coroutine result
"""
event_loop = None
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop.run_until_complete(coro) | Perform asynchronous operation; await then return the result.
:param coro: coroutine to await
:return: coroutine result | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/frill.py#L48-L62 |
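do_wait() lets synchronous callers drive a coroutine to completion on the current (or a fresh) event loop; a minimal sketch:

    async def add(x: int, y: int) -> int:
        return x + y

    assert do_wait(add(2, 3)) == 5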
PSPC-SPAC-buyandsell/von_anchor | von_anchor/frill.py | inis2dict | def inis2dict(ini_paths: Union[str, Sequence[str]]) -> dict:
"""
Take one or more ini files and return a dict with configuration from all,
interpolating bash-style variables ${VAR} or ${VAR:-DEFAULT}.
:param ini_paths: path or paths to .ini files
"""
var_dflt = r'\${(.*?):-(.*?)}'
def _interpolate(content):
rv = expandvars(content)
while True:
match = re.search(var_dflt, rv)
if match is None:
break
bash_var = '${{{}}}'.format(match.group(1))
value = expandvars(bash_var)
rv = re.sub(var_dflt, match.group(2) if value == bash_var else value, rv, count=1)
return rv
parser = ConfigParser()
for ini in [ini_paths] if isinstance(ini_paths, str) else ini_paths:
if not isfile(ini):
raise FileNotFoundError('No such file: {}'.format(ini))
with open(ini, 'r') as ini_fh:
ini_text = _interpolate(ini_fh.read())
parser.read_string(ini_text)
return {s: dict(parser[s].items()) for s in parser.sections()} | python | def inis2dict(ini_paths: Union[str, Sequence[str]]) -> dict:
"""
Take one or more ini files and return a dict with configuration from all,
interpolating bash-style variables ${VAR} or ${VAR:-DEFAULT}.
:param ini_paths: path or paths to .ini files
"""
var_dflt = r'\${(.*?):-(.*?)}'
def _interpolate(content):
rv = expandvars(content)
while True:
match = re.search(var_dflt, rv)
if match is None:
break
bash_var = '${{{}}}'.format(match.group(1))
value = expandvars(bash_var)
rv = re.sub(var_dflt, match.group(2) if value == bash_var else value, rv, count=1)
return rv
parser = ConfigParser()
for ini in [ini_paths] if isinstance(ini_paths, str) else ini_paths:
if not isfile(ini):
raise FileNotFoundError('No such file: {}'.format(ini))
with open(ini, 'r') as ini_fh:
ini_text = _interpolate(ini_fh.read())
parser.read_string(ini_text)
return {s: dict(parser[s].items()) for s in parser.sections()} | Take one or more ini files and return a dict with configuration from all,
interpolating bash-style variables ${VAR} or ${VAR:-DEFAULT}.
:param ini_paths: path or paths to .ini files | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/frill.py#L65-L95 |
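Given an ini file with bash-style variables, inis2dict() yields a plain nested dict; an assumed example (placeholder path and keys):

    # /tmp/pool.ini:
    # [Node Pool]
    # name = ${POOL_NAME:-sandbox}
    # genesis.txn.path = ${GENESIS_PATH:-/home/indy/genesis.txn}
    cfg = inis2dict('/tmp/pool.ini')
    # -> {'Node Pool': {'name': 'sandbox', 'genesis.txn.path': '/home/indy/genesis.txn'}}
    # (values fall back to the :-defaults when POOL_NAME / GENESIS_PATH are unset)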
PSPC-SPAC-buyandsell/von_anchor | von_anchor/frill.py | Stopwatch.mark | def mark(self, digits: int = None) -> float:
"""
Return time in seconds since last mark, reset, or construction.
:param digits: number of fractional decimal digits to retain (default as constructed)
"""
self._mark[:] = [self._mark[1], time()]
rv = self._mark[1] - self._mark[0]
if digits is not None and digits > 0:
rv = round(rv, digits)
elif digits == 0 or self._digits == 0:
rv = int(rv)
elif self._digits is not None and self._digits > 0:
rv = round(rv, self._digits)
return rv | python | def mark(self, digits: int = None) -> float:
"""
Return time in seconds since last mark, reset, or construction.
:param digits: number of fractional decimal digits to retain (default as constructed)
"""
self._mark[:] = [self._mark[1], time()]
rv = self._mark[1] - self._mark[0]
if digits is not None and digits > 0:
rv = round(rv, digits)
elif digits == 0 or self._digits == 0:
rv = int(rv)
elif self._digits is not None and self._digits > 0:
rv = round(rv, self._digits)
return rv | Return time in seconds since last mark, reset, or construction.
:param digits: number of fractional decimal digits to retain (default as constructed) | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/frill.py#L113-L130 |
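A short usage sketch, assuming the constructor argument is the default number of rounding digits:

    from time import sleep

    sw = Stopwatch(2)         # assumption: constructor sets the default digit count
    sleep(1.2)
    print(sw.mark())          # ~1.2 -- seconds since construction, rounded to 2 digits
    sleep(0.4)
    print(sw.mark(digits=0))  # whole seconds since the previous mark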
PSPC-SPAC-buyandsell/von_anchor | von_anchor/wallet/search.py | StorageRecordSearch.open | async def open(self) -> None:
"""
Begin the search operation.
"""
LOGGER.debug('StorageRecordSearch.open >>>')
if self.opened:
LOGGER.debug('StorageRecordSearch.open <!< Search is already opened')
raise BadSearch('Search is already opened')
if not self._wallet.opened:
LOGGER.debug('StorageRecordSearch.open <!< Wallet %s is closed', self._wallet.name)
raise WalletState('Wallet {} is closed'.format(self._wallet.name))
self._handle = await non_secrets.open_wallet_search(
self._wallet.handle,
self._type,
self._query_json,
StorageRecordSearch.OPTIONS_JSON)
LOGGER.debug('StorageRecordSearch.open <<<') | python | async def open(self) -> None:
"""
Begin the search operation.
"""
LOGGER.debug('StorageRecordSearch.open >>>')
if self.opened:
LOGGER.debug('StorageRecordSearch.open <!< Search is already opened')
raise BadSearch('Search is already opened')
if not self._wallet.opened:
LOGGER.debug('StorageRecordSearch.open <!< Wallet %s is closed', self._wallet.name)
raise WalletState('Wallet {} is closed'.format(self._wallet.name))
self._handle = await non_secrets.open_wallet_search(
self._wallet.handle,
self._type,
self._query_json,
StorageRecordSearch.OPTIONS_JSON)
LOGGER.debug('StorageRecordSearch.open <<<') | Begin the search operation. | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/search.py#L97-L118 |
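A heavily hedged usage sketch — the constructor arguments (wallet, record type, WQL query) are assumptions inferred only from the attributes open() touches:

    search = StorageRecordSearch(wallet, 'searchable-type', {'~tag': 'value'})  # assumed signature
    await search.open()  # raises BadSearch if already open, WalletState if the wallet is closed
    # ... fetch result batches here, then close the search (closing API not shown in this excerpt)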