def _process_loaded_object(self, path):
"""process the :paramref:`path`.
:param str path: the path to load an svg from
"""
file_name = os.path.basename(path)
name = os.path.splitext(file_name)[0]
with open(path) as file:
string = file.read()
self._instruction_type_to_file_content[name] = string
def get_context_data(self, **kwargs):
"""Add context data to view"""
context = super().get_context_data(**kwargs)
context.update({
'title': self.title,
'submit_value': self.submit_value,
'cancel_url': self.cancel_url
})
return context
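A minimal usage sketch for the mixin above, assuming it is mixed into a Django class-based view; the view class name, mixin name, and attribute values below are hypothetical:
>>> from django.views.generic import TemplateView
>>> class ConfirmDeleteView(TitledFormMixin, TemplateView):  # TitledFormMixin: hypothetical name for the mixin above
...     template_name = 'confirm_delete.html'
...     title = 'Delete item'
...     submit_value = 'Delete'
...     cancel_url = '/items/'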
def clean_out_dir(directory):
"""
Delete all the files and subdirectories in a directory.
"""
if not isinstance(directory, path):
directory = path(directory)
for file_path in directory.files():
file_path.remove()
for dir_path in directory.dirs():
dir_path.rmtree()
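A short usage sketch, assuming the path.py package that provides the path class used above is installed; the directory name is hypothetical:
>>> from path import path  # path.py; newer releases name this class Path
>>> clean_out_dir(path('/tmp/build_output'))
>>> clean_out_dir('/tmp/build_output')  # a plain string is also accepted and wrapped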
def dump_copy(self, path, relativePath, name=None,
description=None,
replace=False, verbose=False):
"""
Copy an existing system file to the repository, registering it in the Repository with a utc timestamp.
:Parameters:
#. path (str): The full path of the file to copy into the repository.
#. relativePath (str): The relative to the repository path of the directory where the file should be dumped.
If relativePath does not exist, it will be created automatically.
#. name (string): The file name.
If None is given, name will be split from path.
#. description (None, string, picklable object): Any description of the file.
#. replace (boolean): Whether to replace any existing file with the same name.
#. verbose (boolean): Whether to warn and inform about any abnormalities.
"""
relativePath = os.path.normpath(relativePath)
if relativePath == '.':
relativePath = ''
if name is None:
_,name = os.path.split(path)
# ensure directory added
self.add_directory(relativePath)
# get real path
realPath = os.path.join(self.__path, relativePath)
# get directory info dict
dirInfoDict, errorMessage = self.get_directory_info(relativePath)
assert dirInfoDict is not None, errorMessage
if name in dict.__getitem__(dirInfoDict, "files"):
if not replace:
if verbose:
warnings.warn("a file with the name '%s' is already defined in repository dictionary info. Set replace flag to True if you want to replace the existing file"%(name))
return
# convert dump and pull methods to strings
dump = "raise Exception(\"dump is ambiguous for copied file '$FILE_PATH' \")"
pull = "raise Exception(\"pull is ambiguous for copied file '$FILE_PATH' \")"
# dump file
try:
shutil.copyfile(path, os.path.join(realPath,name))
except Exception as e:
if verbose:
warnings.warn(e)
return
# set info
klass = None
# save the new file to the repository
dict.__getitem__(dirInfoDict, "files")[name] = {"dump":dump,
"pull":pull,
"timestamp":datetime.utcnow(),
"id":str(uuid.uuid1()),
"class": klass,
"description":description}
# save repository
self.save()
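A hedged usage sketch for dump_copy above, assuming repo is an already-constructed Repository instance from the same library; the file and directory paths are hypothetical:
>>> repo.dump_copy('/home/user/results.csv', 'experiments/run_1',
...                description='raw results copied from disk',
...                replace=True, verbose=True)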
def new_pattern(self, id_, name, rows=None):
"""Create a new knitting pattern.
If rows is :obj:`None` it is replaced with the
:meth:`new_row_collection`.
"""
if rows is None:
rows = self.new_row_collection()
return self._spec.new_pattern(id_, name, rows, self)
def get_file(self, sharename, fileid):
"""
Get a specific file. Does not require authentication.
Input:
* A sharename
* A fileid - must be an integer
Output:
* A :py:mod:`pygett.files.GettFile` object
Example::
file = client.get_file("4ddfds", 0)
"""
if not isinstance(fileid, int):
raise TypeError("'fileid' must be an integer")
response = GettRequest().get("/files/%s/%d" % (sharename, fileid))
if response.http_status == 200:
return GettFile(self.user, **response.response)
def plot(image, overlay=None, blend=False,
alpha=1, cmap='Greys_r', overlay_cmap='jet', overlay_alpha=0.9,
cbar=False, cbar_length=0.8, cbar_dx=0., cbar_vertical=True,
axis=0, nslices=12, slices=None, ncol=None, slice_buffer=None, black_bg=True,
bg_thresh_quant=0.01, bg_val_quant=0.99, domain_image_map=None, crop=False, scale=False,
reverse=False, title=None, title_fontsize=20, title_dx=0., title_dy=0.,
filename=None, dpi=500, figsize=1.5, reorient=True):
"""
Plot an ANTsImage.
By default, images will be reoriented to 'LAI' orientation before plotting.
So, if axis == 0, the images will be ordered from the
left side of the brain to the right side of the brain. If axis == 1,
the images will be ordered from the anterior (front) of the brain to
the posterior (back) of the brain. And if axis == 2, the images will
be ordered from the inferior (bottom) of the brain to the superior (top)
of the brain.
ANTsR function: `plot.antsImage`
Arguments
---------
image : ANTsImage
image to plot
overlay : ANTsImage
image to overlay on base image
cmap : string
colormap to use for base image. See matplotlib.
overlay_cmap : string
colormap to use for overlay images, if applicable. See matplotlib.
overlay_alpha : float
level of transparency for any overlays. Smaller value means
the overlay is more transparent. See matplotlib.
axis : integer
which axis to plot along if image is 3D
nslices : integer
number of slices to plot if image is 3D
slices : list or tuple of integers
specific slice indices to plot if image is 3D.
If given, this will override `nslices`.
This can be absolute array indices (e.g. (80,100,120)), or
this can be relative array indices (e.g. (0.4,0.5,0.6))
ncol : integer
Number of columns to have on the plot if image is 3D.
slice_buffer : integer
how many slices to buffer when finding the non-zero slices of
a 3D image. So, if slice_buffer = 10, then the first slice
in a 3D image will be the first non-zero slice index plus 10 more
slices.
black_bg : boolean
if True, the background of the image(s) will be black.
if False, the background of the image(s) will be determined by the
values `bg_thresh_quant` and `bg_val_quant`.
bg_thresh_quant : float
if black_bg=False, the background will be determined by thresholding
the image at the `bg_thresh` quantile value and setting the background
intensity to the `bg_val` quantile value.
This value should be in [0, 1] - somewhere around 0.01 is recommended.
- equal to 1 will threshold the entire image
- equal to 0 will threshold none of the image
bg_val_quant : float
if black_bg=False, the background will be determined by thresholding
the image at the `bg_thresh` quantile value and setting the background
intensity to the `bg_val` quantile value.
This value should be in [0, 1]
- equal to 1 is pure white
- equal to 0 is pure black
- somewhere in between is gray
domain_image_map : ANTsImage
this input ANTsImage or list of ANTsImage types contains a reference image
`domain_image` and optional reference mapping named `domainMap`.
If supplied, the image(s) to be plotted will be mapped to the domain
image space before plotting - useful for non-standard image orientations.
crop : boolean
if true, the image(s) will be cropped to their bounding boxes, resulting
in a potentially smaller image size.
if false, the image(s) will not be cropped
scale : boolean or 2-tuple
if true, nothing will happen to intensities of image(s) and overlay(s)
if false, dynamic range will be maximized when visualizing overlays
if 2-tuple, the image will be dynamically scaled between these quantiles
reverse : boolean
if true, the order in which the slices are plotted will be reversed.
This is useful if you want to plot from the front of the brain first
to the back of the brain, or vice-versa
title : string
add a title to the plot
filename : string
if given, the resulting image will be saved to this file
dpi : integer
determines resolution of image if saved to file. Higher values
result in higher resolution images, but at a cost of having a
larger file size
Example
-------
>>> import ants
>>> import numpy as np
>>> img = ants.image_read(ants.get_data('r16'))
>>> segs = img.kmeans_segmentation(k=3)['segmentation']
>>> ants.plot(img, segs*(segs==1), crop=True)
>>> ants.plot(img, segs*(segs==1), crop=False)
>>> mni = ants.image_read(ants.get_data('mni'))
>>> segs = mni.kmeans_segmentation(k=3)['segmentation']
>>> ants.plot(mni, segs*(segs==1), crop=False)
"""
if (axis == 'x') or (axis == 'saggittal'):
axis = 0
if (axis == 'y') or (axis == 'coronal'):
axis = 1
if (axis == 'z') or (axis == 'axial'):
axis = 2
def mirror_matrix(x):
return x[::-1,:]
def rotate270_matrix(x):
return mirror_matrix(x.T)
def rotate180_matrix(x):
return x[::-1,:]
def rotate90_matrix(x):
return x.T
def flip_matrix(x):
return mirror_matrix(rotate180_matrix(x))
def reorient_slice(x, axis):
if (axis != 2):
x = rotate90_matrix(x)
if (axis == 2):
x = rotate270_matrix(x)
x = mirror_matrix(x)
return x
# need this hack because of a weird NaN warning from matplotlib with overlays
warnings.simplefilter('ignore')
# handle `image` argument
if isinstance(image, str):
image = iio2.image_read(image)
if not isinstance(image, iio.ANTsImage):
raise ValueError('image argument must be an ANTsImage')
if (image.pixeltype not in {'float', 'double'}) or (image.is_rgb):
scale = False # turn off scaling if image is discrete
# handle `overlay` argument
if overlay is not None:
if isinstance(overlay, str):
overlay = iio2.image_read(overlay)
if not isinstance(overlay, iio.ANTsImage):
raise ValueError('overlay argument must be an ANTsImage')
if not iio.image_physical_space_consistency(image, overlay):
overlay = reg.resample_image_to_target(overlay, image, interp_type='linear')
if blend:
if alpha == 1:
alpha = 0.5
image = image*alpha + overlay*(1-alpha)
overlay = None
alpha = 1.
# handle `domain_image_map` argument
if domain_image_map is not None:
if isinstance(domain_image_map, iio.ANTsImage):
tx = tio2.new_ants_transform(precision='float', transform_type='AffineTransform',
dimension=image.dimension)
image = tio.apply_ants_transform_to_image(tx, image, domain_image_map)
if overlay is not None:
overlay = tio.apply_ants_transform_to_image(tx, overlay,
domain_image_map,
interpolation='linear')
elif isinstance(domain_image_map, (list, tuple)):
# expect an image and transformation
if len(domain_image_map) != 2:
raise ValueError('domain_image_map list or tuple must have length == 2')
dimg = domain_image_map[0]
if not isinstance(dimg, iio.ANTsImage):
raise ValueError('domain_image_map first entry should be ANTsImage')
tx = domain_image_map[1]
image = reg.apply_transforms(dimg, image, transform_list=tx)
if overlay is not None:
overlay = reg.apply_transforms(dimg, overlay, transform_list=tx,
interpolator='linear')
## single-channel images ##
if image.components == 1:
# potentially crop image
if crop:
plotmask = image.get_mask(cleanup=0)
if plotmask.max() == 0:
plotmask += 1
image = image.crop_image(plotmask)
if overlay is not None:
overlay = overlay.crop_image(plotmask)
# potentially find dynamic range
if scale == True:
vmin, vmax = image.quantile((0.05,0.95))
elif isinstance(scale, (list,tuple)):
if len(scale) != 2:
raise ValueError('scale argument must be boolean or list/tuple with two values')
vmin, vmax = image.quantile(scale)
else:
vmin = None
vmax = None
# Plot 2D image
if image.dimension == 2:
img_arr = image.numpy()
img_arr = rotate90_matrix(img_arr)
if not black_bg:
img_arr[img_arr<image.quantile(bg_thresh_quant)] = image.quantile(bg_val_quant)
if overlay is not None:
ov_arr = overlay.numpy()
ov_arr = rotate90_matrix(ov_arr)
ov_arr[np.abs(ov_arr) == 0] = np.nan
fig = plt.figure()
if title is not None:
fig.suptitle(title, fontsize=title_fontsize, x=0.5+title_dx, y=0.95+title_dy)
ax = plt.subplot(111)
# plot main image
im = ax.imshow(img_arr, cmap=cmap,
alpha=alpha,
vmin=vmin, vmax=vmax)
if overlay is not None:
im = ax.imshow(ov_arr,
alpha=overlay_alpha,
cmap=overlay_cmap)
if cbar:
cbar_orient = 'vertical' if cbar_vertical else 'horizontal'
fig.colorbar(im, orientation=cbar_orient)
plt.axis('off')
# Plot 3D image
elif image.dimension == 3:
# resample image if spacing is very unbalanced
spacing = [s for i,s in enumerate(image.spacing) if i != axis]
was_resampled = False
if (max(spacing) / min(spacing)) > 3.:
was_resampled = True
new_spacing = (1,1,1)
image = image.resample_image(tuple(new_spacing))
if overlay is not None:
overlay = overlay.resample_image(tuple(new_spacing))
if reorient:
image = image.reorient_image2('LAI')
img_arr = image.numpy()
# reorder dims so that chosen axis is first
img_arr = np.rollaxis(img_arr, axis)
if overlay is not None:
if reorient:
overlay = overlay.reorient_image2('LAI')
ov_arr = overlay.numpy()
ov_arr[np.abs(ov_arr) == 0] = np.nan
ov_arr = np.rollaxis(ov_arr, axis)
if slices is None:
if not isinstance(slice_buffer, (list, tuple)):
if slice_buffer is None:
slice_buffer = (int(img_arr.shape[1]*0.1), int(img_arr.shape[2]*0.1))
else:
slice_buffer = (slice_buffer, slice_buffer)
nonzero = np.where(img_arr.sum(axis=(1,2)) > 0.01)[0]
min_idx = nonzero[0] + slice_buffer[0]
max_idx = nonzero[-1] - slice_buffer[1]
slice_idxs = np.linspace(min_idx, max_idx, nslices).astype('int')
if reverse:
slice_idxs = np.array(list(reversed(slice_idxs)))
else:
if isinstance(slices, (int,float)):
slices = [slices]
# if all slices are less than 1, infer that they are relative slices
if sum([s > 1 for s in slices]) == 0:
slices = [int(s*img_arr.shape[0]) for s in slices]
slice_idxs = slices
nslices = len(slices)
if was_resampled:
# re-calculate slices to account for new image shape
slice_idxs = np.unique(np.array([int(s*(image.shape[axis]/img_arr.shape[0])) for s in slice_idxs]))
# only have one row if nslices <= 6 and user didn't specify ncol
if ncol is None:
if (nslices <= 6):
ncol = nslices
else:
ncol = int(round(math.sqrt(nslices)))
# calculate grid size
nrow = math.ceil(nslices / ncol)
xdim = img_arr.shape[2]
ydim = img_arr.shape[1]
dim_ratio = ydim/xdim
fig = plt.figure(figsize=((ncol+1)*figsize*dim_ratio, (nrow+1)*figsize))
if title is not None:
fig.suptitle(title, fontsize=title_fontsize, x=0.5+title_dx, y=0.95+title_dy)
gs = gridspec.GridSpec(nrow, ncol,
wspace=0.0, hspace=0.0,
top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1),
left=0.5/(ncol+1), right=1-0.5/(ncol+1))
slice_idx_idx = 0
for i in range(nrow):
for j in range(ncol):
if slice_idx_idx < len(slice_idxs):
imslice = img_arr[slice_idxs[slice_idx_idx]]
imslice = reorient_slice(imslice, axis)
if not black_bg:
imslice[imslice<image.quantile(bg_thresh_quant)] = image.quantile(bg_val_quant)
else:
imslice = np.zeros_like(img_arr[0])
imslice = reorient_slice(imslice, axis)
ax = plt.subplot(gs[i,j])
im = ax.imshow(imslice, cmap=cmap,
vmin=vmin, vmax=vmax)
if overlay is not None:
if slice_idx_idx < len(slice_idxs):
ovslice = ov_arr[slice_idxs[slice_idx_idx]]
ovslice = reorient_slice(ovslice, axis)
im = ax.imshow(ovslice, alpha=overlay_alpha, cmap=overlay_cmap)
ax.axis('off')
slice_idx_idx += 1
if cbar:
cbar_start = (1-cbar_length) / 2
if cbar_vertical:
cax = fig.add_axes([0.9+cbar_dx, cbar_start, 0.03, cbar_length])
cbar_orient = 'vertical'
else:
cax = fig.add_axes([cbar_start, 0.08+cbar_dx, cbar_length, 0.03])
cbar_orient = 'horizontal'
fig.colorbar(im, cax=cax, orientation=cbar_orient)
## multi-channel images ##
elif image.components > 1:
if not image.is_rgb:
raise ValueError('Multi-component images only supported if they are RGB')
img_arr = image.numpy()
img_arr = np.stack([rotate90_matrix(img_arr[:,:,i]) for i in range(3)], axis=-1)
fig = plt.figure()
ax = plt.subplot(111)
# plot main image
ax.imshow(img_arr, alpha=alpha)
plt.axis('off')
if filename is not None:
filename = os.path.expanduser(filename)
plt.savefig(filename, dpi=dpi, transparent=True, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
# turn warnings back to default
warnings.simplefilter('default')
def user_cache_dir():
r"""Return the per-user cache dir (full path).
- Linux, *BSD, SunOS: ~/.cache/glances
- macOS: ~/Library/Caches/glances
- Windows: {%LOCALAPPDATA%,%APPDATA%}\glances\cache
"""
if WINDOWS:
path = os.path.join(os.environ.get('LOCALAPPDATA') or os.environ.get('APPDATA'),
'glances', 'cache')
elif MACOS:
path = os.path.expanduser('~/Library/Caches/glances')
else:
path = os.path.join(os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
'glances')
return path
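A small sketch of typical use; creating the directory is left to the caller, since the function only builds the path:
>>> import os
>>> cache_dir = user_cache_dir()
>>> os.makedirs(cache_dir, exist_ok=True)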
def wait_for_service_tasks_state(
service_name,
expected_task_count,
expected_task_states,
timeout_sec=120
):
""" Returns once the service has at least N tasks in one of the specified state(s)
:param service_name: the service name
:type service_name: str
:param expected_task_count: the expected number of tasks in the specified state(s)
:type expected_task_count: int
:param expected_task_states: the expected state(s) for tasks to be in, e.g. 'TASK_RUNNING'
:type expected_task_states: [str]
:param timeout_sec: duration to wait
:type timeout_sec: int
:return: the duration waited in seconds
:rtype: int
"""
return time_wait(
lambda: task_states_predicate(service_name, expected_task_count, expected_task_states),
timeout_seconds=timeout_sec)
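A usage sketch with a hypothetical service name, waiting for at least three tasks to reach TASK_RUNNING:
>>> wait_for_service_tasks_state('my-service', 3, ['TASK_RUNNING'], timeout_sec=300)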
def display_candidates(self, candidates, pdf_file=None):
"""
Displays the bounding boxes corresponding to candidates on an image of the pdf.
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
if not pdf_file:
pdf_file = os.path.join(
self.pdf_path, candidates[0][0].context.sentence.document.name
)
if os.path.isfile(pdf_file + ".pdf"):
pdf_file += ".pdf"
elif os.path.isfile(pdf_file + ".PDF"):
pdf_file += ".PDF"
else:
logger.error("display_candidates failed: pdf file missing.")
boxes = [
get_box(mention.context) for c in candidates for mention in c.get_mentions()
]
imgs = self.display_boxes(pdf_file, boxes, alternate_colors=True)
return display(*imgs)
def CheckProg(context, prog_name):
"""Simple check if a program exists in the path. Returns the path
for the application, or None if not found.
"""
res = SCons.Conftest.CheckProg(context, prog_name)
context.did_show_result = 1
return res
def readinto(self, b):
"""
Read bytes into a pre-allocated, writable bytes-like object b,
and return the number of bytes read.
Args:
b (bytes-like object): buffer.
Returns:
int: number of bytes read
"""
if not self._readable:
raise UnsupportedOperation('read')
with self._seek_lock:
# Gets seek
seek = self._seek
# Initializes queue
queue = self._read_queue
if seek == 0:
# Starts initial preloading on first call
self._preload_range()
# Initializes read data buffer
size = len(b)
if size:
# Preallocated buffer:
# Use memory view to avoid copies
b_view = memoryview(b)
size_left = size
else:
# Dynamic buffer:
# Can't avoid copy, read until EOF
b_view = b
size_left = -1
b_end = 0
# Starts reading
buffer_size = self._buffer_size
while size_left > 0 or size_left == -1:
# Finds buffer position in queue and buffer seek
start = seek % buffer_size
queue_index = seek - start
# Gets preloaded buffer
try:
buffer = queue[queue_index]
except KeyError:
# EOF
break
# Get buffer from future
with handle_os_exceptions():
try:
queue[queue_index] = buffer = buffer.result()
# Already evaluated
except AttributeError:
pass
buffer_view = memoryview(buffer)
data_size = len(buffer)
# Checks if end of file reached
if not data_size:
break
# Gets theoretical range to copy
if size_left != -1:
end = start + size_left
else:
end = data_size - start
# Checks for end of buffer
if end >= data_size:
# Adjusts range to copy
end = data_size
# Removes consumed buffer from queue
del queue[queue_index]
# Append another buffer preload at end of queue
index = queue_index + buffer_size * self._max_buffers
if index < self._size:
queue[index] = self._workers.submit(
self._read_range, index, index + buffer_size)
# Gets read size, updates seek and updates size left
read_size = end - start
if size_left != -1:
size_left -= read_size
seek += read_size
# Defines read buffer range
b_start = b_end
b_end = b_start + read_size
# Copy data from preload buffer to read buffer
b_view[b_start:b_end] = buffer_view[start:end]
# Updates seek and sync raw
self._seek = seek
self._raw.seek(seek)
# Returns read size
return b_end
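A usage sketch of the standard readinto pattern, assuming f is an open, readable instance of the buffered class this method belongs to:
>>> buf = bytearray(64 * 1024)
>>> n = f.readinto(buf)  # fills buf in place and returns the number of bytes read
>>> chunk = bytes(buf[:n])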
def error(self, message=None):
"""
Delegates to `ArgumentParser.error`
"""
if self.__parser__: # pylint: disable-msg=E1101
self.__parser__.error(message) # pylint: disable-msg=E1101
else:
self.logger.error(message)
sys.exit(2)
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _submit(self, pathfile, filedata, filename):
'''
Submit either a file from disk, or an in-memory file, to the solver service, and
return the request ID associated with the new captcha task.
'''
if pathfile and os.path.exists(pathfile):
files = {'file': open(pathfile, 'rb')}
elif filedata:
assert filename
files = {'file' : (filename, io.BytesIO(filedata))}
else:
raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!")
payload = {
'key' : self.api_key,
'method' : 'post',
'json' : True,
}
self.log.info("Uploading to 2Captcha.com.")
url = self.getUrlFor('input', {})
request = requests.post(url, files=files, data=payload)
if not request.ok:
raise exc.CaptchaSolverFailure("Posting captcha to solve failed!")
resp_json = json.loads(request.text)
return self._process_response(resp_json)
def make_autogen_str():
r"""
Returns:
str:
CommandLine:
python -m utool.util_ipynb --exec-make_autogen_str --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_ipynb import * # NOQA
>>> import utool as ut
>>> result = make_autogen_str()
>>> print(result)
"""
import utool as ut
def get_regen_cmd():
try:
if len(sys.argv) > 0 and ut.checkpath(sys.argv[0]):
# Check if running python command
if ut.is_python_module(sys.argv[0]):
python_exe = ut.python_executable(check=False)
modname = ut.get_modname_from_modpath(sys.argv[0])
new_argv = [python_exe, '-m', modname] + sys.argv[1:]
return ' '.join(new_argv)
except Exception as ex:
ut.printex(ex, iswarning=True)
return ' '.join(sys.argv)
autogenkw = dict(
stamp=ut.timestamp('printable'),
regen_cmd=get_regen_cmd()
#' '.join(sys.argv)
)
return ut.codeblock(
'''
# Autogenerated on {stamp}
# Regen Command:
# {regen_cmd}
#
'''
).format(**autogenkw)
def sync_from_spec(redis, schema):
"""
Takes an input experiment spec and creates/modifies/archives the existing
experiments to match the spec.
If there's an experiment in the spec that currently doesn't exist, it will
be created along with the associated choices.
If there's an experiment in the spec that currently exists, and the set of
choices are different, that experiment's choices will be modified to match
the spec.
If there's an experiment not in the spec that currently exists, it will be
archived.
A spec looks like this:
{
"experiment 1": ["choice 1", "choice 2", "choice 3"],
"experiment 2": ["choice 1", "choice 2"]
}
"""
def get_experiments_dict(active=True):
"""Returns a dictionary of experiment names -> experiment objects"""
return dict((experiment.name, experiment) for experiment in get_experiments(redis, active=active))
# Get the current experiments
active_experiments = get_experiments_dict()
archived_experiments = get_experiments_dict(active=False)
# Get the newly defined experiment names and the names of the experiments
# already setup
new_experiment_names = set(schema.keys())
active_experiment_names = set(active_experiments.keys())
# Find all the experiments that are in the schema and are defined among the
# archived experiments, but not the active ones (we check against active
# experiments to prevent the edge case where an experiment might be defined
# doubly in both active and archived experiments)
unarchivable_experiment_names = (new_experiment_names - active_experiment_names) & set(archived_experiments.keys())
# De-archive the necessary experiments
for unarchivable_experiment_name in unarchivable_experiment_names:
print("- De-archiving %s" % unarchivable_experiment_name)
# Because there is no function to de-archive an experiment, it must
# be done manually
pipe = redis.pipeline(transaction=True)
pipe.sadd(ACTIVE_EXPERIMENTS_REDIS_KEY, unarchivable_experiment_name)
pipe.srem(ARCHIVED_EXPERIMENTS_REDIS_KEY, unarchivable_experiment_name)
pipe.execute()
# Reload the active experiments if we de-archived any
if unarchivable_experiment_names:
active_experiments = get_experiments_dict()
active_experiment_names = set(active_experiments.keys())
# Create the new experiments
for new_experiment_name in new_experiment_names - active_experiment_names:
print("- Creating experiment %s" % new_experiment_name)
experiment = add_experiment(redis, new_experiment_name)
for choice in schema[new_experiment_name]:
print(" - Adding choice %s" % choice)
experiment.add_choice(choice)
# Archive experiments not defined in the schema
for archivable_experiment_name in active_experiment_names - new_experiment_names:
print("- Archiving %s" % archivable_experiment_name)
active_experiments[archivable_experiment_name].archive()
# Update the choices for existing experiments that are also defined in the
# schema
for experiment_name in new_experiment_names & active_experiment_names:
experiment = active_experiments[experiment_name]
new_choice_names = set(schema[experiment_name])
old_choice_names = set(experiment.choice_names)
# Add choices in the schema that do not exist yet
for new_choice_name in new_choice_names - old_choice_names:
print("- Adding choice %s to existing experiment %s" % (new_choice_name, experiment_name))
experiment.add_choice(new_choice_name)
# Remove choices that aren't in the schema
for removable_choice_name in old_choice_names - new_choice_names:
print("- Removing choice %s from existing experiment %s" % (removable_choice_name, experiment_name))
experiment.remove_choice(removable_choice_name)
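A usage sketch, assuming a redis-py client; the spec mirrors the format shown in the docstring:
>>> import redis
>>> conn = redis.StrictRedis(host='localhost', port=6379)
>>> sync_from_spec(conn, {
...     "experiment 1": ["choice 1", "choice 2", "choice 3"],
...     "experiment 2": ["choice 1", "choice 2"],
... })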
def load_sgems_exp_var(filename):
""" read an SGEM experimental variogram into a sequence of
pandas.DataFrames
Parameters
----------
filename : (str)
an SGEMS experimental variogram XML file
Returns
-------
dfs : dict
a dict of pandas.DataFrames of x, y, pairs for each
division in the experimental variogram, keyed by title
"""
assert os.path.exists(filename)
import xml.etree.ElementTree as etree
tree = etree.parse(filename)
root = tree.getroot()
dfs = {}
for variogram in root:
#print(variogram.tag)
for attrib in variogram:
#print(attrib.tag,attrib.text)
if attrib.tag == "title":
title = attrib.text.split(',')[0].split('=')[-1]
elif attrib.tag == "x":
x = [float(i) for i in attrib.text.split()]
elif attrib.tag == "y":
y = [float(i) for i in attrib.text.split()]
elif attrib.tag == "pairs":
pairs = [int(i) for i in attrib.text.split()]
for item in attrib:
print(item,item.tag)
df = pd.DataFrame({"x":x,"y":y,"pairs":pairs})
df.loc[df.y<0.0,"y"] = np.NaN
dfs[title] = df
return dfs
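A usage sketch with a hypothetical filename; each entry of the returned dict is a DataFrame with x, y and pairs columns, keyed by the variogram title:
>>> dfs = load_sgems_exp_var('variogram.xml')
>>> for title, df in dfs.items():
...     print(title, df.shape)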
async def fetch_messages(self, selected: SelectedMailbox,
sequence_set: SequenceSet,
attributes: FrozenSet[FetchAttribute]) \
-> Tuple[Iterable[Tuple[int, MessageInterface]], SelectedMailbox]:
"""Get a list of loaded message objects corresponding to given sequence
set.
Args:
selected: The selected mailbox session.
sequence_set: Sequence set of message sequences or UIDs.
attributes: Fetch attributes for the messages.
Raises:
:class:`~pymap.exceptions.MailboxNotFound`
"""
...
def slices(src_path):
''' Return slices as a flat list '''
pages = list_slices(src_path)
slices = []
for page in pages:
slices.extend(page.slices)
return slices
def addVariantAnnotationSet(self, variantAnnotationSet):
"""
Adds the specified variantAnnotationSet to this dataset.
"""
id_ = variantAnnotationSet.getId()
self._variantAnnotationSetIdMap[id_] = variantAnnotationSet
self._variantAnnotationSetIds.append(id_)
def add_external_reference(self,ext_ref):
"""
Adds an external reference to the role
@param ext_ref: the external reference object
@type ext_ref: L{CexternalReference}
"""
# check if the externalReferences sublayer exists for the role, and create it if needed
node_ext_refs = self.node.find('externalReferences')
ext_refs = None
if node_ext_refs == None:
ext_refs = CexternalReferences()
self.node.append(ext_refs.get_node())
else:
ext_refs = CexternalReferences(node_ext_refs)
ext_refs.add_external_reference(ext_ref)
def expects_call(self):
"""The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
self._callable = ExpectedCall(self, call_name=self._name,
callable=True)
return self
def _execute(self, query, model, adapter, raw=False):
"""
We have to override this because in some situations
(such as with Filebackend, or any dummy backend)
we have to parse / adapt results *before* we can execute the query
"""
values = self.load(model, adapter)
return IterableStore(values=values)._execute(query, model=model, adapter=None, raw=raw)
def delete(self, record_key):
''' a method to delete a record from S3
:param record_key: string with key of record
:return: string reporting outcome
'''
title = '%s.delete' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# delete record
try:
self.s3.delete_record(self.bucket_name, record_key)
except:
if not self.exists(record_key):
exit_msg = '%s does not exist.' % record_key
return exit_msg
raise
exit_msg = '%s has been deleted.' % record_key
return exit_msg
def _find_supported(self, features, mechanism_classes):
"""
Find the first mechanism class which supports a mechanism announced in
the given stream features.
:param features: Current XMPP stream features
:type features: :class:`~.nonza.StreamFeatures`
:param mechanism_classes: SASL mechanism classes to use
:type mechanism_classes: iterable of :class:`SASLMechanism`
sub\\ *classes*
:raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
SASL support
:return: the :class:`SASLMechanism` subclass to use and a token
:rtype: pair
Return a supported SASL mechanism class, by looking at the given
stream features `features`.
If no matching mechanism is found, ``(None, None)`` is
returned. Otherwise, a pair consisting of the mechanism class and the
value returned by the respective
:meth:`~.sasl.SASLMechanism.any_supported` method is returned. The
latter is an opaque token which must be passed to the `token` argument
of :meth:`_execute` or :meth:`aiosasl.SASLMechanism.authenticate`.
"""
try:
mechanisms = features[SASLMechanisms]
except KeyError:
logger.error("No sasl mechanisms: %r", list(features))
raise errors.SASLUnavailable(
"Remote side does not support SASL") from None
remote_mechanism_list = mechanisms.get_mechanism_list()
for our_mechanism in mechanism_classes:
token = our_mechanism.any_supported(remote_mechanism_list)
if token is not None:
return our_mechanism, token
return None, None
def send_message(self, output):
"""
Send a message to the socket
"""
file_system_event = None
if self.my_action_input:
file_system_event = self.my_action_input.file_system_event or None
output_action = ActionInput(file_system_event,
output,
self.name,
"*")
Global.MESSAGE_DISPATCHER.send_message(output_action)
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
def build_documentation_lines(self):
"""Build a parameter documentation string that can appended to the
docstring of a function that uses this :class:`~.Filters` instance
to build filters.
"""
return [
line_string for key in sorted(self.keys)
for line_string in self.build_paramter_string(key)
]
def manage_itstat(self):
"""Compute, record, and display iteration statistics."""
# Extract and record iteration stats
itst = self.iteration_stats()
self.itstat.append(itst)
self.display_status(self.fmtstr, itst)
def fill_datetime(self):
"""Returns when the slot was filled.
Returns:
A datetime.datetime.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
"""
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._fill_datetime
def templates(self, name=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html>`_
:arg name: A pattern that returned template names must match
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'templates', name), params=params)
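A usage sketch, assuming client is an elasticsearch-py client whose cat namespace exposes the method above; the template pattern and display columns are hypothetical:
>>> client.cat.templates(name='logstash-*', params={'v': 'true', 'h': 'name,order'})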
def _read_para_overlay_ttl(self, code, cbit, clen, *, desc, length, version):
"""Read HIP OVERLAY_TTL parameter.
Structure of HIP OVERLAY_TTL parameter [RFC 6078]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| TTL | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 overlay_ttl.type Parameter Type
1 15 overlay_ttl.critical Critical Bit
2 16 overlay_ttl.length Length of Contents
4 32 overlay_ttl.ttl TTL
6 48 - Reserved
"""
if clen != 4:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
_ttln = self._read_unpack(2)
overlay_ttl = dict(
type=desc,
critical=cbit,
length=clen,
ttl=_ttln,
)
return overlay_ttl
def _process_status(self, status):
""" Process latest status update. """
self._screen_id = status.get(ATTR_SCREEN_ID)
self.status_update_event.set()
def c32address(version, hash160hex):
"""
>>> c32address(22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
'SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7'
>>> c32address(0, '0000000000000000000000000000000000000000')
'S0000000000000000000002AA028H'
>>> c32address(31, '0000000000000000000000000000000000000001')
'SZ00000000000000000005HZ3DVN'
>>> c32address(20, '1000000000000000000000000000000000000001')
'SM80000000000000000000000000000004WBEWKC'
>>> c32address(26, '1000000000000000000000000000000000000000')
'ST80000000000000000000000000000002YBNPV3'
"""
if not re.match(r'^[0-9a-fA-F]{40}$', hash160hex):
raise ValueError('Invalid argument: not a hash160 hex string')
c32string = c32checkEncode(version, hash160hex)
return 'S{}'.format(c32string)
def sign_blob(
self,
name,
payload,
delegates=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Signs a blob using a service account's system-managed private key.
Example:
>>> from google.cloud import iam_credentials_v1
>>>
>>> client = iam_credentials_v1.IAMCredentialsClient()
>>>
>>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')
>>>
>>> # TODO: Initialize `payload`:
>>> payload = b''
>>>
>>> response = client.sign_blob(name, payload)
Args:
name (str): The resource name of the service account for which the credentials are
requested, in the following format:
``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``.
payload (bytes): The bytes to sign.
delegates (list[str]): The sequence of service accounts in a delegation chain. Each service
account must be granted the ``roles/iam.serviceAccountTokenCreator``
role on its next service account in the chain. The last service account
in the chain must be granted the
``roles/iam.serviceAccountTokenCreator`` role on the service account
that is specified in the ``name`` field of the request.
The delegates must have the following format:
``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.iam_credentials_v1.types.SignBlobResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "sign_blob" not in self._inner_api_calls:
self._inner_api_calls[
"sign_blob"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.sign_blob,
default_retry=self._method_configs["SignBlob"].retry,
default_timeout=self._method_configs["SignBlob"].timeout,
client_info=self._client_info,
)
request = common_pb2.SignBlobRequest(
name=name, payload=payload, delegates=delegates
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["sign_blob"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def p_casecontent_condition_single(self, p):
'casecontent_condition : casecontent_condition COMMA expression'
p[0] = p[1] + (p[3],)
p.set_lineno(0, p.lineno(1))
def _extract_shape(idx, x, j, cur_center):
"""
>>> _extract_shape(np.array([0,1,2]), np.array([[1,2,3], [4,5,6]]), 1, np.array([0,3,4]))
array([-1., 0., 1.])
>>> _extract_shape(np.array([0,1,2]), np.array([[-1,2,3], [4,-5,6]]), 1, np.array([0,3,4]))
array([-0.96836405, 1.02888681, -0.06052275])
>>> _extract_shape(np.array([1,0,1,0]), np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 0, np.array([0,0,0,0]))
array([-1.2089303 , -0.19618238, 0.19618238, 1.2089303 ])
>>> _extract_shape(np.array([0,0,1,0]), np.array([[1,2,3,4],[0,1,2,3],[-1,1,-1,1],[1,2,2,3]]), 0, np.array([-1.2089303,-0.19618238,0.19618238,1.2089303]))
array([-1.19623139, -0.26273649, 0.26273649, 1.19623139])
"""
_a = []
for i in range(len(idx)):
if idx[i] == j:
if cur_center.sum() == 0:
opt_x = x[i]
else:
_, opt_x = _sbd(cur_center, x[i])
_a.append(opt_x)
a = np.array(_a)
if len(a) == 0:
return np.zeros((1, x.shape[1]))
columns = a.shape[1]
y = zscore(a, axis=1, ddof=1)
s = np.dot(y.transpose(), y)
p = np.empty((columns, columns))
p.fill(1.0/columns)
p = np.eye(columns) - p
m = np.dot(np.dot(p, s), p)
_, vec = eigh(m)
centroid = vec[:, -1]
finddistance1 = math.sqrt(((a[0] - centroid) ** 2).sum())
finddistance2 = math.sqrt(((a[0] + centroid) ** 2).sum())
if finddistance1 >= finddistance2:
centroid *= -1
return zscore(centroid, ddof=1)
def get_library_config(name):
"""Get distutils-compatible extension extras for the given library.
This requires ``pkg-config``.
"""
try:
proc = Popen(['pkg-config', '--cflags', '--libs', name], stdout=PIPE, stderr=PIPE)
except OSError:
print('pkg-config is required for building PyAV')
exit(1)
raw_cflags, err = proc.communicate()
if proc.wait():
return
known, unknown = parse_cflags(raw_cflags.decode('utf8'))
if unknown:
print("pkg-config returned flags we don't understand: {}".format(unknown))
exit(1)
return known
def _add_parameter(self, parameter):
'''
Force adds a `Parameter` object to the instance.
'''
if isinstance(parameter, MethodParameter):
# create a bound instance of the MethodParameter
parameter = parameter.bind(alloy=self)
self._parameters[parameter.name] = parameter
for alias in parameter.aliases:
self._aliases[alias] = parameter
def footprint(sobject):
"""
Get the I{virtual footprint} of the object.
This is really a count of the attributes in the branch with a significant
value.
@param sobject: A suds object.
@type sobject: L{Object}
@return: The branch footprint.
@rtype: int
"""
n = 0
for a in sobject.__keylist__:
v = getattr(sobject, a)
if v is None:
continue
if isinstance(v, Object):
n += footprint(v)
continue
if hasattr(v, '__len__'):
if len(v):
n += 1
continue
n += 1
return n
def find_keys(self, regex, bucket_name=None):
"""Finds a list of S3 keys matching the passed regex
Given a regular expression, this method searches the S3 bucket
for matching keys, and returns an array of strings for matched
keys, or an empty array if none are found.
:param regex: (str) Regular expression to use in the key search
:param bucket_name: (str) Name of bucket to search (optional)
:return: Array of strings containing matched S3 keys
"""
log = logging.getLogger(self.cls_logger + '.find_keys')
matched_keys = []
if not isinstance(regex, basestring):
log.error('regex argument is not a string, found: {t}'.format(t=regex.__class__.__name__))
return None
# Determine which bucket to use
if bucket_name is None:
s3bucket = self.bucket
else:
log.debug('Using the provided S3 bucket: {n}'.format(n=bucket_name))
s3bucket = self.s3resource.Bucket(bucket_name)
log.info('Looking up S3 keys based on regex: {r}'.format(r=regex))
for item in s3bucket.objects.all():
log.debug('Checking if regex matches key: {k}'.format(k=item.key))
match = re.search(regex, item.key)
if match:
matched_keys.append(item.key)
log.info('Found matching keys: {k}'.format(k=matched_keys))
return matched_keys | Finds a list of S3 keys matching the passed regex
Given a regular expression, this method searches the S3 bucket
for matching keys, and returns an array of strings for matched
keys, or an empty array if none are found.
:param regex: (str) Regular expression to use in the key search
:param bucket_name: (str) Name of bucket to search (optional)
:return: Array of strings containing matched S3 keys |
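A hypothetical usage sketch; ``s3util`` stands in for an initialized instance of the class that defines find_keys(), and the key prefix below is made up.

matched = s3util.find_keys(regex=r'^backups/.*\.tar\.gz$')
if matched is None:
    print('regex argument was not a string')
elif not matched:
    print('no keys matched the expression')
else:
    print('matched {n} keys'.format(n=len(matched)))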
def namedb_get_name_DID_info(cur, name, block_height):
"""
Given a name and a DB cursor, find out its DID info at the given block.
Returns {'name_type': ..., 'address': ..., 'index': ...} on success
Return None if there is no such name
"""
# get the latest creator addresses for this name, as well as where this name was created in the blockchain
sql = "SELECT name_records.name,history.creator_address,history.block_id,history.vtxindex FROM name_records JOIN history ON name_records.name = history.history_id " + \
"WHERE name = ? AND creator_address IS NOT NULL AND history.block_id <= ? ORDER BY history.block_id DESC, history.vtxindex DESC LIMIT 1;"
args = (name,block_height)
# log.debug(namedb_format_query(sql, args))
rows = namedb_query_execute(cur, sql, args)
row = rows.fetchone()
if row is None:
return None
creator_address = row['creator_address']
latest_block_height = row['block_id']
latest_vtxindex = row['vtxindex']
# how many names has this address created up to this name?
query = "SELECT COUNT(*) FROM name_records JOIN history ON name_records.name = history.history_id " + \
"WHERE history.creator_address = ? AND (history.block_id < ? OR (history.block_id = ? AND history.vtxindex <= ?));"
args = (creator_address,latest_block_height,latest_block_height,latest_vtxindex)
# log.debug(namedb_format_query(query, args))
count_rows = namedb_query_execute(cur, query, args)
count_row = count_rows.fetchone()
if count_row is None:
return None
count = count_row['COUNT(*)'] - 1
return {'name_type': 'name', 'address': str(creator_address), 'index': count} | Given a name and a DB cursor, find out its DID info at the given block.
Returns {'name_type': ..., 'address': ..., 'index': ...} on success
Return None if there is no such name |
def try_lock(lock):
"""Attempts to acquire a lock, and auto releases if acquired (on exit)."""
# NOTE(harlowja): the keyword argument for 'blocking' does not work
# in py2.x and only is fixed in py3.x (this adjustment is documented
# and/or debated in http://bugs.python.org/issue10789); so we'll just
# stick to the format that works in both (oddly the keyword argument
# works in py2.x but only with reentrant locks).
was_locked = lock.acquire(False)
try:
yield was_locked
finally:
if was_locked:
lock.release() | Attempts to acquire a lock, and auto releases if acquired (on exit). |
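A usage sketch for the non-blocking acquire pattern, assuming the generator above is wrapped with contextlib.contextmanager in its original module (the decorator is not shown here).

import threading

lock = threading.Lock()
with try_lock(lock) as was_locked:
    if was_locked:
        pass  # lock held: do the protected work
    else:
        pass  # lock busy: skip the work or retry later
# if the lock was acquired, the finally clause releases it on exit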
def queue_actions(self, source, actions, event_args=None):
"""
Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards.
"""
source.event_args = event_args
ret = self.trigger_actions(source, actions)
source.event_args = None
return ret | Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards. |
def save(self, filename=None):
"""
Save the point histories to sqlite3 database.
Save the device object properties to a pickle file so the device can be reloaded.
"""
if filename:
if ".db" in filename:
filename = filename.split(".")[0]
self.properties.db_name = filename
else:
self.properties.db_name = "{}".format(self.properties.name)
# Does file exist? If so, append data
if os.path.isfile("{}.db".format(self.properties.db_name)):
his = self._read_from_sql(
'select * from "{}"'.format("history"), self.properties.db_name
)
his.index = his["index"].apply(Timestamp)
try:
last = his.index[-1]
df_to_backup = self.backup_histories_df()[last:]
except IndexError:
df_to_backup = self.backup_histories_df()
else:
self._log.debug("Creating a new backup database")
df_to_backup = self.backup_histories_df()
# DataFrames that will be saved to SQL
with contextlib.closing(
sqlite3.connect("{}.db".format(self.properties.db_name))
) as con:
sql.to_sql(
df_to_backup,
name="history",
con=con,
index_label="index",
index=True,
if_exists="append",
)
# Saving other properties to a pickle file...
prop_backup = {}
prop_backup["device"] = self.dev_properties_df()
prop_backup["points"] = self.points_properties_df()
with open("{}.bin".format(self.properties.db_name), "wb") as file:
pickle.dump(prop_backup, file)
self._log.info("Device saved to {}.db".format(self.properties.db_name)) | Save the point histories to sqlite3 database.
Save the device object properties to a pickle file so the device can be reloaded. |
def summarize_provenance(self):
"""Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
    I.e. if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
"""
provenance_per_cache = self.summarize_provenance_per_cache()
summary_provenance = None
num_discrepant = 0
for cache in provenance_per_cache:
if not(summary_provenance):
## pick arbitrary provenance & call this the "summary" (for now)
summary_provenance = provenance_per_cache[cache]
summary_provenance_name = cache
## for each cache, check equivalence with summary_provenance
num_discrepant += compare_provenance(
provenance_per_cache[cache],
summary_provenance,
left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name),
right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache)
)
## compare provenance across cached items
if num_discrepant == 0:
prov = summary_provenance ## report summary provenance if exists
else:
prov = provenance_per_cache ## otherwise, return provenance per cache
return(prov) | Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
I.e. if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir. |
def source_list(source, source_hash, saltenv):
'''
Check the source list and return the source to use
CLI Example:
.. code-block:: bash
salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
'''
contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
if contextkey in __context__:
return __context__[contextkey]
# get the master file list
if isinstance(source, list):
mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
for single in source:
if isinstance(single, dict):
single = next(iter(single))
path, senv = salt.utils.url.parse(single)
if senv:
mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]
ret = None
for single in source:
if isinstance(single, dict):
# check the proto, if it is http or ftp then download the file
# to check, if it is salt then check the master list
# if it is a local file, check if the file exists
if len(single) != 1:
continue
single_src = next(iter(single))
single_hash = single[single_src] if single[single_src] else source_hash
urlparsed_single_src = _urlparse(single_src)
# Fix this for Windows
if salt.utils.platform.is_windows():
# urlparse doesn't handle a local Windows path without the
# protocol indicator (file://). The scheme will be the
# drive letter instead of the protocol. So, we'll add the
# protocol and re-parse
if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
urlparsed_single_src = _urlparse('file://' + single_src)
proto = urlparsed_single_src.scheme
if proto == 'salt':
path, senv = salt.utils.url.parse(single_src)
if not senv:
senv = saltenv
if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
ret = (single_src, single_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single_src, single_hash)
break
elif proto == 'file' and (
os.path.exists(urlparsed_single_src.netloc) or
os.path.exists(urlparsed_single_src.path) or
os.path.exists(os.path.join(
urlparsed_single_src.netloc,
urlparsed_single_src.path))):
ret = (single_src, single_hash)
break
elif single_src.startswith(os.sep) and os.path.exists(single_src):
ret = (single_src, single_hash)
break
elif isinstance(single, six.string_types):
path, senv = salt.utils.url.parse(single)
if not senv:
senv = saltenv
if (path, senv) in mfiles or (path, senv) in mdirs:
ret = (single, source_hash)
break
urlparsed_src = _urlparse(single)
if salt.utils.platform.is_windows():
# urlparse doesn't handle a local Windows path without the
# protocol indicator (file://). The scheme will be the
# drive letter instead of the protocol. So, we'll add the
# protocol and re-parse
if urlparsed_src.scheme.lower() in string.ascii_lowercase:
urlparsed_src = _urlparse('file://' + single)
proto = urlparsed_src.scheme
if proto == 'file' and (
os.path.exists(urlparsed_src.netloc) or
os.path.exists(urlparsed_src.path) or
os.path.exists(os.path.join(
urlparsed_src.netloc,
urlparsed_src.path))):
ret = (single, source_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single, source_hash)
break
elif single.startswith(os.sep) and os.path.exists(single):
ret = (single, source_hash)
break
if ret is None:
# None of the list items matched
raise CommandExecutionError(
'none of the specified sources were found'
)
else:
ret = (source, source_hash)
__context__[contextkey] = ret
return ret | Check the source list and return the source to use
CLI Example:
.. code-block:: bash
salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base |
def relabel(self, qubits: Qubits) -> 'Gate':
"""Return a copy of this Gate with new qubits"""
gate = copy(self)
gate.vec = gate.vec.relabel(qubits)
return gate | Return a copy of this Gate with new qubits |
def get(self, sid):
"""
Constructs a OriginationUrlContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
"""
return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, ) | Constructs a OriginationUrlContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext |
def new_job(self, task, inputdata, callback, launcher_name="Unknown", debug=False, ssh_callback=None):
""" Add a new job. Every callback will be called once and only once.
:type task: Task
:param inputdata: input from the student
:type inputdata: Storage or dict
:param callback: a function that will be called asynchronously in the client's process, with the results.
            its signature must be (result, grade, problems, tests, custom, archive), where:
            result is itself a tuple containing the result string and the main feedback (i.e. ('success', 'You succeeded'));
grade is a number between 0 and 100 indicating the grade of the users;
problems is a dict of tuple, in the form {'problemid': result};
test is a dict of tests made in the container
custom is a dict containing random things set in the container
archive is either None or a bytes containing a tgz archive of files from the job
:type callback: __builtin__.function or __builtin__.instancemethod
:param launcher_name: for informational use
:type launcher_name: str
:param debug: Either True(outputs more info), False(default), or "ssh" (starts a remote ssh server. ssh_callback needs to be defined)
:type debug: bool or string
:param ssh_callback: a callback function that will be called with (host, port, password), the needed credentials to connect to the
remote ssh server. May be called with host, port, password being None, meaning no session was open.
:type ssh_callback: __builtin__.function or __builtin__.instancemethod or None
:return: the new job id
"""
job_id = str(uuid.uuid4())
if debug == "ssh" and ssh_callback is None:
self._logger.error("SSH callback not set in %s/%s", task.get_course_id(), task.get_id())
callback(("crash", "SSH callback not set."), 0.0, {}, {}, {}, None, "", "")
return
# wrap ssh_callback to ensure it is called at most once, and that it can always be called to simplify code
ssh_callback = _callable_once(ssh_callback if ssh_callback is not None else lambda _1, _2, _3: None)
environment = task.get_environment()
if environment not in self._available_containers:
self._logger.warning("Env %s not available for task %s/%s", environment, task.get_course_id(), task.get_id())
ssh_callback(None, None, None) # ssh_callback must be called once
callback(("crash", "Environment not available."), 0.0, {}, {}, "", {}, None, "", "")
return
enable_network = task.allow_network_access_grading()
try:
limits = task.get_limits()
time_limit = int(limits.get('time', 20))
hard_time_limit = int(limits.get('hard_time', 3 * time_limit))
mem_limit = int(limits.get('memory', 200))
except:
self._logger.exception("Cannot retrieve limits for task %s/%s", task.get_course_id(), task.get_id())
ssh_callback(None, None, None) # ssh_callback must be called once
callback(("crash", "Error while reading task limits"), 0.0, {}, {}, "", {}, None, "", "")
return
msg = ClientNewJob(job_id, task.get_course_id(), task.get_id(), inputdata, environment, enable_network, time_limit,
hard_time_limit, mem_limit, debug, launcher_name)
self._loop.call_soon_threadsafe(asyncio.ensure_future, self._create_transaction(msg, task=task, callback=callback,
ssh_callback=ssh_callback))
return job_id | Add a new job. Every callback will be called once and only once.
:type task: Task
:param inputdata: input from the student
:type inputdata: Storage or dict
:param callback: a function that will be called asynchronously in the client's process, with the results.
its signature must be (result, grade, problems, tests, custom, archive), where:
result is itself a tuple containing the result string and the main feedback (i.e. ('success', 'You succeeded'));
grade is a number between 0 and 100 indicating the grade of the users;
problems is a dict of tuple, in the form {'problemid': result};
test is a dict of tests made in the container
custom is a dict containing random things set in the container
archive is either None or a bytes containing a tgz archive of files from the job
:type callback: __builtin__.function or __builtin__.instancemethod
:param launcher_name: for informational use
:type launcher_name: str
:param debug: Either True(outputs more info), False(default), or "ssh" (starts a remote ssh server. ssh_callback needs to be defined)
:type debug: bool or string
:param ssh_callback: a callback function that will be called with (host, port, password), the needed credentials to connect to the
remote ssh server. May be called with host, port, password being None, meaning no session was open.
:type ssh_callback: __builtin__.function or __builtin__.instancemethod or None
:return: the new job id |
def load_items(self, items):
"""Loads any number of items in chunks, handling continuation tokens.
:param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
"""
loaded_items = {}
requests = collections.deque(create_batch_get_chunks(items))
while requests:
request = requests.pop()
try:
response = self.dynamodb_client.batch_get_item(RequestItems=request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while loading items.") from error
# Accumulate results
for table_name, table_items in response.get("Responses", {}).items():
loaded_items.setdefault(table_name, []).extend(table_items)
# Push additional request onto the deque.
# "UnprocessedKeys" is {} if this request is done
if response["UnprocessedKeys"]:
requests.append(response["UnprocessedKeys"])
return loaded_items | Loads any number of items in chunks, handling continuation tokens.
:param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`. |
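For reference, the shape of a single chunk handed to batch_get_item as ``RequestItems``; the table name and key attributes below are made up.

request_chunk = {
    "users": {
        "Keys": [
            {"id": {"S": "user-1"}},
            {"id": {"S": "user-2"}},
        ]
    }
}
# The response then carries found items under response["Responses"]["users"],
# and response["UnprocessedKeys"] is {} once nothing is left to retry.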
def v_type_extension(ctx, stmt):
"""verify that the extension matches the extension definition"""
(modulename, identifier) = stmt.keyword
revision = stmt.i_extension_revision
module = modulename_to_module(stmt.i_module, modulename, revision)
if module is None:
return
if identifier not in module.i_extensions:
if module.i_modulename == stmt.i_orig_module.i_modulename:
# extension defined in current submodule
if identifier not in stmt.i_orig_module.i_extensions:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
(identifier, module.arg))
return
else:
stmt.i_extension = stmt.i_orig_module.i_extensions[identifier]
else:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
(identifier, module.arg))
return
else:
stmt.i_extension = module.i_extensions[identifier]
ext_arg = stmt.i_extension.search_one('argument')
if stmt.arg is not None and ext_arg is None:
err_add(ctx.errors, stmt.pos, 'EXTENSION_ARGUMENT_PRESENT',
identifier)
elif stmt.arg is None and ext_arg is not None:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NO_ARGUMENT_PRESENT',
identifier) | verify that the extension matches the extension definition |
def _init_records(self, record_types):
"""Initalize all records for this form."""
for record_type in record_types:
# This conditional was inserted on 7/11/14. It may prove problematic:
if str(record_type) not in self._my_map['recordTypeIds']:
record_initialized = self._init_record(str(record_type))
if record_initialized:
                    self._my_map['recordTypeIds'].append(str(record_type)) | Initialize all records for this form.
def _push_processor(self, proc, index=None):
"""
Pushes a processor onto the processor stack. Processors are
objects with proc_request(), proc_response(), and/or
proc_exception() methods, which can intercept requests,
responses, and exceptions. When a method invokes the send()
method on a request, the proc_request() method on each
processor is called in turn. Likewise, responses are
processed by the proc_response() method of each processor, in
the reverse order of the calls to proc_request(). The
proc_exception() methods are called if an exception is raised
instead of a response being returned.
Note that this method can append a processor to the stack, if
the index parameter is None (the default), or a processor may
be inserted into the stack by specifying an integer index.
For more information about processors, see the
requiem.Processor class.
"""
if index is None:
self._procstack.append(proc)
else:
self._procstack.insert(index, proc) | Pushes a processor onto the processor stack. Processors are
objects with proc_request(), proc_response(), and/or
proc_exception() methods, which can intercept requests,
responses, and exceptions. When a method invokes the send()
method on a request, the proc_request() method on each
processor is called in turn. Likewise, responses are
processed by the proc_response() method of each processor, in
the reverse order of the calls to proc_request(). The
proc_exception() methods are called if an exception is raised
instead of a response being returned.
Note that this method can append a processor to the stack, if
the index parameter is None (the default), or a processor may
be inserted into the stack by specifying an integer index.
For more information about processors, see the
requiem.Processor class. |
def log(self, n=None, **kwargs):
"""
Run the repository log command
Returns:
str: output of log command (``git log -n <n> <--kwarg=value>``)
"""
kwargs['format'] = kwargs.pop('template', self.template)
cmd = ['git', 'log']
if n:
cmd.append('-n%d' % n)
cmd.extend(
(('--%s=%s' % (k, v))
for (k, v) in iteritems(kwargs)))
try:
output = self.sh(cmd, shell=False)
if "fatal: bad default revision 'HEAD'" in output:
return output
return output
        except Exception:
return | Run the repository log command
Returns:
str: output of log command (``git log -n <n> <--kwarg=value>``) |
async def create(self, token):
"""Creates a new token with a given policy
Parameters:
token (Object): Token specification
Returns:
Object: token ID
The create endpoint is used to make a new token.
A token has a name, a type, and a set of ACL rules.
The request body may take the form::
{
"Name": "my-app-token",
"Type": "client",
"Rules": ""
}
None of the fields are mandatory. The **Name** and **Rules** fields
default to being blank, and the **Type** defaults to "client".
**Name** is opaque to Consul. To aid human operators, it should
be a meaningful indicator of the ACL's purpose.
**Type** is either **client** or **management**. A management token
is comparable to a root user and has the ability to perform any action
including creating, modifying and deleting ACLs.
**ID** field may be provided, and if omitted a random UUID will be
generated.
The format of **Rules** is
`documented here <https://www.consul.io/docs/internals/acl.html>`_.
A successful response body will return the **ID** of the newly
created ACL, like so::
{
"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
}
"""
token = encode_token(token)
response = await self._api.put("/v1/acl/create", data=token)
return response.body | Creates a new token with a given policy
Parameters:
token (Object): Token specification
Returns:
Object: token ID
The create endpoint is used to make a new token.
A token has a name, a type, and a set of ACL rules.
The request body may take the form::
{
"Name": "my-app-token",
"Type": "client",
"Rules": ""
}
None of the fields are mandatory. The **Name** and **Rules** fields
default to being blank, and the **Type** defaults to "client".
**Name** is opaque to Consul. To aid human operators, it should
be a meaningful indicator of the ACL's purpose.
**Type** is either **client** or **management**. A management token
is comparable to a root user and has the ability to perform any action
including creating, modifying and deleting ACLs.
**ID** field may be provided, and if omitted a random UUID will be
generated.
The format of **Rules** is
`documented here <https://www.consul.io/docs/internals/acl.html>`_.
A successful response body will return the **ID** of the newly
created ACL, like so::
{
"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
} |
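A hypothetical call site; ``client`` stands in for an initialized API wrapper exposing this coroutine, so the attribute path is an assumption.

async def make_token(client):
    result = await client.acl.create({
        "Name": "my-app-token",
        "Type": "client",
        "Rules": "",
    })
    return result  # e.g. {"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"}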
def get_short_reads(vals):
(args,txome,seed,chunk) = vals
  # fast forward some amount
"""Emit the short reads first"""
txe = TranscriptomeEmitter(txome,TranscriptomeEmitter.Options(seed=seed))
if args.weights:
weights = {}
if args.weights[-3:]=='.gz': inf = gzip.open(args.weights)
else: inf = open(args.weights)
for line in inf:
f = line.rstrip().split("\t")
weights[f[0]] = float(f[1])
txs = {}
for tx in txome.transcripts: txs[tx.name] = tx.length
for name in weights:
weights[name] *= txs[name]
txe.set_weights_by_dict(weights)
else:
weights = {}
txs = {}
for tx in txome.transcripts: txs[tx.name] = tx.length
txe.set_weights_by_dict(weights)
reademit = ReadEmitter(txe)
shortreads = []
sp = args.short_read_insert_size
reademit.cutter.set_custom(sp[0],sp[1],sp[2])
if args.short_read_error_rate:
emfr = ErrorMakerFlatRate(rate=args.short_read_error_rate,rand=reademit.options.rand)
reademit.add_error_maker(emfr)
for i in range(0,chunk):
e = reademit.emit(args.short_read_length)
shortreads.append(e)
return shortreads | Emit the short reads first |
def parse_lines(lines: [str], units: Units, use_na: bool = True) -> [dict]: # type: ignore
"""
Returns a list of parsed line dictionaries
"""
parsed_lines = []
prob = ''
while lines:
raw_line = lines[0].strip()
line = core.sanitize_line(raw_line)
# Remove prob from the beginning of a line
if line.startswith('PROB'):
# Add standalone prob to next line
if len(line) == 6:
prob = line
line = ''
# Add to current line
elif len(line) > 6:
prob = line[:6]
line = line[6:].strip()
if line:
parsed_line = (parse_na_line if use_na else parse_in_line)(line, units)
for key in ('start_time', 'end_time'):
parsed_line[key] = core.make_timestamp(parsed_line[key]) # type: ignore
parsed_line['probability'] = core.make_number(prob[4:]) # type: ignore
parsed_line['raw'] = raw_line
parsed_line['sanitized'] = prob + ' ' + line if prob else line
prob = ''
parsed_lines.append(parsed_line)
lines.pop(0)
return parsed_lines | Returns a list of parsed line dictionaries |
def apply_modification(self):
"""Modifications on the right side need to be committed"""
self.__changing_model = True
if self.adding_model: self.model.add(self.adding_model)
elif self.editing_model and self.editing_iter:
# notifies the currencies model
path = self.model.get_path(self.editing_iter)
self.model.row_changed(path, self.editing_iter)
pass
self.view.remove_currency_view()
self.adding_model = None
self.editing_model = None
self.editing_iter = None
self.curreny = None
self.unselect()
self.__changing_model = False
return | Modifications on the right side need to be committed |
def title_line(text):
"""Returns a string that represents the
text as a title blurb
"""
columns = shutil.get_terminal_size()[0]
start = columns // 2 - len(text) // 2
output = '='*columns + '\n\n' + \
' ' * start + str(text) + "\n\n" + \
'='*columns + '\n'
return output | Returns a string that represents the
text as a title blurb |
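Usage sketch: the banner spans the current terminal width, with the text roughly centred between the two '=' rules.

banner = title_line('Results')
print(banner)  # full-width '=' rule, blank line, centred 'Results', blank line, '=' rule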
def density_contour(self, *args, **kwargs):
"""
Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.) and plots contour lines of
the resulting density distribution.
Parameters
----------
*args : A variable number of sequences of measurements.
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake
measurements require three parameters, thus the variable number of
arguments.) The ``measurement`` kwarg controls how these arguments
are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips
of planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes
in the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units
are in numbers of standard deviations by which the density
estimate differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points
within a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are
in points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher
sigmas will lead to more smoothing of the resulting density
distribution. This parameter only applies to Kamb-based methods.
Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single
int is given, it is interpreted as an NxN grid. If a tuple of ints
is given it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The
array will be normalized to sum to 1, so absolute value of the
weights do not affect the result. Defaults to None.
**kwargs
Additional keyword arguments are passed on to matplotlib's
`contour` function.
Returns
-------
A matplotlib ContourSet.
See Also
--------
mplstereonet.density_grid
mplstereonet.StereonetAxes.density_contourf
matplotlib.pyplot.contour
matplotlib.pyplot.clabel
Examples
--------
Plot density contours of poles to the specified planes using a
modified Kamb method with exponential smoothing [1]_.
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips)
Plot density contours of a set of linear orientation measurements.
>>> plunges, bearings = [-10, 20, -30], [120, 315, 86]
>>> ax.density_contour(plunges, bearings, measurement='lines')
Plot density contours of a set of rake measurements.
>>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9]
>>> ax.density_contour(strikes, dips, rakes, measurement='rakes')
Plot density contours of a set of "raw" longitudes and latitudes.
>>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45])
>>> ax.density_contour(lon, lat, measurement='radians')
Plot density contours of poles to planes using a Kamb method [2]_
with the density estimated on a 10x10 grid (in long-lat space)
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips, method='kamb', gridsize=10)
Plot density contours of poles to planes with contours at [1,2,3]
standard deviations.
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips, levels=[1,2,3])
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
"""
lon, lat, totals, kwargs = self._contour_helper(args, kwargs)
return self.contour(lon, lat, totals, **kwargs) | Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.) and plots contour lines of
the resulting density distribution.
Parameters
----------
*args : A variable number of sequences of measurements.
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake
measurements require three parameters, thus the variable number of
arguments.) The ``measurement`` kwarg controls how these arguments
are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips
of planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes
in the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units
are in numbers of standard deviations by which the density
estimate differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points
within a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are
in points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher
sigmas will lead to more smoothing of the resulting density
distribution. This parameter only applies to Kamb-based methods.
Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single
int is given, it is interpreted as an NxN grid. If a tuple of ints
is given it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The
array will be normalized to sum to 1, so absolute value of the
weights do not affect the result. Defaults to None.
**kwargs
Additional keyword arguments are passed on to matplotlib's
`contour` function.
Returns
-------
A matplotlib ContourSet.
See Also
--------
mplstereonet.density_grid
mplstereonet.StereonetAxes.density_contourf
matplotlib.pyplot.contour
matplotlib.pyplot.clabel
Examples
--------
Plot density contours of poles to the specified planes using a
modified Kamb method with exponential smoothing [1]_.
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips)
Plot density contours of a set of linear orientation measurements.
>>> plunges, bearings = [-10, 20, -30], [120, 315, 86]
>>> ax.density_contour(plunges, bearings, measurement='lines')
Plot density contours of a set of rake measurements.
>>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9]
>>> ax.density_contour(strikes, dips, rakes, measurement='rakes')
Plot density contours of a set of "raw" longitudes and latitudes.
>>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45])
>>> ax.density_contour(lon, lat, measurement='radians')
Plot density contours of poles to planes using a Kamb method [2]_
with the density estimated on a 10x10 grid (in long-lat space)
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips, method='kamb', gridsize=10)
Plot density contours of poles to planes with contours at [1,2,3]
standard deviations.
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips, levels=[1,2,3])
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. |
def invert_index(source_dir, index_url=INDEX_URL, init=False):
"""
    Build the inverted index from the given source_dir
Output a Shove object built on the store_path
Input:
source_dir: a directory on the filesystem
index_url: the store_path for the Shove object
init: clear the old index and rebuild from scratch
Output:
index: a Shove object
"""
raw_index = defaultdict(list)
for base, dir_list, fn_list in os.walk(source_dir):
for fn in fn_list:
fp = os.path.join(base, fn)
code = fn
with open(fp) as f:
tokens = f.read().strip().split('\n')
for token in tokens:
raw_index[token].append(code)
index = Shove(store=index_url)
if init:
index.clear()
index.update(raw_index)
index.sync()
return index | Build the invert index from give source_dir
Output a Shove object built on the store_path
Input:
source_dir: a directory on the filesystem
index_url: the store_path for the Shove object
init: clear the old index and rebuild from scratch
Output:
index: a Shove object |
def trapz2(f, x=None, y=None, dx=1.0, dy=1.0):
"""Double integrate."""
return numpy.trapz(numpy.trapz(f, x=y, dx=dy), x=x, dx=dx) | Double integrate. |
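A quick numerical check (assuming numpy is imported as in the module above): integrating f(x, y) = x*y over the unit square, where the exact value is 0.25.

import numpy

x = numpy.linspace(0.0, 1.0, 101)
y = numpy.linspace(0.0, 1.0, 101)
f = numpy.outer(x, y)            # f[i, j] = x[i] * y[j]; the last axis is y
approx = trapz2(f, x=x, y=y)
# approx differs from 0.25 only by the trapezoid-rule discretisation error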
def get_vector(self, max_choice=3):
"""Return pseudo-choice vectors."""
vec = {}
for dim in ['forbidden', 'required', 'permitted']:
if self.meta[dim] is None:
continue
dim_vec = map(lambda x: (x, max_choice), self.meta[dim])
vec[dim] = dict(dim_vec)
return vec | Return pseudo-choice vectors. |
def column_keymap(self):
""" Returns keymap and keystates used in column mode """
keystates = set()
shortcuts = self.cp.items('column_keymap')
keymap_dict = dict(shortcuts)
for combo, action in shortcuts:
# add all possible prefixes to keystates
combo_as_list = re.split('(<[A-Z].+?>|.)', combo)[1::2]
if len(combo_as_list) > 1:
keystates |= set(accumulate(combo_as_list[:-1]))
if action in ['pri', 'postpone', 'postpone_s']:
keystates.add(combo)
if action == 'pri':
for c in ascii_lowercase:
keymap_dict[combo + c] = 'cmd pri {} ' + c
return (keymap_dict, keystates) | Returns keymap and keystates used in column mode |
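A standalone illustration of how a key combo is split into prefixes for the keystates set; the combo string below is made up.

import re
from itertools import accumulate

combo = '<C-a>gg'
keys = re.split('(<[A-Z].+?>|.)', combo)[1::2]
# keys == ['<C-a>', 'g', 'g']
prefixes = list(accumulate(keys[:-1]))
# prefixes == ['<C-a>', '<C-a>g'] -> these become the intermediate keystates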
def _get_magnitude_term(self, C, mag):
"""
Returns the magnitude scaling term.
"""
lny = C['C1'] + (C['C3'] * ((8.5 - mag) ** 2.))
if mag > 6.3:
return lny + (-C['H'] * C['C5']) * (mag - 6.3)
else:
return lny + C['C2'] * (mag - 6.3) | Returns the magnitude scaling term. |
def SETPE(cpu, dest):
"""
Sets byte if parity even.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.PF, 1, 0)) | Sets byte if parity even.
:param cpu: current CPU.
:param dest: destination operand. |
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load()) | Give *expr* a name. |
def _sort_converters(cls, app_ready=False):
'''Sorts the converter functions'''
# app_ready is True when called from DMP's AppConfig.ready()
# we can't sort before then because models aren't ready
cls._sorting_enabled = cls._sorting_enabled or app_ready
if cls._sorting_enabled:
for converter in cls.converters:
converter.prepare_sort_key()
cls.converters.sort(key=attrgetter('sort_key')) | Sorts the converter functions |
def _register_server_authenticator(klass, name):
"""Add a client authenticator class to `SERVER_MECHANISMS_D`,
`SERVER_MECHANISMS` and, optionally, to `SECURE_SERVER_MECHANISMS`
"""
# pylint: disable-msg=W0212
SERVER_MECHANISMS_D[name] = klass
items = sorted(SERVER_MECHANISMS_D.items(), key = _key_func, reverse = True)
SERVER_MECHANISMS[:] = [k for (k, v) in items ]
SECURE_SERVER_MECHANISMS[:] = [k for (k, v) in items
            if v._pyxmpp_sasl_secure] | Add a server authenticator class to `SERVER_MECHANISMS_D`,
`SERVER_MECHANISMS` and, optionally, to `SECURE_SERVER_MECHANISMS` |
def create_contact(self, *args, **kwargs):
"""Creates a contact"""
url = 'contacts'
data = {
'view_all_tickets': False,
'description': 'Freshdesk Contact'
}
data.update(kwargs)
return Contact(**self._api._post(url, data=json.dumps(data))) | Creates a contact |
async def _retrieve_messages_around_strategy(self, retrieve):
"""Retrieve messages using around parameter."""
if self.around:
around = self.around.id if self.around else None
data = await self.logs_from(self.channel.id, retrieve, around=around)
self.around = None
return data
return [] | Retrieve messages using around parameter. |
def append(self, element):
"""
Append an element onto the array.
>>> array = Array()
>>> array.append('test')
"""
from refract.refraction import refract
self.content.append(refract(element)) | Append an element onto the array.
>>> array = Array()
>>> array.append('test') |
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
_3to2list = list(_wsp_splitter(value, 1))
fragment, remainder, = _3to2list[:1] + [_3to2list[1:]]
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp | Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded. |
def _build_request(self, type, commands):
'''
Build NX-API JSON request.
'''
request = {}
headers = {
'content-type': 'application/json',
}
if self.nxargs['connect_over_uds']:
user = self.nxargs['cookie']
headers['cookie'] = 'nxapi_auth=' + user + ':local'
request['url'] = self.NXAPI_UDS_URI_PATH
else:
request['url'] = '{transport}://{host}:{port}{uri}'.format(
transport=self.nxargs['transport'],
host=self.nxargs['host'],
port=self.nxargs['port'],
uri=self.NXAPI_REMOTE_URI_PATH,
)
if isinstance(commands, (list, set, tuple)):
commands = ' ; '.join(commands)
payload = {}
payload['ins_api'] = {
'version': self.NXAPI_VERSION,
'type': type,
'chunk': '0',
'sid': '1',
'input': commands,
'output_format': 'json',
}
request['headers'] = headers
request['payload'] = json.dumps(payload)
request['opts'] = {
'http_request_timeout': self.nxargs['timeout']
}
log.info('request: %s', request)
return request | Build NX-API JSON request. |
def variants(self, case_id, skip=0, count=1000, filters=None):
"""Return all variants in the VCF.
This function will apply the given filter and return the 'count' first
        variants. If 'skip' is given, the first 'skip' variants will not be regarded.
Args:
case_id (str): Path to a vcf file (for this adapter)
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
sv_len: None (float),
consequence: [] (list of consequences),
is_lof: None (Bool),
genetic_models [] (list of genetic models)
sv_type: List (list of sv types),
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants
"""
filters = filters or {}
case_obj = self.case(case_id=case_id)
limit = count + skip
genes = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
frequency = None
if filters.get('frequency'):
frequency = float(filters['frequency'])
cadd = None
if filters.get('cadd'):
cadd = float(filters['cadd'])
genetic_models = None
if filters.get('genetic_models'):
genetic_models = set(filters['genetic_models'])
sv_len = None
if filters.get('sv_len'):
sv_len = float(filters['sv_len'])
impact_severities = None
if filters.get('impact_severities'):
impact_severities = set(filters['impact_severities'])
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
variants = self._get_filtered_variants(vcf_file_path, filters)
result = []
skip_index = 0
for index, variant in enumerate(variants):
index += 1
if skip_index >= skip:
variant_obj = self._format_variants(
variant=variant,
index=index,
case_obj=case_obj,
)
if genes and variant_obj:
if not set(variant_obj['gene_symbols']).intersection(genes):
variant_obj = None
if impact_severities and variant_obj:
if not variant_obj['impact_severity'] in impact_severities:
variant_obj = None
if frequency and variant_obj:
if variant_obj.max_freq > frequency:
variant_obj = None
if cadd and variant_obj:
if variant_obj['cadd_score'] < cadd:
variant_obj = None
if genetic_models and variant_obj:
models = set(variant_obj.genetic_models)
if not models.intersection(genetic_models):
variant_obj = None
if sv_len and variant_obj:
if variant_obj.sv_len < sv_len:
variant_obj = None
if variant_obj:
skip_index += 1
if skip_index <= limit:
result.append(variant_obj)
else:
break
else:
skip_index += 1
return Results(result, len(result)) | Return all variants in the VCF.
This function will apply the given filter and return the 'count' first
variants. If 'skip' is given, the first 'skip' variants will not be regarded.
Args:
case_id (str): Path to a vcf file (for this adapter)
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
sv_len: None (float),
consequence: [] (list of consequences),
is_lof: None (Bool),
genetic_models [] (list of genetic models)
sv_type: List (list of sv types),
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants |
def tag_manifest_into_registry(self, session, worker_digest):
"""
Tags the manifest identified by worker_digest into session.registry with all the
configured tags found in workflow.tag_conf.
"""
self.log.info("%s: Tagging manifest", session.registry)
digest = worker_digest['digest']
source_repo = worker_digest['repository']
image_manifest, _, media_type, _ = self.get_manifest(session, source_repo, digest)
if media_type == MEDIA_TYPE_DOCKER_V2_SCHEMA2:
digests = ManifestDigest(v1=digest)
elif media_type == MEDIA_TYPE_OCI_V1:
digests = ManifestDigest(oci=digest)
else:
raise RuntimeError("Unexpected media type found in worker repository: {}"
.format(media_type))
push_conf_registry = self.workflow.push_conf.add_docker_registry(session.registry,
insecure=session.insecure)
for image in self.workflow.tag_conf.images:
target_repo = image.to_str(registry=False, tag=False)
self.store_manifest_in_repository(session, image_manifest, media_type,
source_repo, target_repo, tag=image.tag)
# add a tag for any plugins running later that expect it
push_conf_registry.digests[image.tag] = digests | Tags the manifest identified by worker_digest into session.registry with all the
configured tags found in workflow.tag_conf. |
def push(self, repository=None, tag=None):
"""
        Push the image to a registry. Raise an exception when the push fails.
:param repository: str, see constructor
:param tag: str, see constructor
:return: None
"""
image = self
if repository or tag:
image = self.tag_image(repository, tag)
for json_e in self.d.push(repository=image.name, tag=image.tag, stream=True, decode=True):
logger.debug(json_e)
status = graceful_get(json_e, "status")
if status:
logger.info(status)
else:
error = graceful_get(json_e, "error")
if error is not None:
                    logger.error(error)
raise ConuException("There was an error while pushing the image %s: %s",
self.name, error)
return image | Push image to registry. Raise exception when push fail.
:param repository: str, see constructor
:param tag: str, see constructor
:return: None |
def _to_addr(worksheet, row, col, row_fixed=False, col_fixed=False):
"""converts a (0,0) based coordinate to an excel address"""
addr = ""
A = ord('A')
col += 1
while col > 0:
addr = chr(A + ((col - 1) % 26)) + addr
col = (col - 1) // 26
prefix = ("'%s'!" % worksheet) if worksheet else ""
col_modifier = "$" if col_fixed else ""
row_modifier = "$" if row_fixed else ""
return prefix + "%s%s%s%d" % (col_modifier, addr, row_modifier, row+1) | converts a (0,0) based coordinate to an excel address |
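A few illustrative conversions for the helper above (passing worksheet=None yields a bare address):

assert _to_addr(None, 0, 0) == 'A1'
assert _to_addr(None, 9, 27) == 'AB10'
assert _to_addr('Data', 0, 2, row_fixed=True, col_fixed=True) == "'Data'!$C$1"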
def connect_async(self, connection_id, connection_string, callback):
"""Connect to a device by its connection_string
This function looks for the device on AWS IOT using the preconfigured
topic prefix and looking for:
<prefix>/devices/connection_string
It then attempts to lock that device for exclusive access and
returns a callback if successful.
Args:
connection_id (int): A unique integer set by the caller for referring to this connection
once created
connection_string (string): A device id of the form d--XXXX-YYYY-ZZZZ-WWWW
callback (callable): A callback function called when the connection has succeeded or
failed
"""
topics = MQTTTopicValidator(self.prefix + 'devices/{}'.format(connection_string))
key = self._generate_key()
name = self.name
conn_message = {'type': 'command', 'operation': 'connect', 'key': key, 'client': name}
context = {'key': key, 'slug': connection_string, 'topics': topics}
self.conns.begin_connection(connection_id, connection_string, callback, context, self.get_config('default_timeout'))
self._bind_topics(topics)
try:
self.client.publish(topics.connect, conn_message)
except IOTileException:
self._unbind_topics(topics)
self.conns.finish_connection(connection_id, False, 'Failed to send connection message') | Connect to a device by its connection_string
This function looks for the device on AWS IOT using the preconfigured
topic prefix and looking for:
<prefix>/devices/connection_string
It then attempts to lock that device for exclusive access and
returns a callback if successful.
Args:
connection_id (int): A unique integer set by the caller for referring to this connection
once created
connection_string (string): A device id of the form d--XXXX-YYYY-ZZZZ-WWWW
callback (callable): A callback function called when the connection has succeeded or
failed |
def event_update(
self,
event_id,
name=None,
season=None,
start_time=None,
event_group_id=None,
status=None,
account=None,
**kwargs
):
""" Update an event. This needs to be **proposed**.
:param str event_id: Id of the event to update
:param list name: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param list season: Internationalized season, e.g. ``[['de',
'Foo'], ['en', 'bar']]``
:param str event_group_id: Event group ID to create the event for
(defaults to *relative* id ``0.0.0``)
:param datetime start_time: Time of the start of the event
:param str status: Event status
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
assert isinstance(season, list)
assert isinstance(
start_time, datetime
), "start_time needs to be a `datetime.datetime`"
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
event = Event(event_id)
op_data = {
"fee": {"amount": 0, "asset_id": "1.3.0"},
"event_id": event["id"],
"prefix": self.prefix,
}
        # Do not try to update the status if it doesn't change on the chain
if event["status"] == status:
status = None
if event_group_id:
if event_group_id[0] == "1":
# Test if object exists
EventGroup(event_group_id)
else:
# Test if object is proposed
test_proposal_in_buffer(
kwargs.get("append_to", self.propbuffer),
"event_group_create",
event_group_id,
)
op_data.update({"new_event_group_id": event_group_id})
if name:
op_data.update({"new_name": name})
if season:
op_data.update({"new_season": season})
if start_time:
op_data.update({"new_start_time": formatTime(start_time)})
if status:
op_data.update({"new_status": status})
op = operations.Event_update(**op_data)
return self.finalizeOp(op, account["name"], "active", **kwargs) | Update an event. This needs to be **proposed**.
:param str event_id: Id of the event to update
:param list name: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param list season: Internationalized season, e.g. ``[['de',
'Foo'], ['en', 'bar']]``
:param str event_group_id: Event group ID to create the event for
(defaults to *relative* id ``0.0.0``)
:param datetime start_time: Time of the start of the event
:param str status: Event status
:param str account: (optional) the account to allow access
to (defaults to ``default_account``) |
def convert_snapshot(self, shift, instruction):
"""Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': 'snapshot',
't0': shift+instruction.start_time,
'label': instruction.name,
'type': instruction.type
}
return self._qobj_model(**command_dict) | Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters. |
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
    # Look on the traffic side of the street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g | Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops. |
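The departure times come from plain linear interpolation between the first and last stop distances. A minimal, self-contained sketch of that step with hypothetical numbers (not taken from any real feed):

import numpy as np

# Hypothetical trip: 4 stops at these cumulative distances (km),
# departing at 8:00:00 and arriving at 8:30:00 (seconds past midnight).
dists = [0.0, 2.5, 6.0, 10.0]
start_time, end_time = 8 * 3600, 8 * 3600 + 1800
times = np.interp(dists, [dists[0], dists[-1]], [start_time, end_time])
# -> array([28800., 29250., 29880., 30600.])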
def expect_constructor(target):
"""
Set an expectation on a ``ClassDouble`` constructor
:param ClassDouble target: The ClassDouble to set the expectation on.
:return: an ``Expectation`` for the __new__ method.
:raise: ``ConstructorDoubleError`` if target is not a ClassDouble.
"""
if not isinstance(target, ClassDouble):
raise ConstructorDoubleError(
'Cannot allow_constructor of {} since it is not a ClassDouble.'.format(target),
)
return expect(target)._doubles__new__ | Set an expectation on a ``ClassDouble`` constructor
:param ClassDouble target: The ClassDouble to set the expectation on.
:return: an ``Expectation`` for the __new__ method.
:raise: ``ConstructorDoubleError`` if target is not a ClassDouble. |
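A hedged usage sketch: the ``myapp.models.User`` path and ``build_report`` function are hypothetical, and the ``with_args`` chaining is assumed to behave as in the doubles allow/expect API.

from doubles import ClassDouble, expect_constructor

def test_report_constructs_a_user():
    FakeUser = ClassDouble('myapp.models.User')      # hypothetical class path
    expect_constructor(FakeUser).with_args('alice')  # fails the test if never constructed this way
    build_report('alice')                            # hypothetical code under test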
def speech_speaker(self):
"""Retrieves the speaker of the audio or video file associated with the element.
        The speaker is inherited from ancestor elements if none is specified. For this reason, always use this method rather than access the ``speaker`` attribute directly.
Returns:
str or None if not found
"""
if self.speaker:
return self.speaker
elif self.parent:
return self.parent.speech_speaker()
else:
return None | Retrieves the speaker of the audio or video file associated with the element.
        The speaker is inherited from ancestor elements if none is specified. For this reason, always use this method rather than access the ``speaker`` attribute directly.
Returns:
str or None if not found |
def execute_script(code_block, example_globals, image_path, fig_count,
src_file, gallery_conf):
"""Executes the code block of the example file"""
time_elapsed = 0
stdout = ''
# We need to execute the code
print('plotting code blocks in %s' % src_file)
plt.close('all')
cwd = os.getcwd()
    # Redirect output to a buffer while still echoing it to stdout
orig_stdout = sys.stdout
try:
        # First cd into the original example dir, so that any file
        # created by the example gets created in this directory
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
if my_stdout:
stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4))
os.chdir(cwd)
figure_list = save_figures(image_path, fig_count, gallery_conf)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
image_list = ""
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
elif len(figure_list) > 1:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
except Exception:
formatted_exception = traceback.format_exc()
print(80 * '_')
print('%s is not compiling:' % src_file)
print(formatted_exception)
print(80 * '_')
figure_list = []
image_list = codestr2rst(formatted_exception, lang='pytb')
# Overrides the output thumbnail in the gallery for easy identification
broken_img = os.path.join(glr_path_static(), 'broken_example.png')
shutil.copyfile(broken_img, os.path.join(cwd, image_path.format(1)))
fig_count += 1 # raise count to avoid overwriting image
# Breaks build on first example error
if gallery_conf['abort_on_example_error']:
raise
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout)
return code_output, time_elapsed, fig_count + len(figure_list) | Executes the code block of the example file |
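The capture-and-restore pattern at the core of ``execute_script`` can be reduced to a few lines. A simplified sketch (no ``Tee``, no matplotlib, no gallery bookkeeping):

import sys
from io import StringIO

def run_and_capture(code_block, env):
    orig_stdout = sys.stdout
    buf = StringIO()
    try:
        sys.stdout = buf            # anything printed by the block lands in buf
        exec(code_block, env)
    finally:
        sys.stdout = orig_stdout    # always restore, even on error
    return buf.getvalue()

print(run_and_capture("print(1 + 1)", {}))   # -> 2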
def police_priority_map_exceed_map_pri3_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri3_exceed = ET.SubElement(exceed, "map-pri3-exceed")
map_pri3_exceed.text = kwargs.pop('map_pri3_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
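For reference, a sketch of the XML this auto-generated builder assembles, using hypothetical values for ``name`` and ``map_pri3_exceed``:

import xml.etree.ElementTree as ET

config = ET.Element("config")
pmap = ET.SubElement(config, "police-priority-map",
                     xmlns="urn:brocade.com:mgmt:brocade-policer")
ET.SubElement(pmap, "name").text = "pmap1"            # hypothetical map name
exceed = ET.SubElement(pmap, "exceed")
ET.SubElement(exceed, "map-pri3-exceed").text = "5"   # hypothetical priority value
print(ET.tostring(config).decode())
# -> <config><police-priority-map xmlns="urn:brocade.com:mgmt:brocade-policer">
#    <name>pmap1</name><exceed><map-pri3-exceed>5</map-pri3-exceed></exceed>
#    </police-priority-map></config>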
def get_diff_idxs(array, rtol, atol):
"""
    Given an array of (C, N, L) values, where the first curve is the reference,
    compute the relative differences and discard the ones below the tolerance.
:returns: indices where there are sensible differences.
"""
C, N, L = array.shape
diff_idxs = set() # indices of the sites with differences
for c in range(1, C):
for n in range(N):
if not numpy.allclose(array[c, n], array[0, n], rtol, atol):
diff_idxs.add(n)
    return numpy.fromiter(diff_idxs, int) | Given an array of (C, N, L) values, where the first curve is the reference,
    compute the relative differences and discard the ones below the tolerance.
:returns: indices where there are sensible differences. |
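A small usage sketch with made-up data: curve 1 differs from the reference only at site 1, so that is the only index returned.

import numpy

array = numpy.zeros((2, 3, 4))    # (C, N, L) = 2 curves, 3 sites, 4 levels
array[1, 1] += 1.0                # perturb curve 1 at site 1
print(get_diff_idxs(array, rtol=1e-5, atol=1e-8))   # -> [1]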
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2018-08-06 - Written - Bovy (UofT)
"""
x,y,z= bovy_coords.cyl_to_rect(R,phi,z)
if self._aligned:
xp, yp, zp= x, y, z
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
xp, yp, zp= xyzp[0], xyzp[1], xyzp[2]
m= numpy.sqrt(xp**2.+yp**2./self._b2+zp**2./self._c2)
return self._mdens(m) | NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2018-08-06 - Written - Bovy (UofT) |
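The key quantity above is the ellipsoidal radius m; in the axis-aligned case it reduces to a one-liner. A sketch with hypothetical axis ratios b and c:

import numpy

b, c = 0.8, 0.6                    # hypothetical intermediate/minor axis ratios
x, y, z = 1.0, 0.5, 0.2            # a point in the potential's own frame
m = numpy.sqrt(x**2 + y**2 / b**2 + z**2 / c**2)
# the density is then the spheroidal profile evaluated at m, i.e. self._mdens(m)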
def tatoeba(language, word, minlength = 10, maxlength = 100):
    ''' Returns a list of suitable text samples for a given word using Tatoeba.org. '''
word, sentences = unicode(word), []
page = requests.get('http://tatoeba.org/deu/sentences/search?query=%s&from=%s&to=und' % (word, lltk.locale.iso639_1to3(language)))
tree = html.fromstring(page.text)
for sentence in tree.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " mainSentence ")]/div/a/text()'):
sentence = sentence.strip(u' "„“').replace(u'“ „', u' – ').replace('" "', u' – ')
if word in sentence and len(sentence) < maxlength and len(sentence) > minlength:
sentences.append(sentence)
    return sentences | Returns a list of suitable text samples for a given word using Tatoeba.org.
def get_map(self, url, auth_map=None):
"""Envia uma requisição GET.
:param url: URL para enviar a requisição HTTP.
:param auth_map: Dicionário com as informações para autenticação na networkAPI.
:return: Retorna uma tupla contendo:
(< código de resposta http >, < corpo da resposta >).
:raise ConnectionError: Falha na conexão com a networkAPI.
:raise RestError: Falha no acesso à networkAPI.
"""
response_code, content = self.get(url, auth_map)
        return response_code, content | Sends a GET request.
        :param url: URL to send the HTTP request to.
        :param auth_map: Dictionary with the authentication information for the networkAPI.
        :return: Returns a tuple containing:
        (< http response code >, < response body >).
        :raise ConnectionError: Failure in the connection to the networkAPI.
        :raise RestError: Failure while accessing the networkAPI.
def robots(request):
"""Return a simple "don't index me" robots.txt file."""
resp = request.response
resp.status = '200 OK'
resp.content_type = 'text/plain'
resp.body = """
User-Agent: *
Disallow: /
"""
return resp | Return a simple "don't index me" robots.txt file. |
def get_email(self, token):
"""Fetches email address from email API endpoint"""
resp = requests.get(self.emails_url,
params={'access_token': token.token})
emails = resp.json().get('values', [])
email = ''
try:
email = emails[0].get('email')
primary_emails = [e for e in emails if e.get('is_primary', False)]
email = primary_emails[0].get('email')
except (IndexError, TypeError, KeyError):
return ''
finally:
return email | Fetches email address from email API endpoint |
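Because of the ``return email`` in the ``finally`` block, the method effectively prefers the primary address and otherwise falls back to the first address returned. A sketch of that selection logic with a hypothetical API payload:

emails = [
    {'email': 'alt@example.com', 'is_primary': False},
    {'email': 'me@example.com', 'is_primary': True},
]
primary = [e for e in emails if e.get('is_primary', False)]
email = (primary or emails)[0]['email'] if emails else ''
print(email)   # -> me@example.com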
def _create_function(name, doc=""):
"""Create a PySpark function by its name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _ | Create a PySpark function by its name |
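A hedged sketch of how such a factory is typically used; the registry below is hypothetical, and actually calling the generated wrappers requires an active SparkContext.

# Hypothetical registry of JVM function names -> docstrings
_functions = {
    'lower': 'Converts a string column to lower case.',
    'upper': 'Converts a string column to upper case.',
}
for _name, _doc in _functions.items():
    globals()[_name] = _create_function(_name, _doc)
# df.select(lower(df.name)) would then delegate to the JVM function of the same name.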
def _create_menu(self, items):
"""
Create a menu from the given node.
:param list items: list of menu items
:returns: a new Gtk.Menu object holding all items of the node
"""
menu = Gtk.Menu()
self._create_menu_items(menu, items)
return menu | Create a menu from the given node.
:param list items: list of menu items
:returns: a new Gtk.Menu object holding all items of the node |
def file_md5(file_name):
'''
Generate an MD5 hash of the specified file.
@file_name - The file to hash.
Returns an MD5 hex digest string.
'''
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest() | Generate an MD5 hash of the specified file.
@file_name - The file to hash.
Returns an MD5 hex digest string. |
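A quick usage sketch; reading in chunks keeps memory flat even for large files (the path below is hypothetical):

digest = file_md5('/var/log/syslog')   # any readable path works
print(digest)                          # 32-character hex digest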
def get_force(self, component_info=None, data=None, component_position=None):
"""Get force data."""
components = []
append_components = components.append
for _ in range(component_info.plate_count):
component_position, plate = QRTPacket._get_exact(
RTForcePlate, data, component_position
)
force_list = []
for _ in range(plate.force_count):
component_position, force = QRTPacket._get_exact(
RTForce, data, component_position
)
force_list.append(force)
append_components((plate, force_list))
return components | Get force data. |
def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None):
"""
This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
}
"""
label_param = ''
if filter_labels:
label_param = ';'.join(['{}:{}'.format(label, value) for label, value in filter_labels.items()])
filters = [
'filter[name]={0}'.format(filter_name) if filter_name else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'filter[labels]={0}'.format(label_param) if filter_labels else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}servers.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
} |
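A sketch of how the filter fragments above combine; ``build_param_string`` is assumed to join the non-empty entries with '&', and the values are hypothetical:

filters = ['filter[name]=web-01', None, 'filter[labels]=Region:EU', 'page=2']
param_string = '&'.join(f for f in filters if f)
# -> 'filter[name]=web-01&filter[labels]=Region:EU&page=2'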
def DetermineRunner(bbdir):
'''Checks if the given directory is a worker or a master and returns the
appropriate run function.'''
tacfile = os.path.join(bbdir, 'buildbot.tac')
if not os.path.exists(tacfile):
# No tac-file - use master runner by default.
import buildbot.scripts.runner
return buildbot.scripts.runner.run
with open(tacfile, 'r') as f:
contents = f.read()
try:
if 'import Worker' in contents:
import buildbot_worker.scripts.runner
return buildbot_worker.scripts.runner.run
except ImportError:
# Not a worker.
pass
try:
if 'import BuildSlave' in contents:
import buildslave.scripts.runner
return buildslave.scripts.runner.run
except ImportError:
# Not an old buildslave.
pass
# Treat as master by default.
import buildbot.scripts.runner
return buildbot.scripts.runner.run | Checks if the given directory is a worker or a master and returns the
appropriate run function. |