sentence1 (string, 52–3.87M chars) | sentence2 (string, 1–47.2k chars) | label (1 class) |
---|---|---|
def get_lyrics(artist, song, linesep='\n', timeout=None):
"""Retrieve the lyrics of the song and return the first one in case
multiple versions are available."""
return get_all_lyrics(artist, song, linesep, timeout)[0] | Retrieve the lyrics of the song and return the first one in case
multiple versions are available. | entailment |
def get_all_lyrics(artist, song, linesep='\n', timeout=None):
"""Retrieve a list of all the lyrics versions of a song."""
url = create_url(artist, song)
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
lyricboxes = soup.findAll('div', {'class': 'lyricbox'})
if not lyricboxes:
raise LyricsNotFound('Cannot download lyrics')
for lyricbox in lyricboxes:
for br in lyricbox.findAll('br'):
br.replace_with(linesep)
return [lyricbox.text.strip() for lyricbox in lyricboxes] | Retrieve a list of all the lyrics versions of a song. | entailment |
def open_file(name, mode=None, driver=None, libver=None, userblock_size=None, **kwargs):
"""Open an ARF file, creating as necessary.
Use this instead of h5py.File to ensure that root-level attributes and group
creation property lists are set correctly.
"""
import sys
import os
from h5py import h5p
from h5py._hl import files
try:
# If the byte string doesn't match the default
# encoding, just pass it on as-is. Note Unicode
# objects can always be encoded.
name = name.encode(sys.getfilesystemencoding())
except (UnicodeError, LookupError):
pass
exists = os.path.exists(name)
try:
fcpl = h5p.create(h5p.FILE_CREATE)
fcpl.set_link_creation_order(
h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
except AttributeError:
# older version of h5py
fp = files.File(name, mode=mode, driver=driver,
libver=libver, **kwargs)
else:
fapl = files.make_fapl(driver, libver, **kwargs)
fp = files.File(files.make_fid(name, mode, userblock_size, fapl, fcpl))
if not exists and fp.mode == 'r+':
set_attributes(fp,
arf_library='python',
arf_library_version=__version__,
arf_version=spec_version)
return fp | Open an ARF file, creating as necessary.
Use this instead of h5py.File to ensure that root-level attributes and group
creation property lists are set correctly. | entailment |
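A minimal usage sketch, assuming open_file is importable from the surrounding arf-style module; the file name is hypothetical:
fp = open_file("example.arf", mode="a")    # creates the file if it does not exist
print(fp.attrs.get("arf_version"))         # root-level attribute set for newly created files
fp.close()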
def create_entry(group, name, timestamp, **attributes):
"""Create a new ARF entry under group, setting required attributes.
An entry is an abstract collection of data which all refer to the same time
frame. Data can include physiological recordings, sound recordings, and
derived data such as spike times and labels. See add_data() for information
on how data are stored.
name -- the name of the new entry. any valid python string.
timestamp -- timestamp of entry (datetime object, or seconds since
January 1, 1970). Can be an integer, a float, or a tuple
of integers (seconds, microseconds)
Additional keyword arguments are set as attributes on created entry.
Returns: newly created entry object
"""
# create group using low-level interface to store creation order
from h5py import h5p, h5g, _hl
try:
gcpl = h5p.create(h5p.GROUP_CREATE)
gcpl.set_link_creation_order(
h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
except AttributeError:
grp = group.create_group(name)
else:
name, lcpl = group._e(name, lcpl=True)
grp = _hl.group.Group(h5g.create(group.id, name, lcpl=lcpl, gcpl=gcpl))
set_uuid(grp, attributes.pop("uuid", None))
set_attributes(grp,
timestamp=convert_timestamp(timestamp),
**attributes)
return grp | Create a new ARF entry under group, setting required attributes.
An entry is an abstract collection of data which all refer to the same time
frame. Data can include physiological recordings, sound recordings, and
derived data such as spike times and labels. See add_data() for information
on how data are stored.
name -- the name of the new entry. any valid python string.
timestamp -- timestamp of entry (datetime object, or seconds since
January 1, 1970). Can be an integer, a float, or a tuple
of integers (seconds, microseconds)
Additional keyword arguments are set as attributes on created entry.
Returns: newly created entry object | entailment |
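A short sketch of the three timestamp forms described above (datetime object, float seconds since the Epoch, or a (seconds, microseconds) tuple); the file and entry names are illustrative:
from datetime import datetime

fp = open_file("timestamps.arf", mode="a")
e1 = create_entry(fp, "e_datetime", datetime(2020, 1, 1, 12, 0, 0))
e2 = create_entry(fp, "e_float", 1577880000.25)          # seconds since the Epoch
e3 = create_entry(fp, "e_tuple", (1577880000, 250000))   # (seconds, microseconds)
print(e1.attrs["timestamp"], e3.attrs["timestamp"])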
def create_dataset(group, name, data, units='', datatype=DataTypes.UNDEFINED,
chunks=True, maxshape=None, compression=None,
**attributes):
"""Create an ARF dataset under group, setting required attributes
Required arguments:
name -- the name of dataset in which to store the data
data -- the data to store
Data can be of the following types:
* sampled data: an N-D numerical array of measurements
* "simple" event data: a 1-D array of times
* "complex" event data: a 1-D array of records, with field 'start' required
Optional arguments:
datatype -- a code defining the nature of the data in the channel
units -- channel units (optional for sampled data, otherwise required)
sampling_rate -- required for sampled data and event data with units=='samples'
Arguments passed to h5py:
maxshape -- make the node resizable up to this shape. Use None for axes that
need to be unlimited.
chunks -- specify the chunk size. The optimal chunk size depends on the
intended use of the data. For single-channel sampled data the
auto-chunking (True) is probably best.
compression -- compression strategy. Can be 'gzip', 'szip', 'lzf' or an integer
in range(10) specifying gzip(N). Only gzip is really portable.
Additional arguments are set as attributes on the created dataset
Returns the created dataset
"""
from numpy import asarray
srate = attributes.get('sampling_rate', None)
# check data validity before doing anything
if not hasattr(data, 'dtype'):
data = asarray(data)
if data.dtype.kind in ('S', 'O', 'U'):
raise ValueError(
"data must be in array with numeric or compound type")
if data.dtype.kind == 'V':
if 'start' not in data.dtype.names:
raise ValueError("complex event data requires 'start' field")
if not isinstance(units, (list, tuple)):
raise ValueError("complex event data requires sequence of units")
if not len(units) == len(data.dtype.names):
raise ValueError("number of units doesn't match number of fields")
if units == '':
if srate is None or not srate > 0:
raise ValueError(
"unitless data assumed time series and requires sampling_rate attribute")
elif units == 'samples':
if srate is None or not srate > 0:
raise ValueError(
"data with units of 'samples' requires sampling_rate attribute")
# NB: can't really catch case where sampled data has units but doesn't
# have sampling_rate attribute
dset = group.create_dataset(
name, data=data, maxshape=maxshape, chunks=chunks, compression=compression)
set_attributes(dset, units=units, datatype=datatype, **attributes)
return dset | Create an ARF dataset under group, setting required attributes
Required arguments:
name -- the name of dataset in which to store the data
data -- the data to store
Data can be of the following types:
* sampled data: an N-D numerical array of measurements
* "simple" event data: a 1-D array of times
* "complex" event data: a 1-D array of records, with field 'start' required
Optional arguments:
datatype -- a code defining the nature of the data in the channel
units -- channel units (optional for sampled data, otherwise required)
sampling_rate -- required for sampled data and event data with units=='samples'
Arguments passed to h5py:
maxshape -- make the node resizable up to this shape. Use None for axes that
need to be unlimited.
chunks -- specify the chunk size. The optimal chunk size depends on the
intended use of the data. For single-channel sampled data the
auto-chunking (True) is probably best.
compression -- compression strategy. Can be 'gzip', 'szip', 'lzf' or an integer
in range(10) specifying gzip(N). Only gzip is really portable.
Additional arguments are set as attributes on the created dataset
Returns the created dataset | entailment |
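A hedged sketch of storing sampled data, which requires the sampling_rate attribute when units are empty; it reuses the entry e1 from the sketch above, and the random signal stands in for a real recording:
import numpy as np

pcm = np.random.randn(48000).astype("float32")      # one second of fake signal
dset = create_dataset(e1, "pcm_000", pcm, units="", sampling_rate=48000,
                      datatype=DataTypes.UNDEFINED, compression="gzip")
print(dset.shape, dset.attrs["sampling_rate"])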
def create_table(group, name, dtype, **attributes):
"""Create a new array dataset under group with compound datatype and maxshape=(None,)"""
dset = group.create_dataset(
name, shape=(0,), dtype=dtype, maxshape=(None,))
set_attributes(dset, **attributes)
return dset | Create a new array dataset under group with compound datatype and maxshape=(None,) | entailment |
def append_data(dset, data):
"""Append data to dset along axis 0. Data must be a single element or
a 1D array of the same type as the dataset (including compound datatypes)."""
N = data.shape[0] if hasattr(data, 'shape') else 1
if N == 0:
return
oldlen = dset.shape[0]
newlen = oldlen + N
dset.resize(newlen, axis=0)
dset[oldlen:] = data | Append data to dset along axis 0. Data must be a single element or
a 1D array of the same type as the dataset (including compound datatypes). | entailment |
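A small sketch combining create_table and append_data to grow a compound-type dataset row by row, again using entry e1 from the earlier sketch; the dtype and values are illustrative:
import numpy as np

event_dtype = np.dtype([("start", "f8"), ("name", "S16")])
events = create_table(e1, "events", event_dtype, datatype=DataTypes.UNDEFINED)
append_data(events, np.array([(1.5, b"peck"), (2.25, b"gap")], dtype=event_dtype))
print(events.shape)   # (2,)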
def check_file_version(file):
"""Check the ARF version attribute of file for compatibility.
Raises DeprecationWarning for backwards-incompatible files, FutureWarning
for (potentially) forwards-incompatible files, and UserWarning for files
that may not have been created by an ARF library.
Returns the version for the file
"""
from distutils.version import StrictVersion as Version
try:
ver = file.attrs.get('arf_version', None)
if ver is None:
ver = file.attrs['arf_library_version']
except KeyError:
raise UserWarning(
"Unable to determine ARF version for {0.filename};"
"created by another program?".format(file))
try:
# if the attribute is stored as a string, it's ascii-encoded
ver = ver.decode("ascii")
except (LookupError, AttributeError):
pass
# should be backwards compatible after 1.1
file_version = Version(ver)
if file_version < Version('1.1'):
raise DeprecationWarning(
"ARF library {} may have trouble reading file "
"version {} (< 1.1)".format(version, file_version))
elif file_version >= Version('3.0'):
raise FutureWarning(
"ARF library {} may be incompatible with file "
"version {} (>= 3.0)".format(version, file_version))
return file_version | Check the ARF version attribute of file for compatibility.
Raises DeprecationWarning for backwards-incompatible files, FutureWarning
for (potentially) forwards-incompatible files, and UserWarning for files
that may not have been created by an ARF library.
Returns the version for the file | entailment |
def set_attributes(node, overwrite=True, **attributes):
"""Set multiple attributes on node.
If overwrite is False, and the attribute already exists, does nothing. If
the value for a key is None, the attribute is deleted.
"""
aset = node.attrs
for k, v in attributes.items():
if not overwrite and k in aset:
pass
elif v is None:
if k in aset:
del aset[k]
else:
aset[k] = v | Set multiple attributes on node.
If overwrite is False, and the attribute already exists, does nothing. If
the value for a key is None, the attribute is deleted. | entailment |
def keys_by_creation(group):
"""Returns a sequence of links in group in order of creation.
Raises an error if the group was not set to track creation order.
"""
from h5py import h5
out = []
try:
group._id.links.iterate(
out.append, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC)
except (AttributeError, RuntimeError):
# pre 2.2 shim
def f(name):
if name.find(b'/', 1) == -1:
out.append(name)
group._id.links.visit(
f, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC)
return map(group._d, out) | Returns a sequence of links in group in order of creation.
Raises an error if the group was not set to track creation order. | entailment |
def convert_timestamp(obj):
"""Make an ARF timestamp from an object.
Argument can be a datetime.datetime object, a time.struct_time, an integer,
a float, or a tuple of integers. The returned value is a numpy array with
the integer number of seconds since the Epoch and any additional
microseconds.
Note that because floating point values are approximate, the conversion
between float and integer tuple may not be reversible.
"""
import numbers
from datetime import datetime
from time import mktime, struct_time
from numpy import zeros
out = zeros(2, dtype='int64')
if isinstance(obj, datetime):
out[0] = mktime(obj.timetuple())
out[1] = obj.microsecond
elif isinstance(obj, struct_time):
out[0] = mktime(obj)
elif isinstance(obj, numbers.Integral):
out[0] = obj
elif isinstance(obj, numbers.Real):
out[0] = obj
out[1] = (obj - out[0]) * 1e6
else:
try:
out[:2] = obj[:2]
except (TypeError, ValueError, IndexError):
raise TypeError("unable to convert %s to timestamp" % obj)
return out | Make an ARF timestamp from an object.
Argument can be a datetime.datetime object, a time.struct_time, an integer,
a float, or a tuple of integers. The returned value is a numpy array with
the integer number of seconds since the Epoch and any additional
microseconds.
Note that because floating point values are approximate, the conversion
between float and integer tuple may not be reversible. | entailment |
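A worked example of the (seconds, microseconds) representation and of the round trip via timestamp_to_datetime (defined in the next entry):
from datetime import datetime

print(convert_timestamp(1577880000))             # -> [1577880000, 0]
print(convert_timestamp(1577880000.25))          # -> [1577880000, 250000]
print(convert_timestamp((1577880000, 250000)))   # same value from a tuple
ts = convert_timestamp(datetime(2020, 1, 1, 12, 0, 0, 5))
print(timestamp_to_datetime(ts))                 # 2020-01-01 12:00:00.000005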
def timestamp_to_datetime(timestamp):
"""Convert an ARF timestamp to a datetime.datetime object (naive local time)"""
from datetime import datetime, timedelta
obj = datetime.fromtimestamp(timestamp[0])
return obj + timedelta(microseconds=int(timestamp[1])) | Convert an ARF timestamp to a datetime.datetime object (naive local time) | entailment |
def set_uuid(obj, uuid=None):
"""Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype """
from uuid import uuid4, UUID
if uuid is None:
uuid = uuid4()
elif isinstance(uuid, bytes):
if len(uuid) == 16:
uuid = UUID(bytes=uuid)
else:
uuid = UUID(hex=uuid)
if "uuid" in obj.attrs:
del obj.attrs["uuid"]
obj.attrs.create("uuid", str(uuid).encode('ascii'), dtype="|S36") | Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype | entailment |
def get_uuid(obj):
"""Return the uuid for obj, or null uuid if none is set"""
# TODO: deprecate null uuid ret val
from uuid import UUID
try:
uuid = obj.attrs['uuid']
except KeyError:
return UUID(int=0)
# convert to unicode for python 3
try:
uuid = uuid.decode('ascii')
except (LookupError, AttributeError):
pass
return UUID(uuid) | Return the uuid for obj, or null uuid if none is set | entailment |
def count_children(obj, type=None):
"""Return the number of children of obj, optionally restricting by class"""
if type is None:
return len(obj)
else:
# there doesn't appear to be any hdf5 function for getting this
# information without inspecting each child, which makes this somewhat
# slow
return sum(1 for x in obj if obj.get(x, getclass=True) is type) | Return the number of children of obj, optionally restricting by class | entailment |
def _todict(cls):
""" generate a dict keyed by value """
return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith('_')) | generate a dict keyed by value | entailment |
def get_template(template_name,fields=None):
'''get_template will return a template in the template folder,
with some substitutions (e.g., {'{{ graph | safe }}': "fill this in!"})
'''
template = None
if not template_name.endswith('.html'):
template_name = "%s.html" %(template_name)
here = "%s/cli/app/templates" %(get_installdir())
template_path = "%s/%s" %(here,template_name)
if os.path.exists(template_path):
template = ''.join(read_file(template_path))
if fields is not None:
for tag,sub in fields.items():
template = template.replace(tag,sub)
return template | get_template will return a template in the template folder,
with some substitutions (e.g., {'{{ graph | safe }}': "fill this in!"}) | entailment |
def container_similarity_vector(container1=None,packages_set=None,by=None):
'''container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a custom
list, should define custom_set.
:param container1: singularity image or singularity hub container.
:param packages_set: a name of a package set, provided are docker-os and docker-library
:param by: metrics to compare by (files.txt and/or folders.txt)
'''
if by == None:
by = ['files.txt']
if not isinstance(by,list):
by = [by]
if not isinstance(packages_set,list):
packages_set = [packages_set]
comparisons = dict()
for b in by:
bot.debug("Starting comparisons for %s" %b)
df = pandas.DataFrame(columns=packages_set)
for package2 in packages_set:
sim = calculate_similarity(container1=container1,
image_package2=package2,
by=b)[b]
name1 = os.path.basename(package2).replace('.img.zip','')
bot.debug("container vs. %s: %s" %(name1,sim))
df.loc["container",package2] = sim
df.columns = [os.path.basename(x).replace('.img.zip','') for x in df.columns.tolist()]
comparisons[b] = df
return comparisons | container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a custom
list, should define custom_set.
:param container1: singularity image or singularity hub container.
:param packages_set: a name of a package set, provided are docker-os and docker-library
:param by: metrics to compare by (files.txt and/or folders.txt) | entailment |
def compare_singularity_images(image_paths1, image_paths2=None):
'''compare_singularity_images is a wrapper for compare_containers to compare
singularity containers. If image_paths2 is not defined, pairwise comparison is done
with image_paths1
'''
repeat = False
if image_paths2 is None:
image_paths2 = image_paths1
repeat = True
if not isinstance(image_paths1,list):
image_paths1 = [image_paths1]
if not isinstance(image_paths2,list):
image_paths2 = [image_paths2]
dfs = pandas.DataFrame(index=image_paths1,columns=image_paths2)
comparisons_done = []
for image1 in image_paths1:
fileobj1,tar1 = get_image_tar(image1)
members1 = [x.name for x in tar1]
for image2 in image_paths2:
comparison_id = [image1,image2]
comparison_id.sort()
comparison_id = "".join(comparison_id)
if comparison_id not in comparisons_done:
if image1 == image2:
sim = 1.0
else:
fileobj2,tar2 = get_image_tar(image2)
members2 = [x.name for x in tar2]
c = compare_lists(members1, members2)
sim = information_coefficient(c['total1'],c['total2'],c['intersect'])
delete_image_tar(fileobj2, tar2)
dfs.loc[image1,image2] = sim
if repeat:
dfs.loc[image2,image1] = sim
comparisons_done.append(comparison_id)
delete_image_tar(fileobj1, tar1)
return dfs | compare_singularity_images is a wrapper for compare_containers to compare
singularity containers. If image_paths2 is not defined, pairwise comparison is done
with image_paths1 | entailment |
def compare_containers(container1=None, container2=None):
'''compare_containers will generate a data structure with common and unique files to
two images. If environmental variable SINGULARITY_HUB is set, will use container
database objects.
:param container1: first container for comparison
:param container2: second container for comparison; both containers must be defined.
By default, only files are compared.
'''
# Get files and folders for each
container1_guts = get_container_contents(split_delim="\n",
container=container1)['all']
container2_guts = get_container_contents(split_delim="\n",
container=container2)['all']
# Do the comparison for each metric
return compare_lists(container1_guts, container2_guts) | compare_containers will generate a data structure with common and unique files to
two images. If environmental variable SINGULARITY_HUB is set, will use container
database objects.
:param container1: first container for comparison
:param container2: second container for comparison; both containers must be defined.
By default, only files are compared. | entailment |
def compare_lists(list1,list2):
'''compare lists is the lowest level that drives compare_containers and
compare_packages. It returns a comparison object (dict) with the unique,
total, and intersecting things between two lists
:param list1: the list for container1
:param list2: the list for container2
'''
intersect = list(set(list1).intersection(list2))
unique1 = list(set(list1).difference(list2))
unique2 = list(set(list2).difference(list1))
# Return data structure
comparison = {"intersect":intersect,
"unique1": unique1,
"unique2": unique2,
"total1": len(list1),
"total2": len(list2)}
return comparison | compare lists is the lowest level that drives compare_containers and
compare_packages. It returns a comparison object (dict) with the unique,
total, and intersecting things between two lists
:param list1: the list for container1
:param list2: the list for container2 | entailment |
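A tiny worked example of the comparison structure compare_lists returns:
c = compare_lists(["/bin/ls", "/bin/cat", "/etc/hosts"], ["/bin/ls", "/etc/fstab"])
print(sorted(c["intersect"]))    # ['/bin/ls']
print(sorted(c["unique1"]))      # ['/bin/cat', '/etc/hosts']
print(c["total1"], c["total2"])  # 3 2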
def calculate_similarity(container1=None,
container2=None,
comparison=None,
metric=None):
'''calculate_similarity will calculate similarity of two containers
by files content, default will calculate
2.0*len(intersect) / (total package1 + total package2)
Parameters
==========
container1: container 1
container2: container 2 (must be defined unless comparison is provided)
metric: a function that takes total1, total2, and intersect counts
(we can make this more general if/when more are added;
comparisons are currently based on files.txt or folders.txt)
comparison: the comparison result object for the tree. If provided,
will skip over the function to obtain it.
'''
if metric is None:
metric = information_coefficient
if comparison == None:
comparison = compare_containers(container1=container1,
container2=container2)
return metric(total1=comparison['total1'],
total2=comparison['total2'],
intersect=comparison["intersect"]) | calculate_similarity will calculate similarity of two containers
by files content, default will calculate
2.0*len(intersect) / (total package1 + total package2)
Parameters
==========
container1: container 1
container2: container 2 (must be defined unless comparison is provided)
metric: a function that takes total1, total2, and intersect counts
(we can make this more general if/when more are added;
comparisons are currently based on files.txt or folders.txt)
comparison: the comparison result object for the tree. If provided,
will skip over the function to obtain it. | entailment |
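A hedged numeric sketch of the default score, 2*len(intersect) / (total1 + total2), fed a pre-built comparison dict so no containers are needed; information_coefficient is assumed to implement exactly that formula, as the docstring states:
comparison = {"total1": 3, "total2": 2, "intersect": ["/bin/ls"]}
print(calculate_similarity(comparison=comparison))   # 2 * 1 / (3 + 2) = 0.4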
def package_node(root=None, name=None):
'''package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use the machine's
hostname (platform.node())
'''
if name is None:
name = platform.node()
if root is None:
root = "/"
tmpdir = tempfile.mkdtemp()
image = "%s/%s.tgz" %(tmpdir,name)
print("Preparing to package root %s into %s" %(root,name))
cmd = ["tar","--one-file-system","-czvSf", image, root,"--exclude",image]
output = run_command(cmd)
return image | package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use the machine's
hostname (platform.node()) | entailment |
def unpack_node(image_path,name=None,output_folder=None,size=None):
'''unpackage node is intended to unpackage a node that was packaged with
package_node. The image should be a .tgz file. The general steps are to:
1. Package the node using the package_node function
2. Transfer the package somewhere that Singularity is installed'''
if not image_path.endswith(".tgz"):
bot.error("The image_path should end with .tgz. Did you create with package_node?")
sys.exit(1)
if output_folder is None:
output_folder = os.path.dirname(os.path.abspath(image_path))
image_name = os.path.basename(image_path)
if name is None:
name = image_name.replace('.tgz','.img')
if not name.endswith('.img'):
name = "%s.img" %(name)
bot.debug("Preparing to unpack %s to %s." %(image_name,name))
unpacked_image = "%s/%s" %(output_folder,name)
if not os.path.exists(unpacked_image):
os.mkdir(unpacked_image)
cmd = ["gunzip","-dc",image_path,"|","sudo","singularity","import", unpacked_image]
output = run_command(cmd)
# TODO: singularity mount the container, cleanup files (/etc/fstab,...)
# and add your custom singularity files.
return unpacked_image | unpackage node is intended to unpackage a node that was packaged with
package_node. The image should be a .tgz file. The general steps are to:
1. Package the node using the package_node function
2. Transfer the package somewhere that Singularity is installed | entailment |
def get_build_template(template_name,params=None,to_file=None):
'''get_build template returns a string or file for a particular build template, which is
intended to build a version of a Singularity image on a cloud resource.
:param template_name: the name of the template to retrieve in build/scripts
:param params: (if needed) a dictionary of parameters to substitute in the file
:param to_file: if defined, will write to file. Default returns string.
'''
base = get_installdir()
template_folder = "%s/build/scripts" %(base)
template_file = "%s/%s" %(template_folder,template_name)
if os.path.exists(template_file):
bot.debug("Found template %s" %template_file)
# Implement when needed - substitute params here
# Will need to read in file instead of copying below
# if params != None:
if to_file is not None:
shutil.copyfile(template_file,to_file)
bot.debug("Template file saved to %s" %to_file)
return to_file
# If the user wants a string
content = ''.join(read_file(template_file))
return content
else:
bot.warning("Template %s not found." %template_file) | get_build template returns a string or file for a particular build template, which is
intended to build a version of a Singularity image on a cloud resource.
:param template_name: the name of the template to retrieve in build/scripts
:param params: (if needed) a dictionary of parameters to substitute in the file
:param to_file: if defined, will write to file. Default returns string. | entailment |
def sniff_extension(file_path,verbose=True):
'''sniff_extension will attempt to determine the file type based on the extension,
and return the proper mimetype
:param file_path: the full path to the file to sniff
:param verbose: print stuff out
'''
mime_types = { "xls": 'application/vnd.ms-excel',
"xlsx": 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
"xml": 'text/xml',
"ods": 'application/vnd.oasis.opendocument.spreadsheet',
"csv": 'text/plain',
"tmpl": 'text/plain',
"pdf": 'application/pdf',
"php": 'application/x-httpd-php',
"jpg": 'image/jpeg',
"png": 'image/png',
"gif": 'image/gif',
"bmp": 'image/bmp',
"txt": 'text/plain',
"doc": 'application/msword',
"js": 'text/js',
"swf": 'application/x-shockwave-flash',
"mp3": 'audio/mpeg',
"zip": 'application/zip',
"simg": 'application/zip',
"rar": 'application/rar',
"tar": 'application/tar',
"arj": 'application/arj',
"cab": 'application/cab',
"html": 'text/html',
"htm": 'text/html',
"default": 'application/octet-stream',
"folder": 'application/vnd.google-apps.folder',
"img" : "application/octet-stream" }
ext = os.path.basename(file_path).split('.')[-1]
mime_type = mime_types.get(ext,None)
if mime_type == None:
mime_type = mime_types['txt']
if verbose==True:
bot.info("%s --> %s" %(file_path, mime_type))
return mime_type | sniff_extension will attempt to determine the file type based on the extension,
and return the proper mimetype
:param file_path: the full path to the file to sniff
:param verbose: print stuff out | entailment |
def get_script(script_name):
'''get_script will return a build script_name, if it is included
in singularity/build/scripts, otherwise will alert the user and return None
:param script_name: the name of the script to look for
'''
install_dir = get_installdir()
script_path = "%s/build/scripts/%s" %(install_dir,script_name)
if os.path.exists(script_path):
return script_path
else:
bot.error("Script %s is not included in singularity-python!" %script_path)
return None | get_script will return a build script_name, if it is included
in singularity/build/scripts, otherwise will alert the user and return None
:param script_name: the name of the script to look for | entailment |
def zip_up(file_list,zip_name,output_folder=None):
'''zip_up will zip up a set of files into a package (.zip)
:param file_list: a dict mapping archive names to content to include in the zip
(the special key "files" holds paths of existing files to move into the archive).
:param output_folder: the output folder to create the zip in. If not
specified, a temporary folder will be used.
:param zip_name: the name of the zipfile to return.
'''
tmpdir = tempfile.mkdtemp()
# Make a new archive
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
# Write files to zip, depending on type
for filename,content in file_list.items():
bot.debug("Adding %s to package..." %filename)
# If it's the files list, move files into the archive
if filename.lower() == "files":
if not isinstance(content,list):
content = [content]
for copyfile in content:
zf.write(copyfile,os.path.basename(copyfile))
os.remove(copyfile)
else:
output_file = "%s/%s" %(tmpdir, filename)
# If it's a list, write to new file, and save
if isinstance(content,list):
write_file(output_file,"\n".join(content))
# If it's a dict, save to json
elif isinstance(content,dict):
write_json(content,output_file)
# If bytes, need to decode
elif isinstance(content,bytes):
write_file(output_file,content.decode('utf-8'))
# String or other
else:
output_file = write_file(output_file,content)
if os.path.exists(output_file):
zf.write(output_file,filename)
os.remove(output_file)
# Close the zip file
zf.close()
if output_folder is not None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip | zip_up will zip up a set of files into a package (.zip)
:param file_list: a dict mapping archive names to content to include in the zip
(the special key "files" holds paths of existing files to move into the archive).
:param output_folder: the output folder to create the zip in. If not
specified, a temporary folder will be used.
:param zip_name: the name of the zipfile to return. | entailment |
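A short sketch of the file_list mapping described above; the keys, values, and output folder are illustrative, and the module's write_file/write_json helpers are assumed to behave as their names suggest:
payload = {
    "VERSION": "1.0",                      # plain string, written as a text file
    "manifest.json": {"name": "demo"},     # dict, written as JSON
    "files.txt": ["/bin/ls", "/bin/cat"],  # list, written newline-joined
}
print(zip_up(payload, "demo-package.zip", output_folder="/tmp"))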
def get_container_contents(container=None, gets=None, split_delim=None, image_package=None):
'''get_container_contents will return a list of folders and or files
for a container. The environmental variable SINGULARITY_HUB being set
means that container objects are referenced instead of packages
:param container: the container to get content for
:param gets: a list of file names to return, without parent folders
:param split_delim: if defined, will split text by split delimiter
'''
# We will look for everything in guts, then return it
guts = dict()
SINGULARITY_HUB = os.environ.get('SINGULARITY_HUB',"False")
# Visualization deployed local or elsewhere
if SINGULARITY_HUB == "False":
file_obj,tar = get_image_tar(container)
guts = extract_guts(image_path=container, tar=tar)
delete_image_tar(file_obj, tar)
# Visualization deployed by singularity hub
else:
# user has provided a package, but not a container
if container == None:
guts = load_package(image_package,get=gets)
# user has provided a container, but not a package
else:
for sfile in container.files:
for gut_key in gets:
if os.path.basename(sfile['name']) == gut_key:
if split_delim == None:
guts[gut_key] = requests.get(sfile['mediaLink']).text
else:
guts[gut_key] = requests.get(sfile['mediaLink']).text.split(split_delim)
return guts | get_container_contents will return a list of folders and or files
for a container. The environmental variable SINGULARITY_HUB being set
means that container objects are referenced instead of packages
:param container: the container to get content for
:param gets: a list of file names to return, without parent folders
:param split_delim: if defined, will split text by split delimiter | entailment |
def get_image_hashes(image_path, version=None, levels=None):
'''get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level.
'''
if levels is None:
levels = get_levels(version=version)
hashes = dict()
for level_name,level_filter in levels.items():
hashes[level_name] = get_image_hash(image_path,
level_filter=level_filter)
return hashes | get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level. | entailment |
def get_image_hash(image_path,
level=None,level_filter=None,
include_files=None,
skip_files=None,
version=None):
'''get_image_hash will generate an md5 hash of an image, depending on a level
of reproducibility specified by the user. (see function get_levels for descriptions)
the user can also provide a level_filter manually with level_filter (for custom levels)
:param level: the level of reproducibility to use, which maps to a set regular
expression to match particular files/folders in the image. Choices are in notes.
:param skip_files: an optional list of files to skip
:param include_files: an optional list of files to keep (only if level not defined)
:param version: the version to use. If not defined, default is 2.3
::notes
LEVEL DEFINITIONS
The level definitions come down to including folders/files in the comparison. For files
that Singularity produces on the fly that might be different (timestamps) but equal content
(eg for a replication) we hash the content ("assess_content") instead of the file.
'''
# First get a level dictionary, with description and regexp
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("RECIPE",
version=version,
include_files=include_files,
skip_files=skip_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj, tar = get_image_tar(image_path)
hasher = hashlib.md5()
for member in tar:
member_name = member.name.replace('.','',1)
# For files, we either assess content, or include the file
if member.isdir() or member.issym():
continue
elif assess_content(member,file_filter):
content = extract_content(image_path,member.name)
hasher.update(content)
elif include_file(member,file_filter):
buf = member.tobuf()
hasher.update(buf)
digest = hasher.hexdigest()
# Close up / remove files
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
return digest | get_image_hash will generate an md5 hash of an image, depending on a level
of reproducibility specified by the user. (see function get_levels for descriptions)
the user can also provide a level_filter manually with level_filter (for custom levels)
:param level: the level of reproducibility to use, which maps to a set regular
expression to match particular files/folders in the image. Choices are in notes.
:param skip_files: an optional list of files to skip
:param include_files: an optional list of files to keep (only if level not defined)
:param version: the version to use. If not defined, default is 2.3
::notes
LEVEL DEFINITIONS
The level definitions come down to including folders/files in the comparison. For files
that Singularity produces on the fly that might be different (timestamps) but equal content
(eg for a replication) we hash the content ("assess_content") instead of the file. | entailment |
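A hedged sketch of hashing an image with a custom filter built by get_custom_level (defined later in this section); the image path is hypothetical and must point at a real Singularity image:
level_filter = get_custom_level(regexp="^/(bin|etc|usr)",
                                description="binaries and config only",
                                skip_files=["/etc/hosts"])
digest = get_image_hash("analysis.img", level_filter=level_filter)
print(digest)   # md5 hex digest over the filtered tar members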
def get_content_hashes(image_path,
level=None,
regexp=None,
include_files=None,
tag_root=True,
level_filter=None,
skip_files=None,
version=None,
include_sizes=True):
'''get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes
'''
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("REPLICATE",version=version,
skip_files=skip_files,
include_files=include_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj,tar = get_image_tar(image_path)
results = extract_guts(image_path=image_path,
tar=tar,
file_filter=file_filter,
tag_root=tag_root,
include_sizes=include_sizes)
delete_image_tar(file_obj, tar)
return results | get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes | entailment |
def get_image_file_hash(image_path):
'''get_image_file_hash will return an md5 hash of the entire image file.
:param image_path: full path to the singularity image
'''
hasher = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest() | get_image_file_hash will return an md5 hash of the entire image file.
:param image_path: full path to the singularity image | entailment |
def container_difference(container=None,container_subtract=None,image_package=None,
image_package_subtract=None,comparison=None):
'''container_difference will return a data structure to render an html
tree (graph) of the differences between two images or packages. The second
container is subtracted from the first
:param container: the primary container object (to subtract from)
:param container_subtract: the second container object to remove
:param image_package: a zipped package for image 1, created with package
:param image_package_subtract: a zipped package for subtraction image, created with package
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
'''
if comparison == None:
comparison = compare_containers(container1=container,
container2=container_subtract,
image_package1=image_package,
image_package2=image_package_subtract,
by=['files.txt','folders.txt'])
files = comparison["files.txt"]['unique1']
folders = comparison['folders.txt']['unique1']
tree = make_container_tree(folders=folders,
files=files)
return tree | container_difference will return a data structure to render an html
tree (graph) of the differences between two images or packages. The second
container is subtracted from the first
:param container: the primary container object (to subtract from)
:param container_subtract: the second container object to remove
:param image_package: a zipped package for image 1, created with package
:param image_package_subtract: a zipped package for subtraction image, created with package
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it. | entailment |
def container_similarity(container1=None,container2=None,image_package1=None,
image_package2=None,comparison=None):
'''container_sim will return a data structure to render an html tree
(graph) of the intersection (commonalities) between two images or packages
:param container1: the first container object
:param container2: the second container object; if either is not defined, the corresponding image package is needed
:param image_package1: a packaged container1 (produced by package)
:param image_package2: a packaged container2 (produced by package)
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
'''
if comparison == None:
comparison = compare_containers(container1=container1,
container2=container2,
image_package1=image_package1,
image_package2=image_package2,
by=['files.txt','folders.txt'])
files = comparison["files.txt"]['intersect']
folders = comparison['folders.txt']['intersect']
tree = make_container_tree(folders=folders,
files=files)
return tree | container_sim will return a data structure to render an html tree
(graph) of the intersection (commonalities) between two images or packages
:param container1: the first container object
:param container2: the second container object; if either is not defined, the corresponding image package is needed
:param image_package1: a packaged container1 (produced by package)
:param image_package2: a packaged container2 (produced by package)
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it. | entailment |
def container_tree(container=None,image_package=None):
'''tree will render an html tree (graph) of a container
'''
guts = get_container_contents(container=container,
image_package=image_package,
split_delim="\n")
# Make the tree and return it
tree = make_container_tree(folders = guts["folders.txt"],
files = guts['files.txt'])
return tree | tree will render an html tree (graph) of a container | entailment |
def make_container_tree(folders,files,path_delim="/",parse_files=True):
'''make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/'
'''
nodes = {} # first we will make a list of nodes
lookup = {}
count = 1 # count will hold an id for nodes
max_depth = 0
for folder in folders:
if folder != ".":
folder = re.sub("^[.]/","",folder)
path_components = folder.split(path_delim)
for p in range(len(path_components)):
path_component = path_components[p]
fullpath = path_delim.join(path_components[0:p+1])
# Have we created the node yet?
if fullpath not in lookup:
lookup[fullpath] = count
node = {"id":count,"name":path_component,"path":fullpath,"level":p,"children":[]}
count +=1
# Did we find a deeper level?
if p > max_depth:
max_depth = p
# Does the node have a parent?
if p==0: # base node, no parent
parent_id = 0
else: # look up the parent id
parent_path = path_delim.join(path_components[0:p])
parent_id = lookup[parent_path]
node["parent"] = parent_id
nodes[node['id']] = node
# Now make the graph, we simply append children to their parents
seen = []
graph = []
iters = list(range(max_depth+1)) # 0,1,2,3...
iters.reverse() # ...3,2,1,0
iters.pop() # remove 0
for level in iters:
children = {x:y for x,y in nodes.items() if y['level'] == level}
seen = seen + [y['id'] for x,y in children.items()]
nodes = {x:y for x,y in nodes.items() if y['id'] not in seen}
for node_id,child_node in children.items():
if node_id == 0: #base node
graph[node_id] = child_node
else:
parent_id = child_node['parent']
nodes[parent_id]["children"].append(child_node)
# Now add the parents to graph, with name as main lookup
for parent,parent_info in nodes.items():
graph.append(parent_info)
graph = {"name":"base","children":graph}
result = {"graph":graph,"lookup":lookup,"depth":max_depth+1}
# Parse files to include in tree
if parse_files == True:
file_lookup = {}
for filey in files:
filey = re.sub("^[.]/","",filey)
filepath,filename = os.path.split(filey)
if filepath in lookup:
folder_id = lookup[filepath]
if folder_id in file_lookup:
file_lookup[folder_id].append(filename)
else:
file_lookup[folder_id] = [filename]
elif filepath == '': # base folder
if 0 in file_lookup:
file_lookup[0].append(filename)
else:
file_lookup[0] = [filename]
result['files'] = file_lookup
return result | make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/' | entailment |
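A tiny worked example of the graph structure; the folder and file lists are illustrative:
tree = make_container_tree(folders=["bin", "bin/tools", "etc"],
                           files=["bin/bash", "bin/tools/awk", "etc/hosts"])
print(tree["depth"])                         # 2
print(sorted(tree["lookup"]))                # ['bin', 'bin/tools', 'etc']
print(tree["files"][tree["lookup"]["bin"]])  # ['bash']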
def make_package_tree(matrix=None,labels=None,width=25,height=10,title=None,font_size=None):
'''make package tree will make a dendrogram comparing a matrix of packages
:param matrix: a pandas df of packages, with names in index and columns
:param labels: a list of labels corresponding to row names, will be
pulled from rows if not defined
:param title: a title for the plot, if not defined, will be left out.
:returns a plot that can be saved with savefig
'''
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import (
dendrogram,
linkage
)
if font_size is None:
font_size = 8.
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if not isinstance(matrix,pandas.DataFrame):
bot.info("No pandas DataFrame (matrix) of similarities defined, will use default.")
matrix = compare_packages()['files.txt']
title = 'Docker Library Similarity to Base OS'
Z = linkage(matrix, 'ward')
c, coph_dists = cophenet(Z, pdist(matrix))
if labels == None:
labels = matrix.index.tolist()
plt.figure(figsize=(width, height))
if title != None:
plt.title(title)
plt.xlabel('image index')
plt.ylabel('distance')
dendrogram(Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=font_size, # font size for the x axis labels
labels=labels)
return plt | make package tree will make a dendrogram comparing a matrix of packages
:param matrix: a pandas df of packages, with names in index and columns
:param labels: a list of labels corresponding to row names, will be
pulled from rows if not defined
:param title: a title for the plot, if not defined, will be left out.
:returns a plot that can be saved with savefig | entailment |
def make_interactive_tree(matrix=None,labels=None):
'''make_interactive_tree will return a d3-compatible data structure (dict) for an interactive tree
:param matrix: a pandas DataFrame of similarity scores
:param labels: labels for the rows; if not defined, pulled from the matrix index
'''
from scipy.cluster.hierarchy import (
dendrogram,
linkage,
to_tree
)
d3 = None
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if isinstance(matrix,pandas.DataFrame):
Z = linkage(matrix, 'ward') # clusters
T = to_tree(Z, rd=False)
if labels == None:
labels = matrix.index.tolist()
lookup = dict(zip(range(len(labels)), labels))
# Create a dendrogram object without plotting
dend = dendrogram(Z,no_plot=True,
orientation="right",
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=8., # font size for the x axis labels
labels=labels)
d3 = dict(children=[], name="root")
add_node(T, d3)
label_tree(d3["children"][0],lookup)
else:
bot.warning('Please provide data as pandas Data Frame.')
return d3 | make_interactive_tree will return a d3-compatible data structure (dict) for an interactive tree
:param matrix: a pandas DataFrame of similarity scores
:param labels: labels for the rows; if not defined, pulled from the matrix index | entailment |
def add_node(node, parent):
'''add_node will add a node to its parent
'''
newNode = dict(node_id=node.id, children=[])
parent["children"].append(newNode)
if node.left: add_node(node.left, newNode)
if node.right: add_node(node.right, newNode) | add_node will add a node to its parent | entailment |
def label_tree(n,lookup):
'''label tree will again recursively label the tree
:param n: the root node, usually d3['children'][0]
:param lookup: the node/id lookup
'''
if len(n["children"]) == 0:
leaves = [lookup[n["node_id"]]]
else:
leaves = reduce(lambda ls, c: ls + label_tree(c,lookup), n["children"], [])
del n["node_id"]
n["name"] = name = "|||".join(sorted(map(str, leaves)))
return leaves | label tree will again recursively label the tree
:param n: the root node, usually d3['children'][0]
:param lookup: the node/id lookup | entailment |
def extract_apps(image, app_names):
''' extract_apps will extract metadata for one or more apps
Parameters
==========
image: the absolute path to the image
app_names: the name or names of apps under /scif/apps
'''
apps = dict()
if isinstance(app_names, tuple):
app_names = list(app_names)
if not isinstance(app_names, list):
app_names = [app_names]
if len(app_names) == 0:
return apps
for app_name in app_names:
metadata = dict()
# Inspect: labels, env, runscript, tests, help
try:
inspection = json.loads(Client.inspect(image, app=app_name))
del inspection['data']['attributes']['deffile']
metadata['inspect'] = inspection
# If illegal characters prevent load, not much we can do
except:
pass
apps[app_name] = metadata
return apps | extract_apps will extract metadata for one or more apps
Parameters
==========
image: the absolute path to the image
app_names: the name or names of apps under /scif/apps | entailment |
def run_command(cmd, sudo=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param sudo: if True, the command will be run with sudo
'''
if sudo is True:
cmd = ['sudo'] + cmd
output = Popen(cmd,stderr=STDOUT,stdout=PIPE)
t = output.communicate()[0],output.returncode
output = {'message':t[0],
'return_code':t[1]}
return output | run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param sudo: if True, the command will be run with sudo | entailment |
def download_repo(repo_url, destination, commit=None):
'''download_repo
:param repo_url: the url of the repo to clone from
:param destination: the full path to the destination for the repo
'''
command = "git clone %s %s" % (repo_url, destination)
os.system(command)
return destination | download_repo
:param repo_url: the url of the repo to clone from
:param destination: the full path to the destination for the repo | entailment |
def get_tags(container=None,
search_folders=None,
file_list=None,
return_unique=True):
'''get_tags will return a list of tags that describe the software in an image,
meaning inside of a particular folder. If search_folders is not defined, uses 'bin'
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param search_folders: specify one or more folders to look for tags
:param file_list: the list of files
:param return_unique: return unique files in folders. Default True.
Default is 'bin'
::notes
The algorithm works as follows:
1) first compare package to set of base OS (provided with shub)
2) subtract the most similar os from image, leaving "custom" files
3) organize custom files into dict based on folder name
4) return search_folders as tags
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if search_folders == None:
search_folders = 'bin'
if not isinstance(search_folders,list):
search_folders = [search_folders]
tags = []
for search_folder in search_folders:
for file_name in file_list:
if search_folder in file_name:
tags.append(file_name)
if return_unique == True:
tags = list(set(tags))
return tags | get_tags will return a list of tags that describe the software in an image,
meaning inside of a particular folder. If search_folders is not defined, uses 'bin'
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param search_folders: specify one or more folders to look for tags
:param file_list: the list of files
:param return_unique: return unique files in folders. Default True.
Default is 'bin'
::notes
The algorithm works as follows:
1) first compare package to set of base OS (provided with shub)
2) subtract the most similar os from image, leaving "custom" files
3) organize custom files into dict based on folder name
4) return search_folders as tags | entailment |
def file_counts(container=None,
patterns=None,
image_package=None,
file_list=None):
'''file_counts will return the number of files that match one or more regular expressions.
If no patterns are defined, a default of 'readme' is used. All patterns and files are made
case insensitive.
Parameters
==========
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param patterns: one or more patterns (str or list) of files to search for.
:param diff: the difference between a container and its parent OS from get_diff
if not provided, will be generated.
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if patterns == None:
patterns = 'readme'
if not isinstance(patterns,list):
patterns = [patterns]
count = 0
for pattern in patterns:
count += len([x for x in file_list if re.search(pattern.lower(),x.lower())])
bot.info("Total files matching patterns is %s" %count)
return count | file_counts will return the number of files that match one or more regular expressions.
If no patterns are defined, a default of 'readme' is used. All patterns and files are made
case insensitive.
Parameters
==========
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param patterns: one or more patterns (str or list) of files to search for.
:param diff: the difference between a container and its parent OS from get_diff
if not provided, will be generated. | entailment |
def extension_counts(container=None, file_list=None, return_counts=True):
'''extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
extensions = dict()
for item in file_list:
filename,ext = os.path.splitext(item)
if ext == '':
if return_counts == False:
extensions = update_dict(extensions,'no-extension',item)
else:
extensions = update_dict_sum(extensions,'no-extension')
else:
if return_counts == False:
extensions = update_dict(extensions,ext,item)
else:
extensions = update_dict_sum(extensions,ext)
return extensions | extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True | entailment |
def assess_differences(image_file1,
image_file2,
levels=None,
version=None,
size_heuristic=False,
guts1=None,
guts2=None):
'''assess_differences will compare two images on each level of
reproducibility, returning for each level a dictionary with files
that are the same, different, and an overall score.
:param size_heuristic: if True, assess root owned files based on size
:param guts1,guts2: the result (dict with sizes,roots,etc) from get_content_hashes
'''
if levels is None:
levels = get_levels(version=version)
reports = dict()
scores = dict()
for level_name, level_filter in levels.items():
contenders = []
different = []
setdiff = []
same = 0
# Compare the dictionary of file:hash between two images, and get root owned lookup
if guts1 is None:
guts1 = get_content_hashes(image_path=image_file1,
level_filter=level_filter)
# tag_root=True
# include_sizes=True
if guts2 is None:
guts2 = get_content_hashes(image_path=image_file2,
level_filter=level_filter)
print(level_name)
files = list(set(list(guts1['hashes'].keys()) + list(guts2['hashes'].keys())))
for file_name in files:
# If it's not in one or the other
if file_name not in guts1['hashes'] or file_name not in guts2['hashes']:
setdiff.append(file_name)
else:
if guts1['hashes'][file_name] == guts2['hashes'][file_name]:
same+=1
else:
# If the file is root owned, we compare based on size
if size_heuristic == True:
if guts1['root_owned'][file_name] or guts2['root_owned'][file_name]:
if guts1['sizes'][file_name] == guts2['sizes'][file_name]:
same+=1
else:
different.append(file_name)
else:
# Otherwise, we can assess the bytes content by reading it
contenders.append(file_name)
else:
contenders.append(file_name)
# If the user wants identical (meaning extraction order and timestamps)
if level_name == "IDENTICAL":
different = different + contenders
# Otherwise we need to check based on byte content
else:
if len(contenders) > 0:
for rogue in contenders:
hashy1 = extract_content(image_file1, rogue, return_hash=True)
hashy2 = extract_content(image_file2, rogue, return_hash=True)
# If we can't compare, we use size as a heuristic
if hashy1 is None or hashy2 is None: # if one is symlink, could be None
different.append(rogue)
elif len(hashy1) == 0 or len(hashy2) == 0:
if guts1['sizes'][rogue] == guts2['sizes'][rogue]:
same+=1
else:
different.append(rogue)
elif hashy1 != hashy2:
different.append(rogue)
else:
same+=1
# We use a Jaccard-like coefficient: twice the shared information in the numerator
# (the intersection, same), as a proportion of the total summed files
union = len(guts1['hashes']) + len(guts2['hashes'])
report = {'difference': setdiff,
'intersect_different': different,
'same':same,
'union': union}
if union == 0:
scores[level_name] = 0
else:
scores[level_name] = 2*(same) / union
reports[level_name] = report
reports['scores'] = scores
return reports | assess_differences will compare two images on each level of
reproducibility, returning for each level a dictionary with files
that are the same, different, and an overall score.
:param size_heuristic: if True, assess root owned files based on size
:param guts1,guts2: the result (dict with sizes,roots,etc) from get_content_hashes | entailment |
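A hedged usage sketch for assess_differences; the image paths are hypothetical, and 'IDENTICAL' is the one level name confirmed in the code above (other level names come from reproduce_levels.json):
reports = assess_differences(image_file1='/tmp/imageA.simg',
                             image_file2='/tmp/imageB.simg',
                             size_heuristic=True)
print(reports['scores'])                    # one similarity score per level
print(reports['IDENTICAL']['difference'])   # files present in only one of the two images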
def include_file(member,file_filter):
'''include_file will look at a path and determine
if it matches a regular expression from a level
'''
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
# Does the filter skip it explicitly?
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
# Include explicitly?
if "include_files" in file_filter:
if member_path in file_filter['include_files']:
return True
# Regular expression?
if "regexp" in file_filter:
if re.search(file_filter["regexp"],member_path):
return True
return False | include_file will look at a path and determine
if it matches a regular expression from a level | entailment |
def is_root_owned(member):
'''assess if a file is root owned, meaning "root" or user/group
id of 0'''
if member.uid == 0 or member.gid == 0:
return True
elif member.uname == 'root' or member.gname == 'root':
return True
return False | assess if a file is root owned, meaning "root" or user/group
id of 0 | entailment |
def assess_content(member,file_filter):
'''Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object.
'''
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
# Does the filter skip it explicitly?
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
if "assess_content" in file_filter:
if member_path in file_filter['assess_content']:
return True
return False | Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object. | entailment |
def get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):
'''get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description
'''
    if regexp is None:
regexp = "."
if description is None:
description = "This is a custom filter generated by the user."
custom = {"description":description,
"regexp":regexp}
# Include extra files?
if include_files is not None:
if not isinstance(include_files,set):
include_files = set(include_files)
custom['include_files'] = include_files
# Skip files?
if skip_files is not None:
if not isinstance(skip_files,set):
skip_files = set(skip_files)
custom['skip_files'] = skip_files
return custom | get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description | entailment |
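A short, hedged example of building a custom filter with get_custom_level; the paths are made up:
custom = get_custom_level(regexp='^/etc',
                          description='Compare only files under /etc',
                          skip_files=['/etc/hostname'],
                          include_files=['/usr/local/etc/app.conf'])
# custom['skip_files'] and custom['include_files'] are sets, ready to be used by include_file()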
def get_level(level,version=None,include_files=None,skip_files=None):
'''get_level returns a single level, with option to customize files
added and skipped.
'''
levels = get_levels(version=version)
level_names = list(levels.keys())
if level.upper() in level_names:
level = levels[level]
else:
bot.warning("%s is not a valid level. Options are %s" %(level.upper(),
"\n".join(levels)))
return None
# Add additional files to skip or remove, if defined
if skip_files is not None:
level = modify_level(level,'skip_files',skip_files)
if include_files is not None:
level = modify_level(level,'include_files',include_files)
level = make_level_set(level)
return level | get_level returns a single level, with option to customize files
added and skipped. | entailment |
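A hedged sketch of pulling one named level; 'LABELS' is known to exist for version 2.3 from the get_levels code below, and the printed keys are only illustrative:
level = get_level('labels', version='2.3')
if level is not None:
    print(sorted(level.keys()))   # e.g. ['assess_content', 'description', 'regexp']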
def modify_level(level,field,values,append=True):
'''modify level is intended to add / modify a content type.
Default content type is list, meaning the entry is appended.
If you set append to False, the content will be overwritten
For any other content type, the entry is overwritten.
'''
field = field.lower()
valid_fields = ['regexp','skip_files','include_files']
if field not in valid_fields:
bot.warning("%s is not a valid field, skipping. Choices are %s" %(field,",".join(valid_fields)))
return level
if append:
if not isinstance(values,list):
values = [values]
if field in level:
level[field] = level[field] + values
else:
level[field] = values
else:
level[field] = values
level = make_level_set(level)
return level | modify level is intended to add / modify a content type.
Default content type is list, meaning the entry is appended.
If you set append to False, the content will be overwritten
For any other content type, the entry is overwritten. | entailment |
def get_levels(version=None):
'''get_levels returns a dictionary of levels (key) and values (dictionaries with
descriptions and regular expressions for files) for the user.
    :param version: the version of singularity to use (default is 2.3)
'''
valid_versions = ['2.3','2.2']
if version is None:
version = "2.3"
version = str(version)
if version not in valid_versions:
bot.error("Unsupported version %s, valid versions are %s" %(version,
",".join(valid_versions)))
levels_file = os.path.abspath(os.path.join(get_installdir(),
'analysis',
'reproduce',
'data',
'reproduce_levels.json'))
levels = read_json(levels_file)
if version == "2.2":
# Labels not added until 2.3
del levels['LABELS']
levels = make_levels_set(levels)
return levels | get_levels returns a dictionary of levels (key) and values (dictionaries with
descriptions and regular expressions for files) for the user.
    :param version: the version of singularity to use (default is 2.3) | entailment
def make_levels_set(levels):
'''make set efficient will convert all lists of items
in levels to a set to speed up operations'''
for level_key,level_filters in levels.items():
levels[level_key] = make_level_set(level_filters)
return levels | make set efficient will convert all lists of items
in levels to a set to speed up operations | entailment |
def make_level_set(level):
'''make level set will convert one level into
a set'''
new_level = dict()
for key,value in level.items():
if isinstance(value,list):
new_level[key] = set(value)
else:
new_level[key] = value
return new_level | make level set will convert one level into
a set | entailment |
def extract_guts(image_path,
tar,
file_filter=None,
tag_root=True,
include_sizes=True):
'''extract the file guts from an in memory tarfile. The file is not closed.
This should not be done for large images.
'''
if file_filter is None:
file_filter = get_level('IDENTICAL')
results = dict()
digest = dict()
allfiles = []
if tag_root:
roots = dict()
if include_sizes:
sizes = dict()
for member in tar:
member_name = member.name.replace('.','',1)
allfiles.append(member_name)
included = False
if member.isdir() or member.issym():
continue
elif assess_content(member,file_filter):
digest[member_name] = extract_content(image_path, member.name, return_hash=True)
included = True
elif include_file(member,file_filter):
hasher = hashlib.md5()
buf = member.tobuf()
hasher.update(buf)
digest[member_name] = hasher.hexdigest()
included = True
if included:
if include_sizes:
sizes[member_name] = member.size
if tag_root:
roots[member_name] = is_root_owned(member)
results['all'] = allfiles
results['hashes'] = digest
if include_sizes:
results['sizes'] = sizes
if tag_root:
results['root_owned'] = roots
return results | extract the file guts from an in memory tarfile. The file is not closed.
This should not be done for large images. | entailment |
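A hedged sketch of wiring the tar helpers and extract_guts together; the image path is hypothetical, and get_image_tar / delete_image_tar are defined a little further down in this document:
image_path = '/tmp/analysis.simg'
file_obj, tar = get_image_tar(image_path)
try:
    guts = extract_guts(image_path=image_path,
                        tar=tar,
                        file_filter=get_level('IDENTICAL'))
    print(len(guts['all']), 'files walked,', len(guts['hashes']), 'hashed')
finally:
    delete_image_tar(file_obj, tar)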
def get_memory_tar(image_path):
'''get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar
'''
byte_array = Client.image.export(image_path)
file_object = io.BytesIO(byte_array)
tar = tarfile.open(mode="r|*", fileobj=file_object)
return (file_object,tar) | get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar | entailment |
def get_image_tar(image_path):
'''get an image tar, either written in memory or to
the file system. file_obj will either be the file object,
or the file itself.
'''
bot.debug('Generate file system tar...')
file_obj = Client.image.export(image_path=image_path)
if file_obj is None:
bot.error("Error generating tar, exiting.")
sys.exit(1)
tar = tarfile.open(file_obj)
return file_obj, tar | get an image tar, either written in memory or to
the file system. file_obj will either be the file object,
or the file itself. | entailment |
def delete_image_tar(file_obj, tar):
'''delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)'''
    deleted = False
    try:
        # in-memory tar: we just need to close the file object
        file_obj.close()
        deleted = True
    except AttributeError:
        tar.close()
    # on-disk tar: file_obj is a path that should be removed
    if isinstance(file_obj, str) and os.path.exists(file_obj):
        os.remove(file_obj)
        deleted = True
    bot.debug('Deleted temporary tar.')
    return deleted
memory) or delete from the file system (if saved to disk) | entailment |
def extract_content(image_path, member_name, return_hash=False):
'''extract_content will extract content from an image using cat.
    If return_hash=True, an md5 hash of the content is returned instead
'''
if member_name.startswith('./'):
member_name = member_name.replace('.','',1)
if return_hash:
hashy = hashlib.md5()
try:
content = Client.execute(image_path,'cat %s' %(member_name))
    except Exception:
return None
if not isinstance(content,bytes):
content = content.encode('utf-8')
content = bytes(content)
# If permissions don't allow read, return None
if len(content) == 0:
return None
if return_hash:
hashy.update(content)
return hashy.hexdigest()
return content | extract_content will extract content from an image using cat.
    If return_hash=True, an md5 hash of the content is returned instead | entailment
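A small, hedged example of extract_content; the image and member paths are made up:
content = extract_content('/tmp/analysis.simg', './etc/hosts')
digest = extract_content('/tmp/analysis.simg', './etc/hosts', return_hash=True)
if digest is not None:
    print(digest)   # md5 hexdigest of the file content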
def run_build(build_dir, params, verbose=True):
'''run_build takes a build directory and params dictionary, and does the following:
- downloads repo to a temporary directory
- changes branch or commit, if needed
- creates and bootstraps singularity image from Singularity file
- returns a dictionary with:
image (path), metadata (dict)
The following must be included in params:
spec_file, repo_url, branch, commit
'''
# Download the repository
download_repo(repo_url=params['repo_url'],
destination=build_dir)
os.chdir(build_dir)
    if params['branch'] is not None:
bot.info('Checking out branch %s' %params['branch'])
os.system('git checkout %s' %(params['branch']))
else:
params['branch'] = "master"
# Set the debug level
Client.debug = params['debug']
# Commit
if params['commit'] not in [None,'']:
bot.info('Checking out commit %s' %params['commit'])
os.system('git checkout %s .' %(params['commit']))
# From here on out commit is used as a unique id, if we don't have one, we use current
else:
params['commit'] = os.popen('git log -n 1 --pretty=format:"%H"').read()
bot.warning("commit not specified, setting to current %s" %params['commit'])
# Dump some params for the builder, in case it fails after this
passing_params = "/tmp/params.pkl"
pickle.dump(params, open(passing_params,'wb'))
# Now look for spec file
if os.path.exists(params['spec_file']):
bot.info("Found spec file %s in repository" %params['spec_file'])
# If the user has a symbolic link
if os.path.islink(params['spec_file']):
bot.info("%s is a symbolic link." %params['spec_file'])
params['spec_file'] = os.path.realpath(params['spec_file'])
# START TIMING
start_time = datetime.now()
# Secure Build
image = Client.build(recipe=params['spec_file'],
build_folder=build_dir,
isolated=True)
        # Save hash for metadata (also is image name)
version = get_image_file_hash(image)
params['version'] = version
pickle.dump(params, open(passing_params,'wb'))
# Rename image to be hash
finished_image = "%s/%s.simg" %(os.path.dirname(image), version)
image = shutil.move(image, finished_image)
final_time = (datetime.now() - start_time).seconds
bot.info("Final time of build %s seconds." %final_time)
# Did the container build successfully?
test_result = test_container(image)
if test_result['return_code'] != 0:
bot.error("Image failed to build, cancelling.")
sys.exit(1)
# Get singularity version
singularity_version = Client.version()
Client.debug = False
inspect = Client.inspect(image) # this is a string
Client.debug = params['debug']
# Get information on apps
Client.debug = False
app_names = Client.apps(image)
Client.debug = params['debug']
apps = extract_apps(image, app_names)
metrics = {'build_time_seconds': final_time,
'singularity_version': singularity_version,
'singularity_python_version': singularity_python_version,
'inspect': inspect,
'version': version,
'apps': apps}
output = {'image':image,
'metadata':metrics,
'params':params }
return output
else:
# Tell the user what is actually there
present_files = glob("*")
bot.error("Build file %s not found in repository" %params['spec_file'])
bot.info("Found files are %s" %"\n".join(present_files))
# Params have been exported, will be found by log
sys.exit(1) | run_build takes a build directory and params dictionary, and does the following:
- downloads repo to a temporary directory
- changes branch or commit, if needed
- creates and bootstraps singularity image from Singularity file
- returns a dictionary with:
image (path), metadata (dict)
The following must be included in params:
spec_file, repo_url, branch, commit | entailment |
def send_build_data(build_dir, data, secret,
response_url=None,clean_up=True):
'''finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory
'''
# Send with Authentication header
body = '%s|%s|%s|%s|%s' %(data['container_id'],
data['commit'],
data['branch'],
data['token'],
data['tag'])
signature = generate_header_signature(secret=secret,
payload=body,
request_type="push")
headers = {'Authorization': signature }
if response_url is not None:
finish = requests.post(response_url,data=data, headers=headers)
bot.debug("RECEIVE POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
else:
bot.warning("response_url set to None, skipping sending of build.")
if clean_up == True:
shutil.rmtree(build_dir)
# Delay a bit, to give buffer between bringing instance down
time.sleep(20) | finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory | entailment |
def send_build_close(params,response_url):
'''send build close sends a final response (post) to the server to bring down
the instance. The following must be included in params:
repo_url, logfile, repo_id, secret, log_file, token
'''
# Finally, package everything to send back to shub
response = {"log": json.dumps(params['log_file']),
"repo_url": params['repo_url'],
"logfile": params['logfile'],
"repo_id": params['repo_id'],
"container_id": params['container_id']}
body = '%s|%s|%s|%s|%s' %(params['container_id'],
params['commit'],
params['branch'],
params['token'],
params['tag'])
signature = generate_header_signature(secret=params['token'],
payload=body,
request_type="finish")
headers = {'Authorization': signature }
finish = requests.post(response_url,data=response, headers=headers)
bot.debug("FINISH POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
return finish | send build close sends a final response (post) to the server to bring down
the instance. The following must be included in params:
repo_url, logfile, repo_id, secret, log_file, token | entailment |
def remove_unicode_dict(input_dict):
'''remove unicode keys and values from dict, encoding in utf8
'''
    # strings are Iterable too, so encode them here instead of recursing into them
    if isinstance(input_dict, str):
        return input_dict.encode('utf-8')
    elif isinstance(input_dict, collections.Mapping):
        return dict(map(remove_unicode_dict, input_dict.items()))
    elif isinstance(input_dict, collections.Iterable):
        return type(input_dict)(map(remove_unicode_dict, input_dict))
    else:
        return input_dict
def update_dict(input_dict,key,value):
'''update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
'''
if key in input_dict:
input_dict[key].append(value)
else:
input_dict[key] = [value]
return input_dict | update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with | entailment |
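A quick illustration of update_dict grouping values under a shared key:
groups = dict()
groups = update_dict(groups, '.py', 'setup.py')
groups = update_dict(groups, '.py', 'shub/main.py')
groups = update_dict(groups, '.md', 'README.md')
# groups == {'.py': ['setup.py', 'shub/main.py'], '.md': ['README.md']}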
def update_dict_sum(input_dict,key,increment=None,initial_value=None):
'''update_dict sum will increment a dictionary key
by an increment, and add a value of 0 if it doesn't exist
:param input_dict: the dict to update
:param increment: the value to increment by. Default is 1
:param initial_value: value to start with. Default is 0
'''
    if increment is None:
        increment = 1
    if initial_value is None:
        initial_value = 0
if key in input_dict:
input_dict[key] += increment
else:
input_dict[key] = initial_value + increment
return input_dict | update_dict sum will increment a dictionary key
by an increment, and add a value of 0 if it doesn't exist
:param input_dict: the dict to update
:param increment: the value to increment by. Default is 1
:param initial_value: value to start with. Default is 0 | entailment |
def information_coefficient(total1,total2,intersect):
    '''a simple Jaccard (information coefficient) to compare two lists of overlaps/diffs
'''
total = total1 + total2
    return 2.0*len(intersect) / total | a simple Jaccard (information coefficient) to compare two lists of overlaps/diffs | entailment
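A tiny worked example of information_coefficient: with 8 and 6 files and 5 shared, the score is 2*5/14, about 0.71:
files1 = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
files2 = {'a', 'b', 'c', 'd', 'e', 'x'}
score = information_coefficient(len(files1), len(files2), files1.intersection(files2))
print(round(score, 2))   # 0.71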
def RSA(m1,m2):
'''RSA analysis will compare the similarity of two matrices
'''
from scipy.stats import pearsonr
import scipy.linalg
import numpy
    # Mask the upper triangle (including the diagonal) of each matrix, keeping the lower triangle, and flatten to a vector
    vectorm1 = m1.mask(numpy.triu(numpy.ones(m1.shape)).astype(bool)).values.flatten()
    vectorm2 = m2.mask(numpy.triu(numpy.ones(m2.shape)).astype(bool)).values.flatten()
# Now remove the nans
m1defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm1,dtype=float)))
m2defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm2,dtype=float)))
idx = numpy.intersect1d(m1defined,m2defined)
return pearsonr(vectorm1[idx],vectorm2[idx])[0] | RSA analysis will compare the similarity of two matrices | entailment |
def get_google_service(service_type=None,version=None):
'''
get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1)
'''
    if service_type is None:
        service_type = "storage"
    if version is None:
        version = "v1"
credentials = GoogleCredentials.get_application_default()
return build(service_type, version, credentials=credentials) | get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1) | entailment |
def upload_file(storage_service,bucket,bucket_path,file_name,verbose=True):
    '''upload_file will upload a file to a bucket path in Google Storage, returning
    the upload response metadata. If the file does not exist, a warning is issued
    and None is returned
:param storage_service: the drive_service created from get_storage_service
:param bucket: the bucket object from get_bucket
:param file_name: the name of the file to upload
:param bucket_path: the path to upload to
'''
# Set up path on bucket
upload_path = "%s/%s" %(bucket['id'],bucket_path)
if upload_path[-1] != '/':
upload_path = "%s/" %(upload_path)
upload_path = "%s%s" %(upload_path,os.path.basename(file_name))
body = {'name': upload_path }
# Create media object with correct mimetype
if os.path.exists(file_name):
mimetype = sniff_extension(file_name,verbose=verbose)
media = http.MediaFileUpload(file_name,
mimetype=mimetype,
resumable=True)
request = storage_service.objects().insert(bucket=bucket['id'],
body=body,
predefinedAcl="publicRead",
media_body=media)
result = request.execute()
return result
    bot.warning('%s requested for upload does not exist, skipping' %file_name) | upload_file will upload a file to a bucket path in Google Storage, returning
    the upload response metadata. If the file does not exist, a warning is issued
    and None is returned
:param storage_service: the drive_service created from get_storage_service
:param bucket: the bucket object from get_bucket
:param file_name: the name of the file to upload
:param bucket_path: the path to upload to | entailment |
def get_image_path(repo_url, trailing_path):
'''get_image_path will determine an image path based on a repo url, removing
any token, and taking into account urls that end with .git.
:param repo_url: the repo url to parse:
:param trailing_path: the trailing path (commit then hash is common)
'''
repo_url = repo_url.split('@')[-1].strip()
if repo_url.endswith('.git'):
repo_url = repo_url[:-4]
return "%s/%s" %(re.sub('^http.+//www[.]','',repo_url), trailing_path) | get_image_path will determine an image path based on a repo url, removing
any token, and taking into account urls that end with .git.
:param repo_url: the repo url to parse:
:param trailing_path: the trailing path (commit then hash is common) | entailment |
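Two quick calls showing how get_image_path normalizes a repository url; the urls and trailing paths are illustrative:
print(get_image_path('https://www.github.com/vsoch/hello-world.git', 'abc123/def456'))
# github.com/vsoch/hello-world/abc123/def456
print(get_image_path('https://token@github.com/user/repo', 'commit/hash'))
# github.com/user/repo/commit/hash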
def run_build(logfile='/tmp/.shub-log'):
'''run_build will generate the Singularity build from a spec_file from a repo_url.
If no arguments are required, the metadata api is queried for the values.
:param build_dir: directory to do the build in. If not specified, will use temporary.
:param spec_file: the spec_file name to use, assumed to be in git repo
:param repo_url: the url to download the repo from
:param repo_id: the repo_id to uniquely identify the repo (in case name changes)
:param commit: the commit to checkout. If none provided, will use most recent.
:param bucket_name: the name of the bucket to send files to
:param verbose: print out extra details as we go (default True)
:param token: a token to send back to the server to authenticate the collection
:param secret: a secret to match to the correct container
:param response_url: the build url to send the response back to. Should also come
from metadata. If not specified, no response is sent
:param branch: the branch to checkout for the build.
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
    if go is None:
sys.exit(0)
# If the user wants debug, this will be set
debug = True
enable_debug = get_build_metadata(key='debug')
    if enable_debug is None:
debug = False
bot.info('DEBUG %s' %debug)
    # Use /tmp for build directory
build_dir = tempfile.mkdtemp()
# Get variables from the instance metadata API
metadata = [{'key': 'repo_url', 'value': None },
{'key': 'repo_id', 'value': None },
{'key': 'response_url', 'value': None },
{'key': 'bucket_name', 'value': None },
{'key': 'tag', 'value': None },
{'key': 'container_id', 'value': None },
{'key': 'commit', 'value': None },
{'key': 'token', 'value': None},
{'key': 'branch', 'value': None },
{'key': 'spec_file', 'value': None},
{'key': 'logging_url', 'value': None },
{'key': 'logfile', 'value': logfile }]
# Obtain values from build
bot.log("BUILD PARAMETERS:")
params = get_build_params(metadata)
params['debug'] = debug
# Default spec file is Singularity
    if params['spec_file'] is None:
        params['spec_file'] = "Singularity"
    if params['bucket_name'] is None:
        params['bucket_name'] = "singularityhub"
    if params['tag'] is None:
        params['tag'] = "latest"
output = run_build_main(build_dir=build_dir,
params=params)
# Output includes:
finished_image = output['image']
metadata = output['metadata']
params = output['params']
# Upload image package files to Google Storage
if os.path.exists(finished_image):
bot.info("%s successfully built" %finished_image)
dest_dir = tempfile.mkdtemp(prefix='build')
# The path to the images on google drive will be the github url/commit folder
trailing_path = "%s/%s" %(params['commit'], params['version'])
image_path = get_image_path(params['repo_url'], trailing_path)
# commits are no longer unique
# storage is by commit
build_files = [finished_image]
bot.info("Sending image to storage:")
bot.info('\n'.join(build_files))
# Start the storage service, retrieve the bucket
storage_service = get_google_service() # default is "storage" "v1"
bucket = get_bucket(storage_service,params["bucket_name"])
# For each file, upload to storage
files = []
for build_file in build_files:
bot.info("Uploading %s to storage..." %build_file)
storage_file = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=build_file)
files.append(storage_file)
# Finally, package everything to send back to shub
response = {"files": json.dumps(files),
"repo_url": params['repo_url'],
"commit": params['commit'],
"repo_id": params['repo_id'],
"branch": params['branch'],
"tag": params['tag'],
"container_id": params['container_id'],
"spec_file":params['spec_file'],
"token": params['token'],
"metadata": json.dumps(metadata)}
# Did the user specify a specific log file?
custom_logfile = get_build_metadata('logfile')
if custom_logfile is not None:
logfile = custom_logfile
response['logfile'] = logfile
# Send final build data to instance
send_build_data(build_dir=build_dir,
response_url=params['response_url'],
secret=params['token'],
data=response)
# Dump final params, for logger to retrieve
passing_params = "/tmp/params.pkl"
pickle.dump(params,open(passing_params,'wb')) | run_build will generate the Singularity build from a spec_file from a repo_url.
If no arguments are required, the metadata api is queried for the values.
:param build_dir: directory to do the build in. If not specified, will use temporary.
:param spec_file: the spec_file name to use, assumed to be in git repo
:param repo_url: the url to download the repo from
:param repo_id: the repo_id to uniquely identify the repo (in case name changes)
:param commit: the commit to checkout. If none provided, will use most recent.
:param bucket_name: the name of the bucket to send files to
:param verbose: print out extra details as we go (default True)
:param token: a token to send back to the server to authenticate the collection
:param secret: a secret to match to the correct container
:param response_url: the build url to send the response back to. Should also come
from metadata. If not specified, no response is sent
:param branch: the branch to checkout for the build.
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere | entailment |
def finish_build(verbose=True):
'''finish_build will finish the build by way of sending the log to the same bucket.
the params are loaded from the previous function that built the image, expected in
    /tmp/params.pkl
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
    if go is None:
sys.exit(0)
# Load metadata
passing_params = "/tmp/params.pkl"
params = pickle.load(open(passing_params,'rb'))
# Start the storage service, retrieve the bucket
storage_service = get_google_service()
bucket = get_bucket(storage_service,params['bucket_name'])
# If version isn't in params, build failed
version = 'error-%s' % str(uuid.uuid4())
if 'version' in params:
version = params['version']
trailing_path = "%s/%s" %(params['commit'], version)
image_path = get_image_path(params['repo_url'], trailing_path)
# Upload the log file
params['log_file'] = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=params['logfile'])
# Close up shop
send_build_close(params=params,
response_url=params['logging_url']) | finish_build will finish the build by way of sending the log to the same bucket.
the params are loaded from the previous function that built the image, expected in
    /tmp/params.pkl
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere | entailment |
def get_build_metadata(key):
'''get_build_metadata will return metadata about an instance from within it.
:param key: the key to look up
'''
headers = {"Metadata-Flavor":"Google"}
url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/%s" %(key)
response = requests.get(url=url,headers=headers)
if response.status_code == 200:
return response.text
return None | get_build_metadata will return metadata about an instance from within it.
:param key: the key to look up | entailment |
def get_build_params(metadata):
'''get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }]
'''
params = dict()
for item in metadata:
        if item['value'] is None:
response = get_build_metadata(key=item['key'])
item['value'] = response
params[item['key']] = item['value']
if item['key'] not in ['token', 'secret', 'credential']:
bot.info('%s is set to %s' %(item['key'],item['value']))
return params | get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }] | entailment |
def rsync(*args, **kwargs):
""" wrapper around the rsync command.
the ssh connection arguments are set automatically.
any args are just passed directly to rsync.
you can use {host_string} in place of the server.
the kwargs are passed on the 'local' fabric command.
if not set, 'capture' is set to False.
example usage:
rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/")
"""
kwargs.setdefault('capture', False)
replacements = dict(
host_string="{user}@{host}".format(
user=env.instance.config.get('user', 'root'),
host=env.instance.config.get(
'host', env.instance.config.get(
'ip', env.instance.uid))))
args = [x.format(**replacements) for x in args]
ssh_info = env.instance.init_ssh_key()
ssh_info.pop('host')
ssh_info.pop('user')
ssh_args = env.instance.ssh_args_from_info(ssh_info)
cmd_parts = ['rsync']
cmd_parts.extend(['-e', "ssh %s" % shjoin(ssh_args)])
cmd_parts.extend(args)
cmd = shjoin(cmd_parts)
return local(cmd, **kwargs) | wrapper around the rsync command.
the ssh connection arguments are set automatically.
any args are just passed directly to rsync.
you can use {host_string} in place of the server.
the kwargs are passed on the 'local' fabric command.
if not set, 'capture' is set to False.
example usage:
rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/") | entailment |
def bootstrap(**kwargs):
""" Bootstrap an EC2 instance that has been booted into an AMI from http://www.daemonology.net/freebsd-on-ec2/
Note: deprecated, current AMI images are basically pre-bootstrapped, they just need to be configured.
"""
# the user for the image is `ec2-user`, there is no sudo, but we can su to root w/o password
original_host = env.host_string
env.host_string = 'ec2-user@%s' % env.instance.uid
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put('%s/authorized_keys' % bootstrap_files, '/tmp/authorized_keys')
put(join(bsdploy_path, 'enable_root_login_on_daemonology.sh'), '/tmp/', mode='0775')
run("""su root -c '/tmp/enable_root_login_on_daemonology.sh'""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
run('rm /tmp/enable_root_login_on_daemonology.sh')
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu = BootstrapUtils()
bu.ssh_keys = None
bu.upload_authorized_keys = False
bu.bootstrap_files_yaml = 'daemonology-files.yml'
bu.print_bootstrap_files()
bu.create_bootstrap_directories()
bu.upload_bootstrap_files({})
# we need to install python here, because there is no way to install it via
# ansible playbooks
bu.install_pkg('/', chroot=False, packages=['python27']) | Bootstrap an EC2 instance that has been booted into an AMI from http://www.daemonology.net/freebsd-on-ec2/
Note: deprecated, current AMI images are basically pre-bootstrapped, they just need to be configured. | entailment |
def bootstrap(**kwargs):
"""Digital Oceans FreeBSD droplets are pretty much already pre-bootstrapped,
including having python2.7 and sudo etc. pre-installed.
the only thing we need to change is to allow root to login (without a password)
enable pf and ensure it is running
"""
bu = BootstrapUtils()
# (temporarily) set the user to `freebsd`
original_host = env.host_string
env.host_string = 'freebsd@%s' % env.instance.uid
# copy DO bsdclout-init results:
if bu.os_release.startswith('10'):
sudo("""cat /etc/rc.digitalocean.d/droplet.conf > /etc/rc.conf""")
sudo("""sysrc zfs_enable=YES""")
sudo("""sysrc sshd_enable=YES""")
# enable and start pf
sudo("""sysrc pf_enable=YES""")
sudo("""sysrc -f /boot/loader.conf pfload=YES""")
sudo('kldload pf', warn_only=True)
sudo('''echo 'pass in all' > /etc/pf.conf''')
sudo('''echo 'pass out all' >> /etc/pf.conf''')
sudo('''chmod 644 /etc/pf.conf''')
sudo('service pf start')
# overwrite sshd_config, because the DO version only contains defaults
# and a line explicitly forbidding root to log in
sudo("""echo 'PermitRootLogin without-password' > /etc/ssh/sshd_config""")
# additionally, make sure the root user is unlocked!
sudo('pw unlock root')
# overwrite the authorized keys for root, because DO creates its entries to explicitly
# disallow root login
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put(path.abspath(path.join(env['config_base'], bootstrap_files, 'authorized_keys')), '/tmp/authorized_keys', use_sudo=True)
sudo('''mv /tmp/authorized_keys /root/.ssh/''')
sudo('''chown root:wheel /root/.ssh/authorized_keys''')
sudo("""service sshd fastreload""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
# clean up DO cloudinit leftovers
run("rm -f /etc/rc.d/digitalocean")
run("rm -rf /etc/rc.digitalocean.d")
run("rm -rf /usr/local/bsd-cloudinit/")
run("pkg remove -y avahi-autoipd || true")
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu.ssh_keys = None
    bu.upload_authorized_keys = False | Digital Ocean's FreeBSD droplets are pretty much already pre-bootstrapped,
including having python2.7 and sudo etc. pre-installed.
the only thing we need to change is to allow root to login (without a password)
enable pf and ensure it is running | entailment |
def bootstrap_files(self):
""" we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``.
"""
bootstrap_file_yamls = [
abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
bootstrap_files = dict()
if self.upload_authorized_keys:
bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
'directory': '/mnt/root/.ssh',
'directory_mode': '0600',
'remote': '/mnt/root/.ssh/authorized_keys',
'fallback': [
'~/.ssh/identity.pub',
'~/.ssh/id_rsa.pub',
'~/.ssh/id_dsa.pub',
'~/.ssh/id_ecdsa.pub']})
for bootstrap_file_yaml in bootstrap_file_yamls:
if not exists(bootstrap_file_yaml):
continue
with open(bootstrap_file_yaml) as f:
info = yaml.load(f, Loader=SafeLoader)
if info is None:
continue
for k, v in info.items():
bootstrap_files[k] = BootstrapFile(self, k, **v)
for bf in bootstrap_files.values():
if not exists(bf.local) and bf.raw_fallback:
if not bf.existing_fallback:
print("Found no public key in %s, you have to create '%s' manually" % (expanduser('~/.ssh'), bf.local))
sys.exit(1)
print("The '%s' file is missing." % bf.local)
for path in bf.existing_fallback:
yes = env.instance.config.get('bootstrap-yes', False)
if yes or yesno("Should we generate it using the key in '%s'?" % path):
if not exists(bf.expected_path):
os.mkdir(bf.expected_path)
with open(bf.local, 'wb') as out:
with open(path, 'rb') as f:
out.write(f.read())
break
else:
# answered no to all options
sys.exit(1)
if not bf.check():
print('Cannot find %s' % bf.local)
sys.exit(1)
packages_path = join(self.download_path, 'packages')
if exists(packages_path):
for dirpath, dirnames, filenames in os.walk(packages_path):
path = dirpath.split(packages_path)[1][1:]
for filename in filenames:
if not filename.endswith('.txz'):
continue
bootstrap_files[join(path, filename)] = BootstrapFile(
self, join(path, filename), **dict(
local=join(packages_path, join(path, filename)),
remote=join('/mnt/var/cache/pkg/All', filename),
encrypted=False))
if self.ssh_keys is not None:
for ssh_key_name, ssh_key_options in list(self.ssh_keys):
ssh_key = join(self.custom_template_path, ssh_key_name)
if exists(ssh_key):
pub_key_name = '%s.pub' % ssh_key_name
pub_key = '%s.pub' % ssh_key
if not exists(pub_key):
print("Public key '%s' for '%s' missing." % (pub_key, ssh_key))
sys.exit(1)
bootstrap_files[ssh_key_name] = BootstrapFile(
self, ssh_key_name, **dict(
local=ssh_key,
remote='/mnt/etc/ssh/%s' % ssh_key_name,
                            mode=0o600))
bootstrap_files[pub_key_name] = BootstrapFile(
self, pub_key_name, **dict(
local=pub_key,
remote='/mnt/etc/ssh/%s' % pub_key_name,
                            mode=0o644))
if hasattr(env.instance, 'get_vault_lib'):
vaultlib = env.instance.get_vault_lib()
for bf in bootstrap_files.values():
if bf.encrypted is None and exists(bf.local):
with open(bf.local) as f:
data = f.read()
bf.info['encrypted'] = vaultlib.is_encrypted(data)
return bootstrap_files | we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``. | entailment |
def devices(self):
""" computes the name of the disk devices that are suitable
installation targets by subtracting CDROM- and USB devices
from the list of total mounts.
"""
install_devices = self.install_devices
if 'bootstrap-system-devices' in env.instance.config:
devices = set(env.instance.config['bootstrap-system-devices'].split())
else:
devices = set(self.sysctl_devices)
for sysctl_device in self.sysctl_devices:
for install_device in install_devices:
if install_device.startswith(sysctl_device):
devices.remove(sysctl_device)
return devices | computes the name of the disk devices that are suitable
installation targets by subtracting CDROM- and USB devices
from the list of total mounts. | entailment |
def fetch_assets(self):
""" download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
"""
# allow overwrites from the commandline
packages = set(
env.instance.config.get('bootstrap-packages', '').split())
packages.update(['python27'])
cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
items = sorted(self.bootstrap_files.items())
for filename, asset in items:
if asset.url:
if not exists(dirname(asset.local)):
os.makedirs(dirname(asset.local))
local(cmd.format(asset))
if filename == 'packagesite.txz':
# add packages to download
items.extend(self._fetch_packages(asset.local, packages)) | download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping. | entailment |
def res_to_str(res):
"""
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
"""
if 'Authorization' in res.request.headers:
res.request.headers['Authorization'] = "*****"
return """
####################################
url = %s
headers = %s
-------- data sent -----------------
%s
------------------------------------
@@@@@ response @@@@@@@@@@@@@@@@
headers = %s
code = %d
reason = %s
--------- data received ------------
%s
------------------------------------
####################################
""" % (res.url,
str(res.request.headers),
OLD_REQ and res.request.data or res.request.body,
res.headers,
res.status_code,
res.reason,
res.text) | :param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it | entailment |
def parse_resource_definition(resource_name, resource_dct):
"""
Returns all the info extracted from a resource section of the apipie json
:param resource_name: Name of the resource that is defined by the section
    :param resource_dct: Dictionary as generated by apipie of the resource
definition
"""
new_dict = {
'__module__': resource_dct.get('__module__', __name__),
'__doc__': resource_dct['full_description'],
'_resource_name': resource_name,
'_own_methods': set(),
'_conflicting_methods': [],
}
# methods in foreign_methods are meant for other resources,
# that is, the url and the resource field do not match /api/{resource}
foreign_methods = {}
# as defined per apipie gem, each method can have more than one api,
# for example, /api/hosts can have the GET /api/hosts api and the GET
# /api/hosts/:id api or DELETE /api/hosts
for method in resource_dct['methods']:
# set the docstring if it only has one api
if not new_dict['__doc__'] and len(method['apis']) == 1:
new_dict['__doc__'] = \
method['apis'][0]['short_description']
for api in method['apis']:
api = MethodAPIDescription(resource_name, method, api)
if api.resource != resource_name:
# this means that the json apipie passed says that an
# endpoint in the form: /api/{resource}/* belongs to
# {different_resource}, we just put it under {resource}
# later, storing it under _foreign_methods for now as we
# might not have parsed {resource} yet
functions = foreign_methods.setdefault(api.resource, {})
if api.name in functions:
old_api = functions.get(api.name).defs
# show only in debug the repeated but identical definitions
log_method = logger.warning
if api.url == old_api.url:
log_method = logger.debug
log_method(
"There is a conflict trying to redefine a method "
"for a foreign resource (%s): \n"
"\tresource:\n"
"\tapipie_resource: %s\n"
"\tnew_api: %s\n"
"\tnew_url: %s\n"
"\told_api: %s\n"
"\told_url: %s",
api.name,
resource_name,
pprint.pformat(api),
api.url,
pprint.pformat(old_api),
old_api.url,
)
new_dict['_conflicting_methods'].append(api)
continue
functions[api.name] = api.generate_func()
else:
# it's an own method, resource and url match
if api.name in new_dict['_own_methods']:
old_api = new_dict.get(api.name).defs
log_method = logger.warning
# show only in debug the repeated but identical definitions
if api.url == old_api.url:
log_method = logger.debug
log_method(
"There is a conflict trying to redefine method "
"(%s): \n"
"\tapipie_resource: %s\n"
"\tnew_api: %s\n"
"\tnew_url: %s\n"
"\told_api: %s\n"
"\told_url: %s",
api.name,
resource_name,
pprint.pformat(api),
api.url,
pprint.pformat(old_api),
old_api.url,
)
new_dict['_conflicting_methods'].append(api)
continue
new_dict['_own_methods'].add(api.name)
new_dict[api.name] = api.generate_func()
return new_dict, foreign_methods | Returns all the info extracted from a resource section of the apipie json
:param resource_name: Name of the resource that is defined by the section
    :param resource_dct: Dictionary as generated by apipie of the resource
definition | entailment |
def parse_resource_from_url(self, url):
"""
Returns the appropriate resource name for the given URL.
:param url: API URL stub, like: '/api/hosts'
:return: Resource name, like 'hosts', or None if not found
"""
# special case for the api root
if url == '/api':
return 'api'
elif url == '/katello':
return 'katello'
match = self.resource_pattern.match(url)
if match:
return match.groupdict().get('resource', None) | Returns the appropriate resource name for the given URL.
:param url: API URL stub, like: '/api/hosts'
:return: Resource name, like 'hosts', or None if not found | entailment |
def _get_name(self):
"""
There are three cases, because apipie definitions can have multiple
signatures but python does not
For example, the api endpoint:
/api/myres/:myres_id/subres/:subres_id/subres2
for method *index* will be translated to the api method name:
subres_index_subres2
So when you want to call it from v2 object, you'll have:
myres.subres_index_subres2
"""
if self.url.count(':') > 1:
# /api/one/two/:three/four -> two_:three_four
base_name = self.url.split('/', 3)[-1].replace('/', '_')[1:]
# :one_two_three -> two_three
if base_name.startswith(':'):
base_name = base_name.split('_')[-1]
# one_:two_three_:four_five -> one_three_five
base_name = re.sub('_:[^/]+', '', base_name)
# in case that the last term was a parameter
if base_name.endswith('_'):
base_name = base_name[:-1]
# one_two_three -> one_two_method_three
base_name = (
'_' + self._method['name']
).join(base_name.rsplit('_', 1))
else:
base_name = self._method['name']
if base_name == 'import':
base_name = 'import_'
if self._apipie_resource != self.resource:
return '%s_%s' % (self._apipie_resource, base_name)
else:
return base_name | There are three cases, because apipie definitions can have multiple
signatures but python does not
For example, the api endpoint:
/api/myres/:myres_id/subres/:subres_id/subres2
for method *index* will be translated to the api method name:
subres_index_subres2
So when you want to call it from v2 object, you'll have:
myres.subres_index_subres2 | entailment |
def generate_func(self, as_global=False):
"""
Generate function for specific method and using specific api
:param as_global: if set, will use the global function name, instead of
the class method (usually {resource}_{class_method}) when defining
the function
"""
keywords = []
params_def = []
params_doc = ""
original_names = {}
params = dict(
(param['name'], param)
for param in self.params
)
# parse the url required params, as sometimes they are skipped in the
# parameters list of the definition
for param in self.url_params:
if param not in params:
param = {
'name': param,
'required': True,
'description': '',
'validator': '',
}
params[param['name']] = param
else:
params[param]['required'] = True
# split required and non-required params for the definition
req_params = []
nonreq_params = []
for param in six.itervalues(params):
if param['required']:
req_params.append(param)
else:
nonreq_params.append(param)
for param in req_params + nonreq_params:
params_doc += self.create_param_doc(param) + "\n"
local_name = param['name']
# some params collide with python keywords, that's why we do
# this switch (and undo it inside the function we generate)
if param['name'] == 'except':
local_name = 'except_'
original_names[local_name] = param['name']
keywords.append(local_name)
if param['required']:
params_def.append("%s" % local_name)
else:
params_def.append("%s=None" % local_name)
func_head = 'def {0}(self, {1}):'.format(
as_global and self.get_global_method_name() or self.name,
', '.join(params_def)
)
code_body = (
' _vars_ = locals()\n'
' _url = self._fill_url("{url}", _vars_, {url_params})\n'
' _original_names = {original_names}\n'
' _kwargs = dict((_original_names[k], _vars_[k])\n'
' for k in {keywords} if _vars_[k])\n'
' return self._foreman.do_{http_method}(_url, _kwargs)')
code_body = code_body.format(
http_method=self.http_method.lower(),
url=self.url,
url_params=self.url_params,
keywords=keywords,
original_names=original_names,
)
code = [
func_head,
' """',
self.short_desc,
'',
params_doc,
' """',
code_body,
]
code = '\n'.join(code)
six.exec_(code)
function = locals()[self.name]
# to ease debugging, all the funcs have the definitions attached
setattr(function, 'defs', self)
return function | Generate function for specific method and using specific api
:param as_global: if set, will use the global function name, instead of
the class method (usually {resource}_{class_method}) when defining
the function | entailment |
def create_param_doc(cls, param, prefix=None):
"""
Generate documentation for single parameter of function
:param param: dict contains info about parameter
        :param prefix: prefix string for recursive purposes
"""
desc = cls.exclude_html_reg.sub('', param['description']).strip()
if not desc:
desc = "<no description>"
name = param['name']
if prefix:
name = "%s[%s]" % (prefix, name)
doc_ = ":param %s: %s; %s" % (name, desc, param['validator'])
if param['required']:
doc_ += " (REQUIRED)"
else:
doc_ += " (OPTIONAL)"
for param in param.get('params', []):
doc_ += "\n" + cls.create_param_doc(param, name)
return doc_ | Generate documentation for single parameter of function
:param param: dict contains info about parameter
        :param prefix: prefix string for recursive purposes | entailment
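A hedged example of create_param_doc; it assumes the method is exposed as a classmethod on the MethodAPIDescription class referenced above, and the parameter dictionary simply mirrors the keys the method reads:
param = {'name': 'id',
         'description': 'Host identifier',
         'validator': 'Must be an Integer',
         'required': True}
doc = MethodAPIDescription.create_param_doc(param)
# doc is roughly ':param id: Host identifier; Must be an Integer (REQUIRED)'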
def convert_plugin_def(http_method, funcs):
"""
This function parses one of the elements of the definitions dict for a
plugin and extracts the relevant information
:param http_method: HTTP method that uses (GET, POST, DELETE, ...)
:param funcs: functions related to that HTTP method
"""
methods = []
if http_method not in ('GET', 'PUT', 'POST', 'DELETE'):
logger.error(
'Plugin load failure, HTTP method %s unsupported.',
http_method,
)
return methods
for fname, params in six.iteritems(funcs):
method = {
'apis': [{'short_description': 'no-doc'}],
'params': [],
}
method['apis'][0]['http_method'] = http_method
method['apis'][0]['api_url'] = '/api/' + fname
method['name'] = fname
for pname, pdef in six.iteritems(params):
param = {
'name': pname,
'validator': "Must be %s" % pdef['ptype'],
'description': '',
'required': pdef['required'],
}
method['params'].append(param)
methods.append(method)
return methods | This function parses one of the elements of the definitions dict for a
plugin and extracts the relevant information
:param http_method: HTTP method that uses (GET, POST, DELETE, ...)
:param funcs: functions related to that HTTP method | entailment |
def get_current_version(repo_path):
"""
Given a repo will return the version string, according to semantic
versioning, counting as non-backwards compatible commit any one with a
message header that matches (case insensitive)::
sem-ver: .*break.*
And as features any commit with a header matching::
sem-ver: feature
And counting any other as a bugfix
"""
repo = dulwich.repo.Repo(repo_path)
tags = get_tags(repo)
maj_version = 0
feat_version = 0
fix_version = 0
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
maj_version, feat_version, fix_version = get_version(
commit=commit,
tags=tags,
maj_version=maj_version,
feat_version=feat_version,
fix_version=fix_version,
children=children,
)
return '%s.%s.%s' % (maj_version, feat_version, fix_version) | Given a repo will return the version string, according to semantic
versioning, counting as non-backwards compatible commit any one with a
message header that matches (case insensitive)::
sem-ver: .*break.*
And as features any commit with a header matching::
sem-ver: feature
And counting any other as a bugfix | entailment |
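A hedged call of get_current_version on a local clone; the path is hypothetical and dulwich must be importable:
print(get_current_version('/path/to/repo'))   # e.g. '1.3.2'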
def get_authors(repo_path, from_commit):
"""
Given a repo and optionally a base revision to start from, will return
the list of authors.
"""
repo = dulwich.repo.Repo(repo_path)
refs = get_refs(repo)
start_including = False
authors = set()
if from_commit is None:
start_including = True
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
if (
start_including or commit_sha.startswith(from_commit) or
fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
):
authors.add(commit.author.decode())
for child in children:
authors.add(child.author.decode())
start_including = True
return '\n'.join(sorted(authors)) | Given a repo and optionally a base revision to start from, will return
the list of authors. | entailment |
def emit(
self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False
):
"""Emit a spout Tuple message.
:param tup: the Tuple to send to Storm, should contain only
JSON-serializable data.
:type tup: list or tuple
:param tup_id: the ID for the Tuple. Leave this blank for an
unreliable emit.
:type tup_id: str
:param stream: ID of the stream this Tuple should be emitted to.
Leave empty to emit to the default stream.
:type stream: str
:param direct_task: the task to send the Tuple to if performing a
direct emit.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
                              the Tuple was emitted to (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
                  be a ``list`` of task IDs that the Tuple was sent to. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
return super(Spout, self).emit(
tup,
tup_id=tup_id,
stream=stream,
direct_task=direct_task,
need_task_ids=need_task_ids,
) | Emit a spout Tuple message.
:param tup: the Tuple to send to Storm, should contain only
JSON-serializable data.
:type tup: list or tuple
:param tup_id: the ID for the Tuple. Leave this blank for an
unreliable emit.
:type tup_id: str
:param stream: ID of the stream this Tuple should be emitted to.
Leave empty to emit to the default stream.
:type stream: str
:param direct_task: the task to send the Tuple to if performing a
direct emit.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
                              the Tuple was emitted to (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
                  be a ``list`` of task IDs that the Tuple was sent to. Note
that when specifying direct_task, this will be equal to
``[direct_task]``. | entailment |
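
A minimal sketch of how this method is typically driven from a spout's ``next_tuple``; the top-level ``pystorm`` import, the word list, and the stream name are illustrative assumptions::

    from pystorm import Spout

    class WordSpout(Spout):
        # Illustrative spout that emits one word per next_tuple call.
        words = ["storm", "tuple", "stream"]

        def next_tuple(self):
            if self.words:
                word = self.words.pop(0)
                # Unreliable emit: no tup_id, so Storm will not track this Tuple.
                self.emit([word], stream="words")
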
def _run(self):
"""The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested.
"""
cmd = self.read_command()
if cmd["command"] == "next":
self.next_tuple()
elif cmd["command"] == "ack":
self.ack(cmd["id"])
elif cmd["command"] == "fail":
self.fail(cmd["id"])
elif cmd["command"] == "activate":
self.activate()
elif cmd["command"] == "deactivate":
self.deactivate()
else:
self.logger.error("Received invalid command from Storm: %r", cmd)
self.send_message({"command": "sync"}) | The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested. | entailment |
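
Because the dispatch is isolated in ``_run``, it can be unit tested by stubbing the I/O methods; the test below is only a sketch and assumes a ``spout`` fixture that yields an already constructed instance::

    from unittest import mock

    def test_run_dispatches_ack(spout):
        spout.read_command = mock.Mock(return_value={"command": "ack", "id": "t-1"})
        spout.ack = mock.Mock()
        spout.send_message = mock.Mock()  # keep the sync reply off the real stream
        spout._run()
        spout.ack.assert_called_once_with("t-1")
        spout.send_message.assert_called_once_with({"command": "sync"})
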
def ack(self, tup_id):
"""Called when a bolt acknowledges a Tuple in the topology.
:param tup_id: the ID of the Tuple that has been fully acknowledged in
the topology.
:type tup_id: str
"""
self.failed_tuples.pop(tup_id, None)
try:
del self.unacked_tuples[tup_id]
except KeyError:
self.logger.error("Received ack for unknown tuple ID: %r", tup_id) | Called when a bolt acknowledges a Tuple in the topology.
:param tup_id: the ID of the Tuple that has been fully acknowledged in
the topology.
:type tup_id: str | entailment |
def fail(self, tup_id):
"""Called when a Tuple fails in the topology
A reliable spout will replay a failed tuple up to ``max_fails`` times.
:param tup_id: the ID of the Tuple that has failed in the topology
either due to a bolt calling ``fail()`` or a Tuple
timing out.
:type tup_id: str
"""
saved_args = self.unacked_tuples.get(tup_id)
if saved_args is None:
self.logger.error("Received fail for unknown tuple ID: %r", tup_id)
return
tup, stream, direct_task, need_task_ids = saved_args
if self.failed_tuples[tup_id] < self.max_fails:
self.emit(
tup,
tup_id=tup_id,
stream=stream,
direct_task=direct_task,
need_task_ids=need_task_ids,
)
self.failed_tuples[tup_id] += 1
else:
# Just pretend we got an ack when we exceed retry limit
self.logger.info(
"Acking tuple ID %r after it exceeded retry limit " "(%r)",
tup_id,
self.max_fails,
)
self.ack(tup_id) | Called when a Tuple fails in the topology
A reliable spout will replay a failed tuple up to ``max_fails`` times.
:param tup_id: the ID of the Tuple that has failed in the topology
either due to a bolt calling ``fail()`` or a Tuple
timing out.
:type tup_id: str | entailment |
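
The give-up-after-``max_fails`` policy can be illustrated in isolation; the snippet below mirrors only the counting logic, not the class's bookkeeping::

    from collections import defaultdict

    max_fails = 3
    fail_counts = defaultdict(int)

    def should_replay(tup_id):
        # Replay until the tuple has failed max_fails times, then give up.
        if fail_counts[tup_id] < max_fails:
            fail_counts[tup_id] += 1
            return True
        return False

    assert [should_replay("t1") for _ in range(5)] == [True, True, True, False, False]
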
def emit(
self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False
):
"""Emit a spout Tuple & add metadata about it to `unacked_tuples`.
In order for this to work, `tup_id` is a required parameter.
See :meth:`Bolt.emit`.
"""
if tup_id is None:
raise ValueError(
"You must provide a tuple ID when emitting with a "
"ReliableSpout in order for the tuple to be "
"tracked."
)
args = (tup, stream, direct_task, need_task_ids)
self.unacked_tuples[tup_id] = args
return super(ReliableSpout, self).emit(
tup,
tup_id=tup_id,
stream=stream,
direct_task=direct_task,
need_task_ids=need_task_ids,
) | Emit a spout Tuple & add metadata about it to `unacked_tuples`.
In order for this to work, `tup_id` is a required parameter.
See :meth:`Bolt.emit`. | entailment |
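
Putting the reliable contract together: a subclass has to pass ``tup_id`` on every emit, and can tune the replay limit by overriding ``max_fails``. The import path, the class-level override, and ``fetch_next_order`` are assumptions made for illustration::

    from pystorm import ReliableSpout  # top-level export assumed

    class OrderSpout(ReliableSpout):
        # Give up on an order after five replays (assumed override point).
        max_fails = 5

        def next_tuple(self):
            # fetch_next_order() is a hypothetical helper returning (id, payload).
            order_id, payload = self.fetch_next_order()
            # tup_id is mandatory here; omitting it raises ValueError.
            self.emit([payload], tup_id=str(order_id))
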
def remote_pdb_handler(signum, frame):
""" Handler to drop us into a remote debugger upon receiving SIGUSR1 """
try:
from remote_pdb import RemotePdb
rdb = RemotePdb(host="127.0.0.1", port=0)
rdb.set_trace(frame=frame)
except ImportError:
log.warning(
"remote_pdb unavailable. Please install remote_pdb to "
"allow remote debugging."
)
# Restore signal handler for later
signal.signal(signum, remote_pdb_handler) | Handler to drop us into a remote debugger upon receiving SIGUSR1 | entailment |
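
Wiring the handler up is a single call at process start, assuming the function above is in scope; after that, sending ``SIGUSR1`` to the worker opens a RemotePdb session on a random local port (POSIX only)::

    import signal

    signal.signal(signal.SIGUSR1, remote_pdb_handler)
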
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
If exception information is present, it is formatted using
traceback.print_exception and sent to Storm.
"""
try:
msg = self.format(record)
level = _STORM_LOG_LEVELS.get(record.levelname.lower(), _STORM_LOG_INFO)
self.serializer.send_message(
{"command": "log", "msg": str(msg), "level": level}
)
except Exception:
self.handleError(record) | Emit a record.
If a formatter is specified, it is used to format the record.
If exception information is present, it is formatted using
traceback.print_exception and sent to Storm. | entailment |
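
A sketch of attaching this handler so that ordinary ``logging`` calls are forwarded to Storm over the multilang protocol; the ``serializer`` argument is assumed to be the component's already configured serializer, the same object the component itself hands to ``StormHandler``::

    import logging

    from pystorm.component import StormHandler  # import path assumed

    def attach_storm_handler(serializer, name="my_component"):
        # `serializer` is assumed to be the component's configured serializer.
        handler = StormHandler(serializer)
        handler.setLevel(logging.INFO)
        logger = logging.getLogger(name)
        logger.addHandler(handler)
        return logger
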
def _setup_component(self, storm_conf, context):
"""Add helpful instance variables to component after initial handshake
with Storm. Also configure logging.
"""
self.topology_name = storm_conf.get("topology.name", "")
self.task_id = context.get("taskid", "")
self.component_name = context.get("componentid")
# If using Storm before 0.10.0 componentid is not available
if self.component_name is None:
self.component_name = context.get("task->component", {}).get(
str(self.task_id), ""
)
self.debug = storm_conf.get("topology.debug", False)
self.storm_conf = storm_conf
self.context = context
# Set up logging
self.logger = logging.getLogger(".".join((__name__, self.component_name)))
log_path = self.storm_conf.get("pystorm.log.path")
log_file_name = self.storm_conf.get(
"pystorm.log.file",
"pystorm_{topology_name}" "_{component_name}" "_{task_id}" "_{pid}.log",
)
root_log = logging.getLogger()
log_level = self.storm_conf.get("pystorm.log.level", "info")
if log_path:
max_bytes = self.storm_conf.get("pystorm.log.max_bytes", 1000000) # 1 MB
backup_count = self.storm_conf.get("pystorm.log.backup_count", 10)
log_file = join(
log_path,
(
log_file_name.format(
topology_name=self.topology_name,
component_name=self.component_name,
task_id=self.task_id,
pid=self.pid,
)
),
)
handler = RotatingFileHandler(
log_file, maxBytes=max_bytes, backupCount=backup_count
)
log_format = self.storm_conf.get(
"pystorm.log.format",
"%(asctime)s - %(name)s - " "%(levelname)s - %(message)s",
)
else:
self.log(
"pystorm StormHandler logging enabled, so all messages at "
'levels greater than "pystorm.log.level" ({}) will be sent'
" to Storm.".format(log_level)
)
handler = StormHandler(self.serializer)
log_format = self.storm_conf.get(
"pystorm.log.format", "%(asctime)s - %(name)s - " "%(message)s"
)
formatter = logging.Formatter(log_format)
log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
if self.debug:
# potentially override logging that was provided if
# topology.debug was set to true
log_level = logging.DEBUG
handler.setLevel(log_level)
handler.setFormatter(formatter)
root_log.addHandler(handler)
self.logger.setLevel(log_level)
logging.getLogger("pystorm").setLevel(log_level)
# Redirect stdout to ensure that print statements/functions
# won't disrupt the multilang protocol
if self.serializer.output_stream == sys.stdout:
sys.stdout = LogStream(logging.getLogger("pystorm.stdout")) | Add helpful instance variables to component after initial handshake
with Storm. Also configure logging. | entailment |
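
The ``pystorm.log.*`` keys read above are plain topology configuration values; a configuration along the following lines (all values illustrative) switches logging to rotating files instead of the Storm log handler::

    storm_conf = {
        "topology.name": "word-count",
        "topology.debug": False,
        "pystorm.log.path": "/var/log/storm-workers",  # presence enables file logging
        "pystorm.log.file": "pystorm_{topology_name}_{component_name}_{task_id}_{pid}.log",
        "pystorm.log.level": "debug",
        "pystorm.log.max_bytes": 5 * 1024 * 1024,
        "pystorm.log.backup_count": 3,
    }
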