def build_graph(edgelist,d):
"""
Constructs a directed graph from its edgelist.
Parameters:
edgelist (list): List of edges, formatted as tuples (source,destination)
d (float): Teleportation parameter
Returns:
Gccm (nx.DiGraph): Depending on d, directed graph described by edgelist.
- If d=0, returns the maximal (undirected) connected component of G,
with correct directions added on its edges.
- If d>0, returns the entire graph.
In both cases, Gccm will have a 'weight' attribute on edges, with
total sum 1.
"""
G, edges = getGraph(edgelist) # Time-aggregated graph
if d==0:
# Select the largest (undirected) connected component of G
Gcc = sorted(connected_component_subgraphs(G.to_undirected()),
key = len, reverse=True)
Gccmax = Gcc[0]
gccedges = Gccmax.to_directed().edges()
# Converts the connected component to a directed graph
Gccmaxdir, dicoedgeccm = undirtodirected(edges,gccedges)
Gccm = normweight(Gccmaxdir)
else:
Gccmaxdir, dicoedgeccm = undirtodirected(edges,edges)
Gccm = normweight(Gccmaxdir)
    return Gccm, dicoedgeccm
def gccdicosnap(dicosnap,dicoedgeccm):
"""
Selects all time-stamped edges with nodes belonging to the maximal connected
component.
Parameters:
        dicosnap (dict): Dictionary indexed by timestamps; each entry holds the
            edgelist of the corresponding time-slice.
dicoedgeccm (dict): Dictionary indexed by all edges in the aggregated
network.
Returns:
        dicosnapccm (dict): Dictionary indexed by timestamps; each entry holds the
            edgelist of the corresponding time-slice, restricted to edges of the c.c.
"""
dicosnapccm = {}
for t in dicosnap.keys():
dicosnapccm[t] = {}
dicosnapccm[t]["edgelist"] = []
for edge in dicosnap[t]["edgelist"]:
if edge in dicoedgeccm.keys():
dicosnapccm[t]["edgelist"].append(edge)
    return dicosnapccm
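# Usage sketch with toy data (my own example, not from the original project):
# only edges that also appear in dicoedgeccm survive the per-timestamp filter.
example_dicosnap = {0: {"edgelist": [("a", "b"), ("c", "d")]},
                    1: {"edgelist": [("b", "a")]}}
example_dicoedgeccm = {("a", "b"): 0, ("b", "a"): 1}
print(gccdicosnap(example_dicosnap, example_dicoedgeccm))
# -> {0: {'edgelist': [('a', 'b')]}, 1: {'edgelist': [('b', 'a')]}}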
def contact(snapt,correspnodes,version):
"""
Builds a dictionary to represent the weighted adjacency matrix at a given
time.
Parameters:
snapt (list): List of contacts formatted as (date,(source,destination))
correspnodes (dict): Dictionary of correspondences between node labels
and their index in [0,n-1]
version (int): Indicates whether to compute TempoRank (version 1) or
out-TempoRank (version 2)
Returns:
        wt (dict): Out-strength of each node (number of contacts with that node as source)
        ot (dict): Flags nodes with at least one outgoing contact (only filled when version=2)
        dicowt (dict): Number of contacts for each (source,destination) pair
"""
    # snapt: list of contacts for this time-slice
    # Use a dict rather than a full matrix
    Print = PETSc.Sys.Print
    ot = {}
    wt = {}
    dicowt = {}
    Print("Start Wt")
    for i in snapt:
        # Undirected graph => symmetric
        # dicowt[(i,j)]: number of contacts between i and j
        # wt[i]: out-strength of i
if tuple((correspnodes[i[0]],correspnodes[i[1]])) not in dicowt.keys():
dicowt[tuple((correspnodes[i[0]],correspnodes[i[1]]))]=1
else:
dicowt[tuple((correspnodes[i[0]],correspnodes[i[1]]))]+=1
if correspnodes[i[0]] not in wt.keys():
wt[correspnodes[i[0]]] = 1
if version==2:
ot[correspnodes[i[0]]] = 1
else:
wt[correspnodes[i[0]]] += 1
    return wt,ot,dicowt
def listetransition(dicosnap,correspnodes,n,q,d,version):
"""
Assembles a list of transition matrices from a dictionary of snapshots.
Parameters:
dicosnap (dict): Dictionary of snapshots
correspnodes (dict): Dictionary of correspondences between node labels
and their index in [0,n-1]
n (int): Number of nodes in the network
q (float): Laziness parameter
d (float): Teleportation parameter
version (int): Indicates whether to compute TempoRank (version 1) or
out-TempoRank (version 2)
Returns:
listtrans (list): A list of transition matrices (as PETSc.Mat objects)
        listod (list): A list of adjacency matrices (as PETSc.Mat objects)
"""
listtrans = list()
listod = list()
for t in sorted(dicosnap.keys()):
wt,ot,dicowt = contact(dicosnap[t]["edgelist"],correspnodes,version)
Bt,Ot = transition(wt,dicowt,q,n,d,ot,version)
PETSc.COMM_WORLD.barrier()
listtrans.append(Bt.copy())
if version == 2:
listod.append(Ot.copy())
Bt.destroy()
Ot.destroy()
    return listtrans,listod
def overlay(img_in, img_layer, opacity):
"""Apply overlay blending mode of a layer on an image.
Find more information on `Wikipedia <https://en.wikipedia.org/w/index.php?title=Blend_modes&oldid=868545948#Overlay>`__.
.. note:: The implementation of this method was changed in version 2.0.0. Previously, it would be identical to the
soft light blending mode. Now, it resembles the implementation on Wikipedia. You can still use the soft light
blending mode if you are looking for backwards compatibility.
Example::
import cv2, numpy
from blend_modes import overlay
img_in = cv2.imread('./orig.png', -1).astype(float)
img_layer = cv2.imread('./layer.png', -1).astype(float)
img_out = overlay(img_in,img_layer,0.5)
cv2.imshow('window', img_out.astype(numpy.uint8))
cv2.waitKey()
Args:
img_in(3-dimensional numpy array of floats (r/g/b/a) in range 0-255.0): Image to be blended upon
img_layer(3-dimensional numpy array of floats (r/g/b/a) in range 0.0-255.0): Layer to be blended with image
opacity(float): Desired opacity of layer for blending
Returns:
3-dimensional numpy array of floats (r/g/b/a) in range 0.0-255.0: Blended image
"""
# Sanity check of inputs
assert img_in.dtype.kind == 'f', 'Input variable img_in should be of numpy.float type.'
assert img_layer.dtype.kind == 'f', 'Input variable img_layer should be of numpy.float type.'
assert img_in.shape[2] == 4, 'Input variable img_in should be of shape [:, :,4].'
assert img_layer.shape[2] == 4, 'Input variable img_layer should be of shape [:, :,4].'
assert 0.0 <= opacity <= 1.0, 'Opacity needs to be between 0.0 and 1.0.'
img_in /= 255.0
img_layer /= 255.0
ratio = _compose_alpha(img_in, img_layer, opacity)
comp = np.less(img_in[:, :, :3], 0.5) * (2 * img_in[:, :, :3] * img_layer[:, :, :3]) \
+ np.greater_equal(img_in[:, :, :3], 0.5) \
* (1 - (2 * (1 - img_in[:, :, :3]) * (1 - img_layer[:, :, :3])))
ratio_rs = np.reshape(np.repeat(ratio, 3), [comp.shape[0], comp.shape[1], comp.shape[2]])
img_out = comp * ratio_rs + img_in[:, :, :3] * (1.0 - ratio_rs)
img_out = np.nan_to_num(np.dstack((img_out, img_in[:, :, 3]))) # add alpha channel and replace nans
    return img_out * 255.0
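# The per-channel rule applied above through the np.less / np.greater_equal
# masks, restated on normalized scalars as a small self-contained sketch
# (independent of the blend_modes helper _compose_alpha):
def _overlay_channel(base, layer):
    # base < 0.5 darkens (multiply-like), base >= 0.5 lightens (screen-like)
    return 2 * base * layer if base < 0.5 else 1 - 2 * (1 - base) * (1 - layer)

print(_overlay_channel(0.25, 0.5))  # 0.25 -- a dark base stays dark
print(_overlay_channel(0.75, 0.5))  # 0.75 -- a light base stays light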
def updateIncar(self, settings):
"""
    This function takes a dict of settings used to override the INCAR file that is
    going to be generated.
:settings: Dict of settings
:returns: True if everything went well, False otherwise.
"""
self.vprint("Updating incar with settings %s"%settings)
for dep in self.dependencies:
if dep.type=="INCAR":
print("INCAR FOUND")
self.vprint("INCAR found in dependencies")
for setting in settings:
dep.setSetting(setting, settings[setting])
return True
    return self.setIncar(settings)
def prepare(self):
"""
Prepare data for the running of the VASP program:
1. Look at the prevDependencies to copy the needed files from the
prev object into the folder
2. Look at the dependencies names to see if we have all
dependencies
"""
self.controlDirectory()
self.controlExtDependencies()
self.controlPrevDependencies()
    self.controlDependencies()
def controlRun(self):
""" In the case that a run script was given, this function controls that the run script exists. """
if not self.script:
return True
runFailure=False
self.vprint("Preparing job for running...")
self.vprint("Is there the run script '%s' in %s"%(self.script, self.folder))
runPath = os.path.join(self.folder, self.script)
if not os.path.exists(runPath):
runFailure = True
if not self.prev:
# we are in the first job
runPath = os.path.join(os.curdir, self.script)
if os.path.exists(runPath):
self.vprint("Run script found at %s, moving the script to the %s folder"%(os.curdir, self.folder))
shutil.copy(runPath, self.folder)
runFailure = False
else:
scriptPrevPath = os.path.join(self.prev.folder, self.script)
if os.path.exists(scriptPrevPath):
runFailure=False
shutil.copy(scriptPrevPath, self.folder)
if runFailure:
raise Exception("No script '%s' to be run found in '%s' !"%(self.script, self.folder))
sys.exit(-1) | def controlRun(self):
""" In the case that a run script was given, this function controls that the run script exists. """
if not self.script:
return True
runFailure=False
self.vprint("Preparing job for running...")
self.vprint("Is there the run script '%s' in %s"%(self.script, self.folder))
runPath = os.path.join(self.folder, self.script)
if not os.path.exists(runPath):
runFailure = True
if not self.prev:
# we are in the first job
runPath = os.path.join(os.curdir, self.script)
if os.path.exists(runPath):
self.vprint("Run script found at %s, moving the script to the %s folder"%(os.curdir, self.folder))
shutil.copy(runPath, self.folder)
runFailure = False
else:
scriptPrevPath = os.path.join(self.prev.folder, self.script)
if os.path.exists(scriptPrevPath):
runFailure=False
shutil.copy(scriptPrevPath, self.folder)
if runFailure:
raise Exception("No script '%s' to be run found in '%s' !"%(self.script, self.folder))
sys.exit(-1) |
def run(self):
"""
This function runs the job. If 'execute=False' then the job will not be
run, maybe because you have already run it and you just want to run the
next jobs
"""
self.setPidFile()
if not self.execute:
self.vprint("The job %s will not be executed"%self)
return 0
self.prepare()
self.controlRun()
self.cd(self.folder)
self.runScript()
    self.cd()
def exists(self):
"""
Checks for the existence of the generated file, useful for scripting and debugging.
"""
    return os.path.exists(os.path.join(self.path, self.fileName))
def rm(self):
"""
Simply remove the generated file.
"""
self.vprint("Removing file")
if self.exists():
        try:
            os.remove(self.filePath)
        except Exception as e:
            raise Exception("A problem removing file %s occurred: %s" % (self.filePath, e))
        else:
            self.vprint("File removed successfully")
            return True
    else:
        raise Exception("Trying to remove a non-existent file at %s" % self.filePath)
def append_rect(self, rectangle):
"""Adding rect to the picture_info."""
rect= Rectangle_Multiclass()
rect.load_BBox( rectangle.x1, rectangle.x2, rectangle.y1, rectangle.y2, rectangle.label, rectangle.label_chall, rectangle.label_code)
index= len(self.rects)
    self.rects.insert(index, rect)
def main():
'''
Parse command line arguments and execute the code
'''
start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--det_frames_folder', default='det_frames/', type=str)
parser.add_argument('--det_result_folder', default='det_results/', type=str)
parser.add_argument('--result_folder', default='summary_result/', type=str)
parser.add_argument('--summary_file', default='results.txt', type=str)
parser.add_argument('--output_name', default='output.mp4', type=str)
parser.add_argument('--perc', default=5, type=int)
parser.add_argument('--path_video', required=True, type=str)
args = parser.parse_args()
frame_list, frames = Utils_Video.extract_frames(args.path_video, args.perc)
det_frame_list,det_result_list=still_image_YOLO_DET(frame_list, frames, args.det_frames_folder,args.det_result_folder)
Utils_Video.make_video_from_list(args.output_name, det_frame_list)
print_YOLO_DET_result(det_result_list,args.result_folder, args.summary_file)
end = time.time()
print("Elapsed Time:%d Seconds"%(end-start))
print("Running Completed with Success!!!") | def main():
'''
Parse command line arguments and execute the code
'''
start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--det_frames_folder', default='det_frames/', type=str)
parser.add_argument('--det_result_folder', default='det_results/', type=str)
parser.add_argument('--result_folder', default='summary_result/', type=str)
parser.add_argument('--summary_file', default='results.txt', type=str)
parser.add_argument('--output_name', default='output.mp4', type=str)
parser.add_argument('--perc', default=5, type=int)
parser.add_argument('--path_video', required=True, type=str)
args = parser.parse_args()
frame_list, frames = Utils_Video.extract_frames(args.path_video, args.perc)
det_frame_list,det_result_list=still_image_YOLO_DET(frame_list, frames, args.det_frames_folder,args.det_result_folder)
Utils_Video.make_video_from_list(args.output_name, det_frame_list)
print_YOLO_DET_result(det_result_list,args.result_folder, args.summary_file)
end = time.time()
print("Elapsed Time:%d Seconds"%(end-start))
print("Running Completed with Success!!!") |
def filter_line(line):
'''
    Replace foreign characters (other than punctuation and numerals) with spaces.
'''
if re.search(punc,line):
clean_line = ' '.join(re.sub(pattern, ' ', line).split())
if clean_line and (clean_line[0] in ['.', ',', '?']):
clean_line = clean_line[1:]
temp_line = clean_line.replace(" ","")
if regex.search(temp_line):
return ''
return ' '.join(clean_line.split())
    return ''
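# Hypothetical module-level patterns (the original punc / pattern / regex are
# defined elsewhere in the source file; these stand-ins only make the sketch
# below runnable): keep lines containing sentence punctuation, blank out Latin
# letters, and drop the line entirely if any Latin letters survive cleaning.
import re
punc = r"[.,?]"                      # assumed: line must contain punctuation
pattern = re.compile(r"[A-Za-z]")    # assumed: "foreign" characters to blank out
regex = re.compile(r"[A-Za-z]")      # assumed: reject lines still holding them
print(filter_line("नमस्ते hello, दुनिया?"))        # -> 'नमस्ते , दुनिया?' with these stand-ins
print(repr(filter_line("abc def.")))             # -> '' (nothing left after cleaning)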
def __get_raw_info(self, text):
"""Parses a Debian control file and returns raw dictionary"""
# Extract package keys and values
split_regex = re.compile(r"^[A-Za-z-]+:\s", flags=re.MULTILINE)
keys = [key[:-2].lower() for key in split_regex.findall(text)]
values = [value.strip() for value in re.split(split_regex, text)[1:]]
# Composing initial package info dict
if len(values) > 0:
pkg_name = values[0]
pkg_details = dict(zip(keys[1:], values[1:]))
pkg_dict = {"name": pkg_name, "details": pkg_details}
return pkg_dict
else:
        raise ValueError("file or text doesn't match Debian Control File schema")
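# Usage sketch (toy control-file text, my own): in the original this is a
# private method of a parser class, but it never touches self, so None stands
# in for it here. Keys are lowercased and the first value becomes the name.
sample_text = ("Package: tzdata\n"
               "Version: 2021a-0ubuntu0\n"
               "Depends: debconf\n"
               "Description: time zone data\n"
               " The tz database")
print(__get_raw_info(None, sample_text))
# -> {'name': 'tzdata', 'details': {'version': '2021a-0ubuntu0',
#     'depends': 'debconf', 'description': 'time zone data\n The tz database'}}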
def __get_clean_info(self, raw_info):
"""Cleans up raw parsed package information and filters unneeded"""
pkg_name = raw_info["name"]
version = raw_info["details"].get("version")
long_description = raw_info["details"].get("description")
long_depends = raw_info["details"].get("depends")
synopsis, description = self.__split_description(long_description)
depends, alt_depends = self.__split_depends(long_depends)
reverse_depends = self.__get_reverse_depends(pkg_name, self.raw_pkg_info)
pkg_details = {
"version": version,
"synopsis": synopsis,
"description": description,
"depends": depends,
"alt_depends": alt_depends,
"reverse_depends": reverse_depends,
}
return {"name": pkg_name, "details": pkg_details} | def __get_clean_info(self, raw_info):
"""Cleans up raw parsed package information and filters unneeded"""
pkg_name = raw_info["name"]
version = raw_info["details"].get("version")
long_description = raw_info["details"].get("description")
long_depends = raw_info["details"].get("depends")
synopsis, description = self.__split_description(long_description)
depends, alt_depends = self.__split_depends(long_depends)
reverse_depends = self.__get_reverse_depends(pkg_name, self.raw_pkg_info)
pkg_details = {
"version": version,
"synopsis": synopsis,
"description": description,
"depends": depends,
"alt_depends": alt_depends,
"reverse_depends": reverse_depends,
}
return {"name": pkg_name, "details": pkg_details} |
def __split_description(self, long_description):
"""Breaks down long descriptions into synopsis and description"""
if long_description is not None:
split_description = tuple(long_description.split("\n", maxsplit=1))
synopsis = split_description[0]
description = (
re.sub(r"^\s", "", split_description[1], flags=re.MULTILINE)
if 1 < len(split_description)
else None
)
else:
synopsis, description = None, None
    return (synopsis, description)
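# Usage sketch (toy input, not from the source; None stands in for the unused
# self): the first line becomes the synopsis, the indented remainder (one
# leading space stripped per line) becomes the description.
syn, desc = __split_description(None, "GNU C Library\n shared libraries\n and timezone data")
print(syn)         # GNU C Library
print(repr(desc))  # 'shared libraries\nand timezone data'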
def __split_depends(self, long_depends):
"""Breaks down dependencies text into two lists of dependencies and alternatives"""
if long_depends is not None:
depends_and_alt = long_depends.split(" | ")
depends = depends_and_alt[0].split(", ")
alt_depends = (
depends_and_alt[1].split(", ") if 1 < len(depends_and_alt) else None
)
else:
depends, alt_depends = None, None
    return (depends, alt_depends)
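# Usage sketch (toy input; None stands in for the unused self): the text before
# " | " is the primary dependency list, anything after it is the alternative.
deps, alts = __split_depends(None, "libc6 (>= 2.4), zlib1g | libz1")
print(deps)   # ['libc6 (>= 2.4)', 'zlib1g']
print(alts)   # ['libz1']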
def __get_reverse_depends(self, pkg_name, pkg_dict_list):
"""Gets the names of the packages that depend on the the specified one"""
r_depends = []
for pkg in pkg_dict_list:
pkg_depends = pkg["details"].get("depends")
if pkg_depends is not None:
if pkg_name in pkg_depends:
r_depends.append(pkg["name"])
    return None if len(r_depends) == 0 else r_depends
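# Usage sketch (toy package list, my own; None stands in for the unused self):
# a package counts as a reverse dependency when the target name appears
# anywhere in its "depends" string (a plain substring check).
toy_pkgs = [
    {"name": "tcpd", "details": {"depends": "libc6 (>= 2.4), libwrap0"}},
    {"name": "make", "details": {"depends": "libc6 (>= 2.4)"}},
    {"name": "libc6", "details": {}},
]
print(__get_reverse_depends(None, "libc6", toy_pkgs))   # -> ['tcpd', 'make']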
def apply_delta(self, source, previous, new, metric, with_snapshot):
"""
Helper method to assist in delta reporting of metrics.
@param source [dict or value]: the dict to retrieve the new value of <metric> (as source[metric]) or
if not a dict, then the new value of the metric
@param previous [dict]: the previous value of <metric> that was reported (as previous[metric])
    @param new [dict]: the dict in which the new value of <metric> will be reported (as new[metric])
@param metric [String or Tuple]: the name of the metric in question. If the keys for source[metric],
previous[metric] and new[metric] vary, you can pass a tuple in the form of (src, dst)
@param with_snapshot [Bool]: if this metric is being sent with snapshot data
@return: None
"""
if isinstance(metric, tuple):
src_metric = metric[0]
dst_metric = metric[1]
else:
src_metric = metric
dst_metric = metric
if isinstance(source, dict):
new_value = source.get(src_metric, None)
else:
new_value = source
previous_value = previous.get(dst_metric, 0)
if previous_value != new_value or with_snapshot is True:
        previous[dst_metric] = new[dst_metric] = new_value
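# Usage sketch (my own values; apply_delta never reads self, so None is passed
# for it): a metric is only copied into `new` when it changed since the last
# report or when a snapshot payload is being built.
previous, new = {"rss": 100}, {}
apply_delta(None, {"rss": 100}, previous, new, "rss", with_snapshot=False)
print(new)        # {} -- unchanged value, nothing reported
apply_delta(None, {"rss": 120}, previous, new, "rss", with_snapshot=False)
print(new)        # {'rss': 120} -- change detected and recorded
print(previous)   # {'rss': 120} -- baseline updated for the next cycle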
def collect(instance, args, kwargs):
""" Build and return a fully qualified URL for this request """
kvs = dict()
try:
kvs['host'] = instance.host
kvs['port'] = instance.port
if args is not None and len(args) == 2:
kvs['method'] = args[0]
kvs['path'] = args[1]
else:
kvs['method'] = kwargs.get('method')
kvs['path'] = kwargs.get('path')
if kvs['path'] is None:
kvs['path'] = kwargs.get('url')
# Strip any secrets from potential query params
if kvs.get('path') is not None and ('?' in kvs['path']):
parts = kvs['path'].split('?')
kvs['path'] = parts[0]
if len(parts) == 2:
kvs['query'] = strip_secrets_from_query(parts[1], agent.options.secrets_matcher,
agent.options.secrets_list)
if type(instance) is urllib3.connectionpool.HTTPSConnectionPool:
kvs['url'] = 'https://%s:%d%s' % (kvs['host'], kvs['port'], kvs['path'])
else:
kvs['url'] = 'http://%s:%d%s' % (kvs['host'], kvs['port'], kvs['path'])
except Exception:
logger.debug("urllib3 collect error", exc_info=True)
return kvs
else:
        return kvs
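# Usage sketch (assumes urllib3 is installed; constructing a pool does not open
# a network connection, so this only exercises the tag extraction above):
import urllib3
pool = urllib3.HTTPConnectionPool("example.com", port=80)
print(collect(pool, ("GET", "/index.html"), {}))
# -> {'host': 'example.com', 'port': 80, 'method': 'GET',
#     'path': '/index.html', 'url': 'http://example.com:80/index.html'}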
def new_start_response(status, headers, exc_info=None):
"""Modified start response with additional headers."""
tracer.inject(self.scope.span.context, ot.Format.HTTP_HEADERS, headers)
headers.append(('Server-Timing', "intid;desc=%s" % self.scope.span.context.trace_id))
res = start_response(status, headers, exc_info)
sc = status.split(' ')[0]
if 500 <= int(sc) <= 511:
self.scope.span.mark_as_errored()
self.scope.span.set_tag(tags.HTTP_STATUS_CODE, sc)
self.scope.close()
    return res
def _collect_command_tags(self, span, event):
"""
Extract MongoDB command name and arguments and attach it to the span
"""
cmd = event.command_name
span.set_tag("command", cmd)
for key in ["filter", "query"]:
if key in event.command:
span.set_tag("filter", json_util.dumps(event.command.get(key)))
break
# The location of command documents within the command object depends on the name
# of this command. This is the name -> command object key mapping
cmd_doc_locations = {
"insert": "documents",
"update": "updates",
"delete": "deletes",
"aggregate": "pipeline"
}
cmd_doc = None
if cmd in cmd_doc_locations:
cmd_doc = event.command.get(cmd_doc_locations[cmd])
elif cmd.lower() == "mapreduce": # mapreduce command was renamed to mapReduce in pymongo 3.9.0
# mapreduce command consists of two mandatory parts: map and reduce
cmd_doc = {
"map": event.command.get("map"),
"reduce": event.command.get("reduce")
}
if cmd_doc is not None:
span.set_tag("json", json_util.dumps(cmd_doc)) | def _collect_command_tags(self, span, event):
"""
Extract MongoDB command name and arguments and attach it to the span
"""
cmd = event.command_name
span.set_tag("command", cmd)
for key in ["filter", "query"]:
if key in event.command:
span.set_tag("filter", json_util.dumps(event.command.get(key)))
break
# The location of command documents within the command object depends on the name
# of this command. This is the name -> command object key mapping
cmd_doc_locations = {
"insert": "documents",
"update": "updates",
"delete": "deletes",
"aggregate": "pipeline"
}
cmd_doc = None
if cmd in cmd_doc_locations:
cmd_doc = event.command.get(cmd_doc_locations[cmd])
elif cmd.lower() == "mapreduce": # mapreduce command was renamed to mapReduce in pymongo 3.9.0
# mapreduce command consists of two mandatory parts: map and reduce
cmd_doc = {
"map": event.command.get("map"),
"reduce": event.command.get("reduce")
}
if cmd_doc is not None:
span.set_tag("json", json_util.dumps(cmd_doc)) |
def tearDown(self):
""" Reset all environment variables of consequence """
if "K_SERVICE" in os.environ:
os.environ.pop("K_SERVICE")
if "K_CONFIGURATION" in os.environ:
os.environ.pop("K_CONFIGURATION")
if "K_REVISION" in os.environ:
os.environ.pop("K_REVISION")
if "PORT" in os.environ:
os.environ.pop("PORT")
if "INSTANA_EXTRA_HTTP_HEADERS" in os.environ:
os.environ.pop("INSTANA_EXTRA_HTTP_HEADERS")
if "INSTANA_ENDPOINT_URL" in os.environ:
os.environ.pop("INSTANA_ENDPOINT_URL")
if "INSTANA_ENDPOINT_PROXY" in os.environ:
os.environ.pop("INSTANA_ENDPOINT_PROXY")
if "INSTANA_AGENT_KEY" in os.environ:
os.environ.pop("INSTANA_AGENT_KEY")
if "INSTANA_LOG_LEVEL" in os.environ:
os.environ.pop("INSTANA_LOG_LEVEL")
if "INSTANA_SECRETS" in os.environ:
os.environ.pop("INSTANA_SECRETS")
if "INSTANA_DEBUG" in os.environ:
os.environ.pop("INSTANA_DEBUG")
if "INSTANA_TAGS" in os.environ:
os.environ.pop("INSTANA_TAGS")
set_agent(self.original_agent)
    set_tracer(self.original_tracer)
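# A more compact, behaviorally equivalent cleanup (a sketch, not the project's
# code): os.environ.pop(key, None) tolerates missing keys, so the per-variable
# membership checks above could collapse into a single loop.
import os
for key in ("K_SERVICE", "K_CONFIGURATION", "K_REVISION", "PORT",
            "INSTANA_EXTRA_HTTP_HEADERS", "INSTANA_ENDPOINT_URL",
            "INSTANA_ENDPOINT_PROXY", "INSTANA_AGENT_KEY",
            "INSTANA_LOG_LEVEL", "INSTANA_SECRETS", "INSTANA_DEBUG",
            "INSTANA_TAGS"):
    os.environ.pop(key, None)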
def collect_metrics(self, **kwargs):
"""
Collect and return metrics data (and optionally snapshot data) for this task
@return: list - with one plugin entity
"""
plugins = []
try:
if self.collector.task_metadata is not None:
plugin_data = dict()
try:
plugin_data["name"] = "com.instana.plugin.aws.ecs.task"
plugin_data["entityId"] = self.collector.task_metadata.get("TaskARN", None)
plugin_data["data"] = DictionaryOfStan()
plugin_data["data"]["taskArn"] = self.collector.task_metadata.get("TaskARN", None)
plugin_data["data"]["clusterArn"] = self.collector.task_metadata.get("Cluster", None)
plugin_data["data"]["taskDefinition"] = self.collector.task_metadata.get("Family", None)
plugin_data["data"]["taskDefinitionVersion"] = self.collector.task_metadata.get("Revision", None)
plugin_data["data"]["availabilityZone"] = self.collector.task_metadata.get("AvailabilityZone", None)
if kwargs.get("with_snapshot"):
plugin_data["data"]["desiredStatus"] = self.collector.task_metadata.get("DesiredStatus", None)
plugin_data["data"]["knownStatus"] = self.collector.task_metadata.get("KnownStatus", None)
plugin_data["data"]["pullStartedAt"] = self.collector.task_metadata.get("PullStartedAt", None)
plugin_data["data"]["pullStoppedAt"] = self.collector.task_metadata.get("PullStoppeddAt", None)
limits = self.collector.task_metadata.get("Limits", {})
plugin_data["data"]["limits"]["cpu"] = limits.get("CPU", None)
plugin_data["data"]["limits"]["memory"] = limits.get("Memory", None)
if self.collector.agent.options.zone is not None:
plugin_data["data"]["instanaZone"] = self.collector.agent.options.zone
if self.collector.agent.options.tags is not None:
plugin_data["data"]["tags"] = self.collector.agent.options.tags
except Exception:
logger.debug("collect_task_metrics: ", exc_info=True)
finally:
plugins.append(plugin_data)
except Exception:
logger.debug("collect_task_metrics: ", exc_info=True)
    return plugins
def lambda_inject_context(payload, scope):
"""
When boto3 lambda client 'Invoke' is called, we want to inject the tracing context.
boto3/botocore has specific requirements:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.invoke
"""
try:
invoke_payload = payload.get('Payload', {})
if not isinstance(invoke_payload, dict):
invoke_payload = json.loads(invoke_payload)
tracer.inject(scope.span.context, ot.Format.HTTP_HEADERS, invoke_payload)
payload['Payload'] = json.dumps(invoke_payload)
except Exception:
logger.debug("non-fatal lambda_inject_context: ", exc_info=True) | def lambda_inject_context(payload, scope):
"""
When boto3 lambda client 'Invoke' is called, we want to inject the tracing context.
boto3/botocore has specific requirements:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.invoke
"""
try:
invoke_payload = payload.get('Payload', {})
if not isinstance(invoke_payload, dict):
invoke_payload = json.loads(invoke_payload)
tracer.inject(scope.span.context, ot.Format.HTTP_HEADERS, invoke_payload)
payload['Payload'] = json.dumps(invoke_payload)
except Exception:
logger.debug("non-fatal lambda_inject_context: ", exc_info=True) |
def tearDown(self):
""" Reset all environment variables of consequence """
if "AWS_EXECUTION_ENV" in os.environ:
os.environ.pop("AWS_EXECUTION_ENV")
if "INSTANA_EXTRA_HTTP_HEADERS" in os.environ:
os.environ.pop("INSTANA_EXTRA_HTTP_HEADERS")
if "INSTANA_ENDPOINT_URL" in os.environ:
os.environ.pop("INSTANA_ENDPOINT_URL")
if "INSTANA_AGENT_KEY" in os.environ:
os.environ.pop("INSTANA_AGENT_KEY")
if "INSTANA_ZONE" in os.environ:
os.environ.pop("INSTANA_ZONE")
if "INSTANA_TAGS" in os.environ:
os.environ.pop("INSTANA_TAGS")
if "INSTANA_DISABLE_METRICS_COLLECTION" in os.environ:
os.environ.pop("INSTANA_DISABLE_METRICS_COLLECTION")
set_agent(self.original_agent)
    set_tracer(self.original_tracer)
def collect_metrics(self, **kwargs):
"""
Collect and return metrics data (and optionally snapshot data) for this task
@return: list - with one plugin entity
"""
plugins = []
plugin_data = dict()
instance_metadata = kwargs.get('instance_metadata', {})
project_metadata = kwargs.get('project_metadata', {})
try:
plugin_data["name"] = "com.instana.plugin.gcp.run.revision.instance"
plugin_data["entityId"] = instance_metadata.get("id")
plugin_data["data"] = DictionaryOfStan()
plugin_data["data"]["runtime"] = "python"
plugin_data["data"]["region"] = instance_metadata.get("region").split("/")[-1]
plugin_data["data"]["service"] = self.collector.service
plugin_data["data"]["configuration"] = self.collector.configuration
plugin_data["data"]["revision"] = self.collector.revision
plugin_data["data"]["instanceId"] = plugin_data["entityId"]
plugin_data["data"]["port"] = os.getenv("PORT", "")
plugin_data["data"]["numericProjectId"] = project_metadata.get("numericProjectId")
plugin_data["data"]["projectId"] = project_metadata.get("projectId")
except Exception:
logger.debug("collect_service_revision_entity_metrics: ", exc_info=True)
finally:
plugins.append(plugin_data)
    return plugins
def collect_metrics(self, **kwargs):
"""
Collect and return docker metrics (and optionally snapshot data) for this task
@return: list - with one or more plugin entities
"""
plugins = []
try:
if self.collector.task_metadata is not None:
containers = self.collector.task_metadata.get("Containers", [])
for container in containers:
plugin_data = dict()
plugin_data["name"] = "com.instana.plugin.docker"
docker_id = container.get("DockerId")
name = container.get("Name", "")
labels = container.get("Labels", {})
task_arn = labels.get("com.amazonaws.ecs.task-arn", "")
plugin_data["entityId"] = "%s::%s" % (task_arn, name)
plugin_data["data"] = DictionaryOfStan()
plugin_data["data"]["Id"] = container.get("DockerId", None)
with_snapshot = kwargs.get("with_snapshot", False)
# Metrics
self._collect_container_metrics(plugin_data, docker_id, with_snapshot)
# Snapshot
if with_snapshot:
self._collect_container_snapshot(plugin_data, container)
plugins.append(plugin_data)
#logger.debug(to_pretty_json(plugin_data))
except Exception:
logger.debug("DockerHelper.collect_metrics: ", exc_info=True)
    return plugins
def read_http_query_params(event):
"""
Used to parse the Lambda QueryString formats.
@param event: lambda event dict
@return: String in the form of "a=b&c=d"
"""
params = []
try:
if event is None or type(event) is not dict:
return ""
mvqsp = event.get('multiValueQueryStringParameters', None)
qsp = event.get('queryStringParameters', None)
if mvqsp is not None and type(mvqsp) is dict:
for key in mvqsp:
params.append("%s=%s" % (key, mvqsp[key]))
return "&".join(params)
elif qsp is not None and type(qsp) is dict:
for key in qsp:
params.append("%s=%s" % (key, qsp[key]))
return "&".join(params)
else:
return ""
except Exception:
logger.debug("read_http_query_params: ", exc_info=True)
return "" | def read_http_query_params(event):
"""
Used to parse the Lambda QueryString formats.
@param event: lambda event dict
@return: String in the form of "a=b&c=d"
"""
params = []
try:
if event is None or type(event) is not dict:
return ""
mvqsp = event.get('multiValueQueryStringParameters', None)
qsp = event.get('queryStringParameters', None)
if mvqsp is not None and type(mvqsp) is dict:
for key in mvqsp:
params.append("%s=%s" % (key, mvqsp[key]))
return "&".join(params)
elif qsp is not None and type(qsp) is dict:
for key in qsp:
params.append("%s=%s" % (key, qsp[key]))
return "&".join(params)
else:
return ""
except Exception:
logger.debug("read_http_query_params: ", exc_info=True)
return "" |
def capture_extra_headers(event, span, extra_headers):
"""
Capture the headers specified in `extra_headers` from `event` and log them
as a tag in the span.
@param event: the lambda event
@param span: the lambda entry span
@param extra_headers: a list of http headers to capture
@return: None
"""
try:
event_headers = event.get("headers", None)
if event_headers is not None:
for custom_header in extra_headers:
for key in event_headers:
if key.lower() == custom_header.lower():
span.set_tag("http.header.%s" % custom_header, event_headers[key])
except Exception:
logger.debug("capture_extra_headers: ", exc_info=True) | def capture_extra_headers(event, span, extra_headers):
"""
Capture the headers specified in `extra_headers` from `event` and log them
as a tag in the span.
@param event: the lambda event
@param span: the lambda entry span
@param extra_headers: a list of http headers to capture
@return: None
"""
try:
event_headers = event.get("headers", None)
if event_headers is not None:
for custom_header in extra_headers:
for key in event_headers:
if key.lower() == custom_header.lower():
span.set_tag("http.header.%s" % custom_header, event_headers[key])
except Exception:
logger.debug("capture_extra_headers: ", exc_info=True) |
def enrich_lambda_span(agent, span, event, context):
"""
Extract the required information about this Lambda run (and the trigger) and store the data
on `span`.
@param agent: the AWSLambdaAgent in use
@param span: the Lambda entry span
@param event: the lambda handler event
@param context: the lambda handler context
@return: None
"""
try:
span.set_tag('lambda.arn', agent.collector.get_fq_arn())
span.set_tag('lambda.name', context.function_name)
span.set_tag('lambda.version', context.function_version)
if event is None or type(event) is not dict:
logger.debug("enrich_lambda_span: bad event %s", type(event))
return
if is_api_gateway_proxy_trigger(event):
logger.debug("Detected as API Gateway Proxy Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
span.set_tag('http.method', event["httpMethod"])
span.set_tag('http.url', event["path"])
span.set_tag('http.path_tpl', event["resource"])
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_api_gateway_v2_proxy_trigger(event):
logger.debug("Detected as API Gateway v2.0 Proxy Trigger")
reqCtx = event["requestContext"]
# trim optional HTTP method prefix
route_path = event["routeKey"].split(" ", 2)[-1]
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
span.set_tag('http.method', reqCtx["http"]["method"])
span.set_tag('http.url', reqCtx["http"]["path"])
span.set_tag('http.path_tpl', route_path)
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_application_load_balancer_trigger(event):
logger.debug("Detected as Application Load Balancer Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:application.load.balancer')
span.set_tag('http.method', event["httpMethod"])
span.set_tag('http.url', event["path"])
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_cloudwatch_trigger(event):
logger.debug("Detected as Cloudwatch Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.events')
span.set_tag('data.lambda.cw.events.id', event['id'])
resources = event['resources']
resource_count = len(event['resources'])
if resource_count > 3:
resources = event['resources'][:3]
span.set_tag('lambda.cw.events.more', True)
else:
span.set_tag('lambda.cw.events.more', False)
report = []
for item in resources:
if len(item) > 200:
item = item[:200]
report.append(item)
span.set_tag('lambda.cw.events.resources', report)
elif is_cloudwatch_logs_trigger(event):
logger.debug("Detected as Cloudwatch Logs Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.logs')
try:
if 'awslogs' in event and 'data' in event['awslogs']:
data = event['awslogs']['data']
decoded_data = base64.b64decode(data)
decompressed_data = gzip.GzipFile(fileobj=BytesIO(decoded_data)).read()
log_data = json.loads(decompressed_data.decode('utf-8'))
span.set_tag('lambda.cw.logs.group', log_data.get('logGroup', None))
span.set_tag('lambda.cw.logs.stream', log_data.get('logStream', None))
if len(log_data['logEvents']) > 3:
span.set_tag('lambda.cw.logs.more', True)
events = log_data['logEvents'][:3]
else:
events = log_data['logEvents']
event_data = []
for item in events:
msg = item.get('message', None)
if len(msg) > 200:
msg = msg[:200]
event_data.append(msg)
span.set_tag('lambda.cw.logs.events', event_data)
except Exception as e:
span.set_tag('lambda.cw.logs.decodingError', repr(e))
elif is_s3_trigger(event):
logger.debug("Detected as S3 Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:s3')
if "Records" in event:
events = []
for item in event["Records"][:3]:
bucket_name = "Unknown"
if "s3" in item and "bucket" in item["s3"]:
bucket_name = item["s3"]["bucket"]["name"]
object_name = ""
if "s3" in item and "object" in item["s3"]:
object_name = item["s3"]["object"].get("key", "Unknown")
if len(object_name) > 200:
object_name = object_name[:200]
events.append({"event": item['eventName'],
"bucket": bucket_name,
"object": object_name})
span.set_tag('lambda.s3.events', events)
elif is_sqs_trigger(event):
logger.debug("Detected as SQS Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:sqs')
if "Records" in event:
events = []
for item in event["Records"][:3]:
events.append({'queue': item['eventSourceARN']})
span.set_tag('lambda.sqs.messages', events)
else:
logger.debug("Detected as Unknown Trigger: %s" % event)
span.set_tag(STR_LAMBDA_TRIGGER, 'unknown')
except Exception:
logger.debug("enrich_lambda_span: ", exc_info=True) | def enrich_lambda_span(agent, span, event, context):
"""
Extract the required information about this Lambda run (and the trigger) and store the data
on `span`.
@param agent: the AWSLambdaAgent in use
@param span: the Lambda entry span
@param event: the lambda handler event
@param context: the lambda handler context
@return: None
"""
try:
span.set_tag('lambda.arn', agent.collector.get_fq_arn())
span.set_tag('lambda.name', context.function_name)
span.set_tag('lambda.version', context.function_version)
if event is None or type(event) is not dict:
logger.debug("enrich_lambda_span: bad event %s", type(event))
return
if is_api_gateway_proxy_trigger(event):
logger.debug("Detected as API Gateway Proxy Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
span.set_tag('http.method', event["httpMethod"])
span.set_tag('http.url', event["path"])
span.set_tag('http.path_tpl', event["resource"])
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_api_gateway_v2_proxy_trigger(event):
logger.debug("Detected as API Gateway v2.0 Proxy Trigger")
reqCtx = event["requestContext"]
# trim optional HTTP method prefix
route_path = event["routeKey"].split(" ", 2)[-1]
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
span.set_tag('http.method', reqCtx["http"]["method"])
span.set_tag('http.url', reqCtx["http"]["path"])
span.set_tag('http.path_tpl', route_path)
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_application_load_balancer_trigger(event):
logger.debug("Detected as Application Load Balancer Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:application.load.balancer')
span.set_tag('http.method', event["httpMethod"])
span.set_tag('http.url', event["path"])
span.set_tag('http.params', read_http_query_params(event))
if agent.options.extra_http_headers is not None:
capture_extra_headers(event, span, agent.options.extra_http_headers)
elif is_cloudwatch_trigger(event):
logger.debug("Detected as Cloudwatch Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.events')
span.set_tag('data.lambda.cw.events.id', event['id'])
resources = event['resources']
resource_count = len(event['resources'])
if resource_count > 3:
resources = event['resources'][:3]
span.set_tag('lambda.cw.events.more', True)
else:
span.set_tag('lambda.cw.events.more', False)
report = []
for item in resources:
if len(item) > 200:
item = item[:200]
report.append(item)
span.set_tag('lambda.cw.events.resources', report)
elif is_cloudwatch_logs_trigger(event):
logger.debug("Detected as Cloudwatch Logs Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.logs')
try:
if 'awslogs' in event and 'data' in event['awslogs']:
data = event['awslogs']['data']
decoded_data = base64.b64decode(data)
decompressed_data = gzip.GzipFile(fileobj=BytesIO(decoded_data)).read()
log_data = json.loads(decompressed_data.decode('utf-8'))
span.set_tag('lambda.cw.logs.group', log_data.get('logGroup', None))
span.set_tag('lambda.cw.logs.stream', log_data.get('logStream', None))
if len(log_data['logEvents']) > 3:
span.set_tag('lambda.cw.logs.more', True)
events = log_data['logEvents'][:3]
else:
events = log_data['logEvents']
event_data = []
for item in events:
msg = item.get('message', None)
                        if msg and len(msg) > 200:
msg = msg[:200]
event_data.append(msg)
span.set_tag('lambda.cw.logs.events', event_data)
except Exception as e:
span.set_tag('lambda.cw.logs.decodingError', repr(e))
elif is_s3_trigger(event):
logger.debug("Detected as S3 Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:s3')
if "Records" in event:
events = []
for item in event["Records"][:3]:
bucket_name = "Unknown"
if "s3" in item and "bucket" in item["s3"]:
bucket_name = item["s3"]["bucket"]["name"]
object_name = ""
if "s3" in item and "object" in item["s3"]:
object_name = item["s3"]["object"].get("key", "Unknown")
if len(object_name) > 200:
object_name = object_name[:200]
events.append({"event": item['eventName'],
"bucket": bucket_name,
"object": object_name})
span.set_tag('lambda.s3.events', events)
elif is_sqs_trigger(event):
logger.debug("Detected as SQS Trigger")
span.set_tag(STR_LAMBDA_TRIGGER, 'aws:sqs')
if "Records" in event:
events = []
for item in event["Records"][:3]:
events.append({'queue': item['eventSourceARN']})
span.set_tag('lambda.sqs.messages', events)
else:
logger.debug("Detected as Unknown Trigger: %s" % event)
span.set_tag(STR_LAMBDA_TRIGGER, 'unknown')
except Exception:
logger.debug("enrich_lambda_span: ", exc_info=True) |
Python | def report_data_payload(self, payload):
"""
Used to report metrics and span data to the endpoint URL in self.options.endpoint_url
"""
response = None
try:
if self.report_headers is None:
# Prepare request headers
self.report_headers = {
"Content-Type": "application/json",
"X-Instana-Host": "gcp:cloud-run:revision:{revision}".format(
revision=self.collector.revision),
"X-Instana-Key": self.options.agent_key
}
self.report_headers["X-Instana-Time"] = str(round(time.time() * 1000))
response = self.client.post(self.__data_bundle_url(),
data=to_json(payload),
headers=self.report_headers,
timeout=self.options.timeout,
verify=self.options.ssl_verify,
proxies=self.options.endpoint_proxy)
if response.status_code >= 400:
logger.info("report_data_payload: Instana responded with status code %s", response.status_code)
except Exception as exc:
logger.debug("report_data_payload: connection error (%s)", type(exc))
        return response |
Python | def _validate_options(self):
"""
Validate that the options used by this Agent are valid. e.g. can we report data?
"""
        return self.options.endpoint_url is not None and self.options.agent_key is not None |
Python | def __data_bundle_url(self):
"""
URL for posting metrics to the host agent. Only valid when announced.
"""
return "{endpoint_url}/bundle".format(endpoint_url=self.options.endpoint_url) | def __data_bundle_url(self):
"""
URL for posting metrics to the host agent. Only valid when announced.
"""
return "{endpoint_url}/bundle".format(endpoint_url=self.options.endpoint_url) |
Python | def update_tracestate(self, tracestate, in_trace_id, in_span_id):
"""
Method to update the tracestate property with the instana trace_id and span_id
:param tracestate: original tracestate header
:param in_trace_id: instana trace_id
:param in_span_id: instana parent_id
:return: tracestate updated
"""
try:
span_id = in_span_id.zfill(16) # if span_id is shorter than 16 characters we prepend zeros
instana_tracestate = "in={};{}".format(in_trace_id, span_id)
if tracestate is None or tracestate == "":
tracestate = instana_tracestate
else:
# remove the existing in= entry
if "in=" in tracestate:
splitted = tracestate.split("in=")
before_in = splitted[0]
after_in = splitted[1].split(",")[1:]
tracestate = '{}{}'.format(before_in, ",".join(after_in))
# tracestate can contain a max of 32 list members, if it contains up to 31
# we can safely add the instana one without the need to truncate anything
if len(tracestate.split(",")) <= self.MAX_NUMBER_OF_LIST_MEMBERS - 1:
tracestate = "{},{}".format(instana_tracestate, tracestate)
else:
list_members = tracestate.split(",")
list_members_to_remove = len(list_members) - self.MAX_NUMBER_OF_LIST_MEMBERS + 1
# Number 1 priority members to be removed are the ones larger than 128 characters
for i, m in reversed(list(enumerate(list_members))):
if len(m) > self.REMOVE_ENTRIES_LARGER_THAN:
list_members.pop(i)
list_members_to_remove -= 1
if list_members_to_remove == 0:
break
# if there are still more than 31 list members remaining, we remove as many members
                # from the end as necessary so that only 31 list members remain
while list_members_to_remove > 0:
list_members.pop()
list_members_to_remove -= 1
# update the tracestate containing just 31 list members
tracestate = ",".join(list_members)
# adding instana as first list member, total of 32 list members
tracestate = "{},{}".format(instana_tracestate, tracestate)
except Exception:
logger.debug("Something went wrong while updating tracestate: {}:".format(tracestate), exc_info=True)
        return tracestate |
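A quick usage example of the method above, assuming it lives on an instance (here called w3c, a made-up name) whose MAX_NUMBER_OF_LIST_MEMBERS is 32:

updated = w3c.update_tracestate(
    "congo=t61rcWkgMzE,rojo=00f067aa0ba902b7",
    in_trace_id="1234d0e0e4736234",
    in_span_id="abcdef12")
# The Instana member is prepended and the short span id is zero-padded:
# "in=1234d0e0e4736234;00000000abcdef12,congo=t61rcWkgMzE,rojo=00f067aa0ba902b7"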
Python | def _collect_runtime_metrics(self, plugin_data, with_snapshot):
        """ Collect up and return the runtime metrics """
        if os.environ.get('INSTANA_DISABLE_METRICS_COLLECTION', False):
            return
try:
rusage = resource.getrusage(resource.RUSAGE_SELF)
if gc.isenabled():
self._collect_gc_metrics(plugin_data, with_snapshot)
self._collect_thread_metrics(plugin_data, with_snapshot)
value_diff = rusage.ru_utime - self.previous_rusage.ru_utime
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_utime", with_snapshot)
value_diff = rusage.ru_stime - self.previous_rusage.ru_stime
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_stime", with_snapshot)
self.apply_delta(rusage.ru_maxrss, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_maxrss", with_snapshot)
self.apply_delta(rusage.ru_ixrss, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_ixrss", with_snapshot)
self.apply_delta(rusage.ru_idrss, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_idrss", with_snapshot)
self.apply_delta(rusage.ru_isrss, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_isrss", with_snapshot)
value_diff = rusage.ru_minflt - self.previous_rusage.ru_minflt
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_minflt", with_snapshot)
value_diff = rusage.ru_majflt - self.previous_rusage.ru_majflt
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_majflt", with_snapshot)
value_diff = rusage.ru_nswap - self.previous_rusage.ru_nswap
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_nswap", with_snapshot)
value_diff = rusage.ru_inblock - self.previous_rusage.ru_inblock
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_inblock", with_snapshot)
value_diff = rusage.ru_oublock - self.previous_rusage.ru_oublock
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_oublock", with_snapshot)
value_diff = rusage.ru_msgsnd - self.previous_rusage.ru_msgsnd
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_msgsnd", with_snapshot)
value_diff = rusage.ru_msgrcv - self.previous_rusage.ru_msgrcv
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_msgrcv", with_snapshot)
value_diff = rusage.ru_nsignals - self.previous_rusage.ru_nsignals
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_nsignals", with_snapshot)
value_diff = rusage.ru_nvcsw - self.previous_rusage.ru_nvcsw
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_nvcsw", with_snapshot)
value_diff = rusage.ru_nivcsw - self.previous_rusage.ru_nivcsw
self.apply_delta(value_diff, self.previous['data']['metrics'],
plugin_data['data']['metrics'], "ru_nivcsw", with_snapshot)
except Exception:
logger.debug("_collect_runtime_metrics", exc_info=True)
finally:
            self.previous_rusage = rusage |
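apply_delta is not shown in this snippet. Judging only from the call sites above, a helper with that contract might look roughly like the sketch below; treat both the signature details and the change-detection rule as assumptions:

def apply_delta(self, value, previous, current, metric_name, with_snapshot):
    # Hypothetical delta helper: report the metric when a snapshot is
    # requested or when the value changed since the previous report.
    if with_snapshot or previous.get(metric_name) != value:
        current[metric_name] = value
        previous[metric_name] = value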
Python | def _collect_runtime_snapshot(self, plugin_data):
""" Gathers Python specific Snapshot information for this process """
snapshot_payload = {}
try:
snapshot_payload['name'] = determine_service_name()
snapshot_payload['version'] = sys.version
snapshot_payload['f'] = platform.python_implementation() # flavor
snapshot_payload['a'] = platform.architecture()[0] # architecture
snapshot_payload['versions'] = self.gather_python_packages()
snapshot_payload['iv'] = VERSION
if 'AUTOWRAPT_BOOTSTRAP' in os.environ:
snapshot_payload['m'] = 'Autowrapt'
elif 'INSTANA_MAGIC' in os.environ:
snapshot_payload['m'] = 'AutoTrace'
else:
snapshot_payload['m'] = 'Manual'
try:
from django.conf import settings # pylint: disable=import-outside-toplevel
if hasattr(settings, 'MIDDLEWARE') and settings.MIDDLEWARE is not None:
snapshot_payload['djmw'] = settings.MIDDLEWARE
elif hasattr(settings, 'MIDDLEWARE_CLASSES') and settings.MIDDLEWARE_CLASSES is not None:
snapshot_payload['djmw'] = settings.MIDDLEWARE_CLASSES
except Exception:
pass
except Exception:
logger.debug("collect_snapshot: ", exc_info=True)
        plugin_data['data']['snapshot'] = snapshot_payload |
Python | def gather_python_packages(self):
""" Collect up the list of modules in use """
versions = dict()
try:
sys_packages = sys.modules.copy()
for pkg_name in sys_packages:
# Don't report submodules (e.g. django.x, django.y, django.z)
# Skip modules that begin with underscore
if ('.' in pkg_name) or pkg_name[0] == '_':
continue
# Skip builtins
if pkg_name in ["sys", "curses"]:
continue
if sys_packages[pkg_name]:
try:
pkg_info = sys_packages[pkg_name].__dict__
if "__version__" in pkg_info:
if isinstance(pkg_info["__version__"], str):
versions[pkg_name] = pkg_info["__version__"]
else:
versions[pkg_name] = self.jsonable(pkg_info["__version__"])
elif "version" in pkg_info:
versions[pkg_name] = self.jsonable(pkg_info["version"])
else:
versions[pkg_name] = get_distribution(pkg_name).version
except DistributionNotFound:
pass
except Exception:
logger.debug("gather_python_packages: could not process module: %s", pkg_name)
# Manually set our package version
versions['instana'] = VERSION
except Exception:
logger.debug("gather_python_packages", exc_info=True)
        return versions |
Python | def __get_project_instance_metadata(self):
"""
Get the latest data from the service revision instance entity metadata and store in the class
        @return: None
"""
try:
# Refetch the GCR snapshot data
self.__last_gcr_md_full_fetch = int(time())
headers = {"Metadata-Flavor": "Google"}
# Response from the last call to
# ${GOOGLE_CLOUD_RUN_METADATA_ENDPOINT}/computeMetadata/v1/project/?recursive=true
self.project_metadata = self._http_client.get(self._gcr_md_project_uri, timeout=1,
headers=headers).json()
# Response from the last call to
# ${GOOGLE_CLOUD_RUN_METADATA_ENDPOINT}/computeMetadata/v1/instance/?recursive=true
self.instance_metadata = self._http_client.get(self._gcr_md_instance_uri, timeout=1,
headers=headers).json()
except Exception:
logger.debug("GoogleCloudRunCollector.get_project_instance_metadata", exc_info=True) | def __get_project_instance_metadata(self):
"""
Get the latest data from the service revision instance entity metadata and store in the class
@return: Boolean
"""
try:
# Refetch the GCR snapshot data
self.__last_gcr_md_full_fetch = int(time())
headers = {"Metadata-Flavor": "Google"}
# Response from the last call to
# ${GOOGLE_CLOUD_RUN_METADATA_ENDPOINT}/computeMetadata/v1/project/?recursive=true
self.project_metadata = self._http_client.get(self._gcr_md_project_uri, timeout=1,
headers=headers).json()
# Response from the last call to
# ${GOOGLE_CLOUD_RUN_METADATA_ENDPOINT}/computeMetadata/v1/instance/?recursive=true
self.instance_metadata = self._http_client.get(self._gcr_md_instance_uri, timeout=1,
headers=headers).json()
except Exception:
logger.debug("GoogleCloudRunCollector.get_project_instance_metadata", exc_info=True) |
Python | def tearDown(self):
""" Reset all environment variables of consequence """
if "PORT" in os.environ:
os.environ.pop("PORT")
if "INSTANA_EXTRA_HTTP_HEADERS" in os.environ:
os.environ.pop("INSTANA_EXTRA_HTTP_HEADERS")
if "INSTANA_ENDPOINT_URL" in os.environ:
os.environ.pop("INSTANA_ENDPOINT_URL")
if "INSTANA_AGENT_KEY" in os.environ:
os.environ.pop("INSTANA_AGENT_KEY")
if "INSTANA_ZONE" in os.environ:
os.environ.pop("INSTANA_ZONE")
if "INSTANA_TAGS" in os.environ:
os.environ.pop("INSTANA_TAGS")
set_agent(self.original_agent)
        set_tracer(self.original_tracer) |
Python | def validate(self, traceparent):
"""
Method used to validate the traceparent header
:param traceparent: string
:return: traceparent or None
"""
try:
if self.TRACEPARENT_REGEX.match(traceparent):
return traceparent
except Exception:
logger.debug("traceparent does not follow version {} specification".format(self.SPECIFICATION_VERSION))
        return None |
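TRACEPARENT_REGEX is defined elsewhere. Under the W3C Trace Context format (version-traceid-parentid-flags, lower-case hex, with all-zero trace and parent ids forbidden), a plausible pattern could be the following sketch; the exact regex is an assumption:

import re

TRACEPARENT_REGEX = re.compile(
    r"^[0-9a-f]{2}-(?!0{32})[0-9a-f]{32}-(?!0{16})[0-9a-f]{16}-[0-9a-f]{2}$")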
Python | def update_traceparent(self, traceparent, in_trace_id, in_span_id, level):
"""
This method updates the traceparent header or generates one if there was no traceparent incoming header or it
was invalid
:param traceparent: the original validated traceparent header
:param in_trace_id: instana trace id, used when there is no preexisting trace_id from the traceparent header
:param in_span_id: instana span id, used to update the parent id of the traceparent header
:param level: instana level, used to determine the value of sampled flag of the traceparent header
:return: the updated traceparent header
"""
mask = 1 << 0
trace_flags = 0
if traceparent is None: # modify the trace_id part only when it was not present at all
trace_id = in_trace_id.zfill(32)
version = self.SPECIFICATION_VERSION
else:
version, trace_id, _, trace_flags = self.get_traceparent_fields(traceparent)
trace_flags = int(trace_flags, 16)
parent_id = in_span_id.zfill(16)
trace_flags = (trace_flags & ~mask) | ((level << 0) & mask)
trace_flags = format(trace_flags, '0>2x')
traceparent = "{version}-{traceid}-{parentid}-{trace_flags}".format(version=version,
traceid=trace_id,
parentid=parent_id,
trace_flags=trace_flags)
        return traceparent |
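A worked example for the branch where no valid traceparent was received, assuming SPECIFICATION_VERSION is the W3C version string "00" and tc is an instance of the containing class (a made-up name):

tp = tc.update_traceparent(None, in_trace_id="1234d0e0e4736234",
                           in_span_id="abcdef12", level=1)
# trace id padded to 32 hex chars, parent id to 16, sampled flag set:
# "00-00000000000000001234d0e0e4736234-00000000abcdef12-01"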
Python | def report_data_payload(self, payload):
"""
Used to report metrics and span data to the endpoint URL in self.options.endpoint_url
"""
response = None
try:
if self.report_headers is None:
# Prepare request headers
self.report_headers = dict()
self.report_headers["Content-Type"] = "application/json"
self.report_headers["X-Instana-Host"] = self.collector.get_fq_arn()
self.report_headers["X-Instana-Key"] = self.options.agent_key
self.report_headers["X-Instana-Time"] = str(round(time.time() * 1000))
response = self.client.post(self.__data_bundle_url(),
data=to_json(payload),
headers=self.report_headers,
timeout=self.options.timeout,
verify=self.options.ssl_verify,
proxies=self.options.endpoint_proxy)
if not 200 <= response.status_code < 300:
logger.info("report_data_payload: Instana responded with status code %s", response.status_code)
except Exception as exc:
logger.debug("report_data_payload: connection error (%s)", type(exc))
        return response |
Python | def __data_bundle_url(self):
"""
URL for posting metrics to the host agent. Only valid when announced.
"""
return "%s/bundle" % self.options.endpoint_url | def __data_bundle_url(self):
"""
URL for posting metrics to the host agent. Only valid when announced.
"""
return "%s/bundle" % self.options.endpoint_url |
Python | def generate_id():
""" Generate a 64bit base 16 ID for use as a Span or Trace ID """
global _current_pid
pid = os.getpid()
if _current_pid != pid:
_current_pid = pid
_rnd.seed(int(1000000 * time.time()) ^ pid)
new_id = format(_rnd.randint(0, 18446744073709551615), '02x')
if len(new_id) < 16:
new_id = new_id.zfill(16)
    return new_id |
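As a side note on the formatting above, format(n, '02x') followed by zfill(16) is equivalent to the single conversion format(n, '016x'); for example:

n = 48
assert format(n, '02x').zfill(16) == format(n, '016x') == '0000000000000030'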
Python | def header_to_long_id(header):
"""
We can receive headers in the following formats:
1. unsigned base 16 hex string (or bytes) of variable length
2. [eventual]
:param header: the header to analyze, validate and convert (if needed)
:return: a valid ID to be used internal to the tracer
"""
if PY3 is True and isinstance(header, bytes):
header = header.decode('utf-8')
if not isinstance(header, string_types):
return BAD_ID
try:
# Test that header is truly a hexadecimal value before we try to convert
int(header, 16)
length = len(header)
if length < 16:
# Left pad ID with zeros
header = header.zfill(16)
return header
except ValueError:
        return BAD_ID |
Python | def header_to_id(header):
"""
We can receive headers in the following formats:
1. unsigned base 16 hex string (or bytes) of variable length
2. [eventual]
:param header: the header to analyze, validate and convert (if needed)
:return: a valid ID to be used internal to the tracer
"""
if PY3 is True and isinstance(header, bytes):
header = header.decode('utf-8')
if not isinstance(header, string_types):
return BAD_ID
try:
# Test that header is truly a hexadecimal value before we try to convert
int(header, 16)
length = len(header)
if length < 16:
# Left pad ID with zeros
header = header.zfill(16)
elif length > 16:
            # Phase 0: discard everything but the last 16 hex characters
header = header[-16:]
return header
except ValueError:
        return BAD_ID |
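Two worked examples of the normalisation performed above; short IDs are left-padded and overlong ones keep only their last 16 hex characters:

header_to_id("abc")                                # -> "0000000000000abc"
header_to_id("0af7651916cd43dd8448eb211c80319c")   # -> "8448eb211c80319c"
header_to_id("not-hex")                            # -> BAD_ID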
Python | def report_data_payload(self, payload):
"""
Used to report metrics and span data to the endpoint URL in self.options.endpoint_url
"""
response = None
try:
if self.report_headers is None:
# Prepare request headers
self.report_headers = dict()
self.report_headers["Content-Type"] = "application/json"
self.report_headers["X-Instana-Host"] = self.collector.get_fq_arn()
self.report_headers["X-Instana-Key"] = self.options.agent_key
self.report_headers["X-Instana-Time"] = str(round(time.time() * 1000))
response = self.client.post(self.__data_bundle_url(),
data=to_json(payload),
headers=self.report_headers,
timeout=self.options.timeout,
verify=self.options.ssl_verify,
proxies=self.options.endpoint_proxy)
if 200 <= response.status_code < 300:
logger.debug("report_data_payload: Instana responded with status code %s", response.status_code)
else:
logger.info("report_data_payload: Instana responded with status code %s", response.status_code)
except Exception as exc:
logger.debug("report_data_payload: connection error (%s)", type(exc))
        return response |
Python | def collect_metrics(self, **kwargs):
"""
Collect and return metrics (and optionally snapshot data) for every container in this task
@return: list - with one or more plugin entities
"""
plugins = []
try:
if self.collector.task_metadata is not None:
containers = self.collector.task_metadata.get("Containers", [])
for container in containers:
plugin_data = dict()
plugin_data["name"] = "com.instana.plugin.aws.ecs.container"
try:
labels = container.get("Labels", {})
name = container.get("Name", "")
task_arn = labels.get("com.amazonaws.ecs.task-arn", "")
plugin_data["entityId"] = "%s::%s" % (task_arn, name)
plugin_data["data"] = DictionaryOfStan()
if self.collector.root_metadata["Name"] == name:
plugin_data["data"]["instrumented"] = True
plugin_data["data"]["dockerId"] = container.get("DockerId", None)
plugin_data["data"]["taskArn"] = labels.get("com.amazonaws.ecs.task-arn", None)
if kwargs.get("with_snapshot"):
plugin_data["data"]["runtime"] = "python"
plugin_data["data"]["dockerName"] = container.get("DockerName", None)
plugin_data["data"]["containerName"] = container.get("Name", None)
plugin_data["data"]["image"] = container.get("Image", None)
plugin_data["data"]["imageId"] = container.get("ImageID", None)
plugin_data["data"]["taskDefinition"] = labels.get("com.amazonaws.ecs.task-definition-family", None)
plugin_data["data"]["taskDefinitionVersion"] = labels.get("com.amazonaws.ecs.task-definition-version", None)
plugin_data["data"]["clusterArn"] = labels.get("com.amazonaws.ecs.cluster", None)
plugin_data["data"]["desiredStatus"] = container.get("DesiredStatus", None)
plugin_data["data"]["knownStatus"] = container.get("KnownStatus", None)
plugin_data["data"]["ports"] = container.get("Ports", None)
plugin_data["data"]["createdAt"] = container.get("CreatedAt", None)
plugin_data["data"]["startedAt"] = container.get("StartedAt", None)
plugin_data["data"]["type"] = container.get("Type", None)
limits = container.get("Limits", {})
plugin_data["data"]["limits"]["cpu"] = limits.get("CPU", None)
plugin_data["data"]["limits"]["memory"] = limits.get("Memory", None)
except Exception:
logger.debug("_collect_container_snapshots: ", exc_info=True)
finally:
plugins.append(plugin_data)
except Exception:
logger.debug("collect_container_metrics: ", exc_info=True)
        return plugins |
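DictionaryOfStan is imported from elsewhere; the chained assignments such as plugin_data["data"]["limits"]["cpu"] suggest an autovivifying dictionary. A minimal sketch of such a structure, offered as an assumption rather than the library's actual definition:

from collections import defaultdict

def DictionaryOfStan():
    # Hypothetical autovivifying dict: missing keys become nested dicts,
    # so d["limits"]["cpu"] = 2 works without intermediate setup.
    return defaultdict(DictionaryOfStan)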
Python | def client_update(model, dataset, server_weights, client_optimizer):
"""The most important function, It's the training of each client."""
# Initialize client weights with server weights.
client_weights = model.weights.trainable
tf.nest.map_structure(lambda x, y: x.assign(y),
client_weights, server_weights)
# For each batch in the dataset, compute the gradients using the client optimizer
for batch in dataset:
with tf.GradientTape() as tape:
outputs = model.forward_pass(batch)
grads = tape.gradient(outputs.loss, client_weights)
grads_and_weights = zip(grads, client_weights)
client_optimizer.apply_gradients(grads_and_weights)
    return client_weights |
Python | def server_update(model, mean_client_weights):
"""Updates the server weights with an average of the client wegiths calculated by each client"""
# Get the model weights
model_weights = model.weights.trainable
# Assign the mean of the clients weights to the server model weights
tf.nest.map_structure(lambda x, y: x.assign(y),
model_weights, mean_client_weights)
    return model_weights |
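A rough sketch of how client_update and server_update could be combined into one round of plain federated averaging, ignoring TFF's federated placements for clarity; the model object, the list of client datasets and the optimizer settings are assumptions borrowed from the surrounding snippets:

import tensorflow as tf

def run_one_round(model, server_weights, client_datasets):
    client_results = []
    for dataset in client_datasets:
        client_optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
        trained = client_update(model, dataset, server_weights, client_optimizer)
        # Snapshot the values, since client_update returns the live model variables.
        client_results.append([tf.identity(v) for v in trained])
    # Element-wise mean of the client weights...
    mean_client_weights = tf.nest.map_structure(
        lambda *tensors: tf.reduce_mean(tf.stack(tensors), axis=0),
        *client_results)
    # ...pushed back into the server model.
    return server_update(model, mean_client_weights)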
Python | def model_fn():
"""Creates the Keras model with a loss function, accuray as metric and the specification of the input data"""
keras_model = create_keras_model()
return tff.learning.from_keras_model(
keras_model,
input_spec=federated_training_data[0].element_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
    ) |
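create_keras_model is not shown here. For something like the classic federated image-classification example it could be a small uncompiled model along these lines (the layer sizes and the 784-feature input are assumptions):

def create_keras_model():
    # Uncompiled on purpose: tff.learning.from_keras_model attaches the
    # loss and metrics itself.
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(128, activation='relu', input_shape=(784,)),
        tf.keras.layers.Dense(10, activation='softmax')])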
Python | def aws_credentials(request: pytest.fixture, aws_utils: pytest.fixture, profile_name: str):
"""
Fixture for setting up temporary AWS credentials from assume role.
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param aws_utils: aws_utils fixture.
:param profile_name: Named AWS profile to store temporary credentials.
"""
aws_credentials_obj = AwsCredentials(profile_name)
original_access_key, original_secret_access_key, original_token = aws_credentials_obj.get_aws_credentials()
aws_credentials_obj.set_aws_credentials_by_session(aws_utils.assume_session())
def teardown():
# Reset to the named profile using the original AWS credentials
aws_credentials_obj.set_aws_credentials(original_access_key, original_secret_access_key, original_token)
request.addfinalizer(teardown)
    return aws_credentials_obj |
Python | def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
    ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
"""
    # open the file and read its contents into a string
    names=[]
    f=open(filename,'r')
    file_contents=f.read()
    f.close()
#find year Popularity in 2006
year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', file_contents)
if not year_match:
# We didn't find a year, so we'll exit with an error message.
sys.stderr.write('Couldn\'t find the year!\n')
sys.exit(1)
year = year_match.group(1)
names.append(year)
#solution # 1
#the following will create a list of tuples, with each tuple as (rank, name1, name2)
male_list = re.findall(r'td>(\d*\d)</td><td>(\w+)</td><td>\w+</td>', file_contents)
female_list = re.findall(r'td>(\d*\d)</td><td>\w+</td><td>(\w+)</td>', file_contents)
alist=male_list+female_list
# sort method 1
# alist.sort(key=lambda atuple:atuple[1])
blist=[x[1]+ ' '+ x[0] for x in alist]
# sort method 2
blist.sort(key=lambda item:item.split()[0])
# insert the year item
names=names+blist
print(names)
print(len(names))
print('end of solution1')
#solution # 2 - More efficient
names = []
names.append(year)
tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td>\<td>(\w+)</td>', file_contents)
names_to_rank = {}
for rank, boyname, girlname in tuples:
if boyname not in names_to_rank:
names_to_rank[boyname] = rank
if girlname not in names_to_rank:
names_to_rank[girlname] = rank
sorted_names = sorted(names_to_rank.keys())
for name in sorted_names:
names.append(name + " " + names_to_rank[name])
print(names)
    print('end of solution2') |
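To make the two findall patterns concrete, this is what they yield on one representative (made-up) table row from the baby-names HTML:

row = '<tr align="right"><td>1</td><td>Jacob</td><td>Emily</td>'
re.findall(r'td>(\d*\d)</td><td>(\w+)</td><td>\w+</td>', row)   # [('1', 'Jacob')]
re.findall(r'td>(\d*\d)</td><td>\w+</td><td>(\w+)</td>', row)   # [('1', 'Emily')]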
Python | async def change_slots(ctx, new_slots:int):
"""
Updates the amount of inventory slots you have. If you haven't run this command before, this command makes your character.
"""
try:
if not is_added_user(ctx.author):
add_user(ctx.author, new_slots)
else:
change_user_slots(ctx.author, new_slots)
await ctx.send(f"{ctx.author} now has {new_slots} slots.")
except Exception as e:
print(e.with_traceback(None))
await ctx.send(f"You need to write the command as {bot.command_prefix}slots <new amount of slots>") | async def change_slots(ctx, new_slots:int):
"""
Updates the amount of inventory slots you have. If you haven't run this command before, this command makes your character.
"""
try:
if not is_added_user(ctx.author):
add_user(ctx.author, new_slots)
else:
change_user_slots(ctx.author, new_slots)
await ctx.send(f"{ctx.author} now has {new_slots} slots.")
except Exception as e:
print(e.with_traceback(None))
await ctx.send(f"You need to write the command as {bot.command_prefix}slots <new amount of slots>") |
Python | async def add_item_to_item_list(ctx, item_amount, item_name, bulk_per_item, worth_per_item):
"""
Adds new item to global item list.
"""
try:
global item_dict
item_amount = int(item_amount)
bulk_per_item = float(bulk_per_item)
item_name = item_name.lower()
if item_name in item_dict.keys():
await ctx.send(f"{item_name} has already been added.")
else:
new_obj = Object(name=item_name,bulk=bulk_per_item,price=worth_per_item)
item_dict[item_name] = new_obj
save_item_list(new_obj)
await ctx.send(f"Added {obj_name} to item list.")
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}add <item amount> <item's name> <bulk per item> <price per item>") | async def add_item_to_item_list(ctx, item_amount, item_name, bulk_per_item, worth_per_item):
"""
Adds new item to global item list.
"""
try:
global item_dict
item_amount = int(item_amount)
bulk_per_item = float(bulk_per_item)
item_name = item_name.lower()
if item_name in item_dict.keys():
await ctx.send(f"{item_name} has already been added.")
else:
new_obj = Object(name=item_name,bulk=bulk_per_item,price=worth_per_item)
item_dict[item_name] = new_obj
save_item_list(new_obj)
await ctx.send(f"Added {obj_name} to item list.")
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}add <item amount> <item's name> <bulk per item> <price per item>") |
Python | async def take_item(ctx, item_amount, item_name):
"""
Removes <item amount> times of <item name> from your inventory.
"""
try:
item_amount = int(item_amount)
if not is_added_user(ctx.author):
await ctx.send(f"Please use the {bot.command_prefix}slots first.")
else:
await ctx.send(take_user_item(ctx.author, item_name, item_amount))
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}take <item amount> <item name>") | async def take_item(ctx, item_amount, item_name):
"""
Removes <item amount> times of <item name> from your inventory.
"""
try:
item_amount = int(item_amount)
if not is_added_user(ctx.author):
await ctx.send(f"Please use the {bot.command_prefix}slots first.")
else:
await ctx.send(take_user_item(ctx.author, item_name, item_amount))
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}take <item amount> <item name>") |
Python | async def give_item(ctx, item_amount, item_name):
"""
Adds <item amount> times of <item name> to your inventory.
"""
try:
item_amount = int(item_amount)
if not is_added_user(ctx.author):
await ctx.send(f"Please use the {bot.command_prefix}slots first.")
else:
await ctx.send(give_user_object(ctx.author, item_name, item_amount))
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}add <item amount> <item name>") | async def give_item(ctx, item_amount, item_name):
"""
Adds <item amount> times of <item name> to your inventory.
"""
try:
item_amount = int(item_amount)
if not is_added_user(ctx.author):
await ctx.send(f"Please use the {bot.command_prefix}slots first.")
else:
await ctx.send(give_user_object(ctx.author, item_name, item_amount))
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}add <item amount> <item name>") |
Python | async def search_for_item(ctx, query):
"""
Searches for item in item list for <query>.
"""
def levenshtein(a, b):
if not a: return len(b)
if not b: return len(a)
return min(levenshtein(a[1:], b[1:])+(a[0] != b[0]),
levenshtein(a[1:], b)+1,
levenshtein(a, b[1:])+1)
try:
search_distance = lambda x : levenshtein(x.lower(),query.lower())
search_results = sorted(item_dict.keys(),key=search_distance)[:5]
await ctx.send('\n'.join(search_results))
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}search <search term>") | async def search_for_item(ctx, query):
"""
Searches for item in item list for <query>.
"""
def levenshtein(a, b):
if not a: return len(b)
if not b: return len(a)
return min(levenshtein(a[1:], b[1:])+(a[0] != b[0]),
levenshtein(a[1:], b)+1,
levenshtein(a, b[1:])+1)
try:
search_distance = lambda x : levenshtein(x.lower(),query.lower())
search_results = sorted(item_dict.keys(),key=search_distance)[:5]
await ctx.send('\n'.join(search_results))
except Exception as e:
print(e)
await ctx.send(f"You need to write the command as {bot.command_prefix}search <search term>") |
Python | def should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> Optional[bool]
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
if req.editable or not req.source_dir:
return False
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
    return True |
Python | def should_cache(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and should_build()
has determined a wheel needs to be built.
"""
if not should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
):
# never cache if pip install (need_wheel=False) would not have built
# (editable mode, etc)
return False
if req.link and req.link.is_vcs:
# VCS checkout. Build wheel just for this run
# unless it points to an immutable commit hash in which
# case it can be cached.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
link = req.link
base, ext = link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, build the wheel just for this run using the ephemeral
# cache since we are either in the case of e.g. a local directory, or
# no cache directory is available to use.
    return False |
Python | def format_command_result(
command_args, # type: List[str]
command_output, # type: Text
):
# type: (...) -> str
"""Format command information for logging."""
command_desc = format_command_args(command_args)
text = 'Command arguments: {}\n'.format(command_desc)
if not command_output:
text += 'Command output: None'
elif logger.getEffectiveLevel() > logging.DEBUG:
text += 'Command output: [use --verbose to show]'
else:
if not command_output.endswith('\n'):
command_output += '\n'
text += 'Command output:\n{}{}'.format(command_output, LOG_DIVIDER)
    return text |
Python | def _build_wheel_legacy(
name, # type: str
setup_py_path, # type: str
source_dir, # type: str
global_options, # type: List[str]
build_options, # type: List[str]
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one unpacked package using the "legacy" build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
wheel_args = make_setuptools_bdist_wheel_args(
setup_py_path,
global_options=global_options,
build_options=build_options,
destination_dir=tempd,
)
spin_message = 'Building wheel for %s (setup.py)' % (name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
try:
output = call_subprocess(
wheel_args,
cwd=source_dir,
spinner=spinner,
)
except Exception:
spinner.finish("error")
logger.error('Failed building wheel for %s', name)
return None
names = os.listdir(tempd)
wheel_path = get_legacy_build_wheel_path(
names=names,
temp_dir=tempd,
name=name,
command_args=wheel_args,
command_output=output,
)
    return wheel_path |

def _build_wheel_pep517(
name, # type: str
backend, # type: Pep517HookCaller
metadata_directory, # type: str
build_options, # type: List[str]
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one InstallRequirement using the PEP 517 build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
assert metadata_directory is not None
if build_options:
# PEP 517 does not support --build-options
logger.error('Cannot build wheel for %s using PEP 517 when '
'--build-option is present' % (name,))
return None
try:
logger.debug('Destination directory: %s', tempd)
runner = runner_with_spinner_message(
'Building wheel for {} (PEP 517)'.format(name)
)
with backend.subprocess_runner(runner):
wheel_name = backend.build_wheel(
tempd,
metadata_directory=metadata_directory,
)
except Exception:
logger.error('Failed building wheel for %s', name)
return None
    return os.path.join(tempd, wheel_name)

def _collect_buildset(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
check_binary_allowed, # type: BinaryAllowedPredicate
need_wheel, # type: bool
):
# type: (...) -> List[Tuple[InstallRequirement, str]]
"""Return the list of InstallRequirement that need to be built,
with the persistent or temporary cache directory where the built
wheel needs to be stored.
"""
buildset = []
cache_available = bool(wheel_cache.cache_dir)
for req in requirements:
if not should_build(
req,
need_wheel=need_wheel,
check_binary_allowed=check_binary_allowed,
):
continue
if (
cache_available and
should_cache(req, check_binary_allowed)
):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
buildset.append((req, cache_dir))
    return buildset

def section(self, name: str) -> YamlType:
"""Returns top section of a data from YAML file as a dictionary.
Args:
name (str): name of a section
"""
    pass
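# Illustration only: one possible concrete implementation of the section() hook
# above, assuming PyYAML is available and that the reader is constructed with a
# path to a YAML file. YamlType is treated as a plain dict here; the class name
# and the file name in the usage comment are made up.
import yaml

class YamlFileReader:
    def __init__(self, path: str) -> None:
        self._path = path

    def section(self, name: str) -> dict:
        """Return the top-level section ``name`` of the YAML data as a dictionary."""
        with open(self._path) as handle:
            data = yaml.safe_load(handle) or {}
        return data.get(name, {})

# Hypothetical usage: YamlFileReader("settings.yaml").section("database")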

def _check_running_state(self):
"""
        Delay the operation if the boot-up process has not finished yet.
        This ensures that internal data structures are not accessed before they are ready.
"""
while not self.bootup_finished:
self.log.info("Delaying request. Bootup not finished.")
            yield from asyncio.sleep(1)
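# Illustration only: the same delay-until-ready loop written with modern
# async/await syntax instead of the generator-based coroutine above; the
# BootGuard class and its attributes are made up for this sketch.
import asyncio
import logging

class BootGuard:
    def __init__(self):
        self.bootup_finished = False
        self.log = logging.getLogger("bootguard")

    async def check_running_state(self):
        # Poll once per second until the boot-up flag flips to True.
        while not self.bootup_finished:
            self.log.info("Delaying request. Bootup not finished.")
            await asyncio.sleep(1)

async def _demo():
    guard = BootGuard()
    asyncio.get_running_loop().call_later(2, setattr, guard, "bootup_finished", True)
    await guard.check_running_state()

# asyncio.run(_demo())  # uncomment to try the sketch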

def generate_key(address):
"""
Generates a node identifier (key) based on the network address for this instance.
:param address:
The network address as string
:return:
Generated node id
:rtype: int
"""
    return int(hashlib.sha256(address.encode()).hexdigest(), 16) % CHORD_RING_SIZE
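# Illustration only: deriving a ring position from a network address as above.
# CHORD_RING_SIZE is not shown in this excerpt, so an 8-bit ring (2**8 = 256)
# is assumed here, and the address string is made up.
import hashlib

ASSUMED_RING_SIZE = 2 ** 8

address = "tcp://127.0.0.1:5555/0"
node_id = int(hashlib.sha256(address.encode()).hexdigest(), 16) % ASSUMED_RING_SIZE
print(node_id)  # deterministic value in [0, 255] for this address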

def join(self, node_id=None, node_address=None, bootstrap_address=None, additional_data=None):
"""
Joins an existing Chord network or creates a new one.
        It sets up all internal state variables needed for operation.
        It needs to be called before any other function or RPC call.
:param node_id:
Optional node ID.
If not supplied, it will be generated automatically.
:param node_address:
Optional node address formatted as an aiomas agent address (IPv4 or IPv6 address)
:param bootstrap_address:
If not given, a new Chord network is created. Otherwise, the new node
will gather the required information to integrate into the Chord network.
:param additional_data:
Optional additional data as dict that is added to the trace log if a node calls
:func:`find_successor_rec` with tracing enabled.
"""
self.id = node_id or self.generate_key(self.node_address)
self.node_address = node_address or self.node_address # normally already set in __init__
self.bootstrap_address = bootstrap_address
self.predecessor = None
self.log.info("[Configuration] node_id: %d, bootstrap_node: %s", self.id, self.bootstrap_address)
self.additional_data = additional_data or {}
if self.bootstrap_address:
# Regular node joining via bootstrap node
self.__generate_fingers(None)
# Try joining later if our successor does not respond
successor = None
while True:
successor, status = yield from self.run_rpc_safe(self.bootstrap_address, "rpc_find_successor_rec",
self.fingertable[0]["start"])
if status == 0:
if successor["status"] == 0:
                        # Successor seems to be reachable: we can proceed
break
else:
self.log.warn("Successor node not responding.")
else:
self.log.warn("Bootstrap node not responding.")
self.log.warn("Will retry in 3 seconds.")
yield from asyncio.sleep(3)
# Proceed with a working successor
successor = filter_node_response(successor)
self.successor.set(successor)
yield from self.init_successor_list(successor)
yield from self.init_finger_table()
self.bootup_finished = True
yield from self.update_others()
else:
# This is the bootstrap node
successor_node = self.as_dict()
self.__generate_fingers(successor_node)
self.successor.set(successor_node) # bootstrap first references itself
self.bootup_finished = True
self.print_finger_table()
# if self.bootstrap_address:
# remote_peer = yield from self.container.connect(self.bootstrap_address)
# ft = yield from remote_peer.rpc_get_fingertable()
# print("Bootstrap Finger Table: ")
        #     self.print_finger_table(ft)

def init_finger_table(self):
"""Generates a basic finger table for this node joining an existing Chord network.
"""
self.print_finger_table()
# Fix references to our direct neighbors
        # This is necessary so that find_successor works correctly.
yield from self.update_neighbors(initialization=True)
# Retrieve successor node for each finger 0 -> m-1 (finger 0 is already retrieved from bootstrap node)
for k in range(CHORD_FINGER_TABLE_SIZE - 1):
finger = self.fingertable[k]
finger_next = self.fingertable[k + 1]
if in_interval(finger_next["start"], self.id, finger["successor"]["node_id"], inclusive_left=True):
self.log.info("Copy previous finger: %d in between [%d, %d)",
finger_next["start"],
self.id,
finger["successor"]["node_id"])
# Reuse previous finger
finger_next["successor"] = finger["successor"]
else:
self.log.info("Exceeding our successor, need a RPC.")
# TODO: validate data
                # BUG: with only 2 nodes in the network, the node responsible for the requested start ID
                # is wrong because the bootstrap node has not updated its table yet
finger_successor, status = yield from self.run_rpc_safe(self.bootstrap_address, "rpc_find_successor_rec",
finger_next["start"])
self.log.info("Node for %d: %s", finger_next["start"], finger_successor)
finger_next["successor"] = filter_node_response(finger_successor)
# Optimization for joining node (if not bootstrap node)
# - Find close node to myself (e.g., successor)
# - Request finger table and store temporary_entries
# - for each of my needed finger table starts, use closest entries and directly ask this node.
        # - Fallback to node asked previously (or bootstrap node as last fallback) if node is not responding
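# Illustration only: sketches of the two helpers the code above relies on, under
# the usual Chord conventions. Finger k of node n starts at (n + 2**k) mod ring
# size, and in_interval() is a wrap-around range check on the identifier circle.
# Neither helper is defined in this excerpt, so both implementations below are
# assumptions, and an 8-bit ring is assumed.
RING_SIZE = 2 ** 8
FINGER_TABLE_SIZE = 8

def finger_starts(node_id):
    return [(node_id + 2 ** k) % RING_SIZE for k in range(FINGER_TABLE_SIZE)]

def in_interval_sketch(value, left, right, inclusive_left=False, inclusive_right=False):
    # Membership test for the interval (left, right) on the ring, handling wrap-around.
    if inclusive_left:
        left = (left - 1) % RING_SIZE
    if inclusive_right:
        right = (right + 1) % RING_SIZE
    if left < right:
        return left < value < right
    return value > left or value < right

print(finger_starts(116))                # [117, 118, 120, 124, 132, 148, 180, 244]
print(in_interval_sketch(250, 244, 10))  # True: 250 lies between 244 and 10 going clockwise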

def init_successor_list(self, successor):
"""Fetch successor list from our immediate successor when joining a network.
"""
successor_details, status = yield from self.run_rpc_safe(successor["node_address"], "rpc_get_node_info",
successor_list=True)
self.successor.update_others(successor_details.get("successor_list"), self.id)
        self.log.info("New successor list: %s", self.successor.list)

def update_neighbors(self, initialization=False):
""" Update immediate neighbors.
Update our successor's pointer to reference us as immediate predecessor
(according to default Chord specification).
        Notify our direct predecessor about our presence. This allows it to stabilize
        its immediate successor finger[0] early.
Requires that finger[0] is set properly.
"""
successor = self.successor.get()
if successor["node_id"] == self.id:
return
# Fix predecessor reference on our immediate successor
update_pred, conn_err = yield from self.run_rpc_safe(successor["node_address"], "rpc_update_predecessor",
self.as_dict())
if conn_err != 0:
# Immediate successor is not responding (should not happen as checked before)
self.log.warn("Immediate successor %s not responding.", successor)
return # TODO: better error handling
self.log.debug("Predecessor update result: %s", update_pred)
if update_pred["node_address"] == self.node_address and "old_predecessor" in update_pred:
# Successfully integrated into Chord overlay network
# Successor already references us at this point.
if initialization:
self.predecessor = filter_node_response(update_pred["old_predecessor"])
self.log.info("Set predecessor: %s", self.predecessor)
# Notify our predecessor to be aware of us (new immediate successor)
# It might already know. In that case, this call is useless.
# However, for a Chord network only consisting of one node, this is crucial that this node's
# successor references us. Only like this, the circle is closed in the forward direction.
yield from self.run_rpc_safe(self.predecessor["node_address"], "rpc_update_successor",
self.as_dict())
# Merge received key,values into own storage
print("Keys received:", update_pred.get("storage"))
self.storage.merge(update_pred.get("storage"))
# elif update_pred["node_address"] != self.node_address:
# # Fix concurrent joins in the same area:
# # Seems that our successor got a closere predecessor in the mean time.
# # We trust our original successor that it tells the truth and correct our successor reference.
# new_successor = filter_node_response(update_pred)
#
# if in_interval(new_successor["node_id"], self.id, successor["node_id"]):
# self.successor.set(new_successor)
# self.log.info("Periodic fix: updated successor reference to node %d (%s)",
# new_successor["node_id"], new_successor["node_address"])
#
# # Notify our new successor to change its predecessor reference to us
# # If this successor is still not the right one, it will be corrected in the next round.
# yield from self.run_rpc_safe(new_successor["node_address"], "rpc_update_predecessor",
# self.as_dict())
#
# else:
# self.log.warn("Could not stabilize. Our original successors sends rubbish.")
elif update_pred["node_address"] == self.node_address:
self.log.info("Predecessor and successor references ok. Nothing to do.")
else:
# Something went wrong during update. This is only relevant if it happened during startup
# of this node.
# A new node might have joined in the meantime -> TODO: update our reference or clean exit
print("[Update_neighbors] Response:", update_pred)
print("[Update_neighbors] Myself:", self.as_dict())
            self.log.error("Could not update predecessor reference of our successor. Try restarting.")

def update_successor(self, new_node):
"""Updates the reference to our immediate successor triggered by other peer's hint.
A neighboring successor uses this function to notify us about its presence. This ensures
that the Chord ring is correct.
The parameter ``new_node`` gives a hint about the new successor in this case. To verify
this hint, this node contacts its old successor.
:param new_node:
Successor hint.
"""
old_successor = self.successor.get()
# No other peers yet in the network -> no maintenance possible
if old_successor["node_id"] == self.id and new_node is None:
return
# New successor before old one or old one not responding anymore (last option is TODO)
if in_interval(new_node["node_id"], self.id, old_successor["node_id"]):
# Check old successor whether it already accepted new node
# TODO: validation + timeout catch
successor_view, peer_err = yield from self.run_rpc_safe(old_successor["node_address"],
"rpc_get_node_info")
if peer_err != 0:
# Immediate successor is not responding
self.log.warn("Immediate successor %s not responding.", old_successor)
return # TODO: better error handling, e.g., update on peer_err > 0
if successor_view["predecessor"]["node_address"] == new_node["node_address"]:
# Update finger table to point to new immediate successor
new_node = filter_node_response(new_node)
self.successor.set(new_node)
self.log.info("Updated successor reference to node %d (%s)",
new_node["node_id"], new_node["node_address"])
else:
# Do not update, only mention suspicious observation
self.log.error("Node %d (%s) wants to be our immediate successor, but original successor %d (%s) "
"does not reference it. Looks malicious. Or our view is not fresh anymore :(",
new_node["node_id"], new_node["node_address"],
                           old_successor["node_id"], old_successor["node_address"])

def update_others(self):
"""Update peers' finger table that should refer to our node and notify them.
"""
for k in range(0, CHORD_FINGER_TABLE_SIZE):
id = (self.id - 2**k) % CHORD_RING_SIZE
# Find predecessor
successor = yield from self.find_successor(id, with_neighbors=True)
p = successor["predecessor"]
# In rare cases with id exactly matching the node's key, successor is more correct to reduce hops.
# Ex: 116 is looking for node 114 (finger 2), predecessor would be node 249 with successor 114
# In this case, finger in node 114 should be changed, too.
# if p["successor"]["node_id"] == id:
# p = p["successor"]
self.log.info("Update peer: %s", p)
if self.id != p["node_id"]:
yield from self.run_rpc_safe(p["node_address"], "rpc_update_finger_table",
                                             self.as_dict(), k)
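# Illustration only: the ring positions probed when a node announces itself.
# For finger k, the peer responsible for (new_id - 2**k) mod ring size is looked
# up and notified; an 8-bit ring is assumed for the numbers below.
new_id, ring_size = 116, 2 ** 8
print([(new_id - 2 ** k) % ring_size for k in range(8)])
# [115, 114, 112, 108, 100, 84, 52, 244]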

def fix_finger(self, finger_id=-1):
"""
Resolves the responsible node for the given finger and updates it accordingly.
:param finger_id:
index of the finger table to update.
The value should be between 0 and length of the finger table.
"""
if not (0 <= finger_id < len(self.fingertable)):
            raise IndexError("Invalid finger ID.")
cur_finger = self.fingertable[finger_id]
successor = yield from self.find_successor(cur_finger["start"])
print("For start %d, successor is '%s'" % (cur_finger["start"], successor))
if successor is None:
self.log.warn("No suitable node found for start %d. Do not update finger.", cur_finger["start"])
elif successor != cur_finger["successor"]:
self.log.info("Finger %d updated: successor is now %s (old: %s)",
finger_id, successor, cur_finger["successor"])
cur_finger["successor"] = filter_node_response(successor)
# else:
# self.log.warn("Received successor for finger %d not fitting to ID ranges in finger table: %d not in [%d, %d)",
        #                   finger_id, successor["node_id"], cur_finger["start"], next_finger["start"])

def update_successor_list(self):
"""Periodically checks availability of our successor peer, maintains a list of possible successors
and swaps to another successor if the first fails.
"""
if len(self.successor.list) == 0 or self.successor.get() == self.as_dict():
return
while len(self.successor.list) > 0:
cur_successor = self.successor.get()
# Query our successor about its current successor list
successor_details, status = yield from self.run_rpc_safe(cur_successor["node_address"], "rpc_get_node_info",
successor_list=True)
if status == 0:
# TODO: filter successor_details
self.successor.print_list()
self.successor.update_others(successor_details["successor_list"], ignore_key=self.id)
# Predecessor of a successor can be missing (None)
new_successor = successor_details.get("predecessor")
print("[update_successor_list] New successor would be:", new_successor)
if new_successor and in_interval(new_successor["node_id"], self.id, cur_successor["node_id"]):
# Our successor already has a different and closer predecessor than us
new_successor, status = yield from self.run_rpc_safe(new_successor["node_address"], "rpc_get_node_info",
successor_list=True)
print("[update_successor_list] SPECIAL CASE: would move to:", new_successor)
if status == 0 and "successor_list" in new_successor:
# Linking to the new peer being our successor now.
print("update_successor_list] SPECIAL CASE: moved to new successor")
self.successor.set(filter_node_response(new_successor))
self.successor.update_others(new_successor["successor_list"], ignore_key=self.id)
# Successor view must contain at least our previous successor in its list.
                # Otherwise, this peer seems to behave strangely
if self.successor.count_occurrence(cur_successor) == 0:
self.log.warn("Reverting successor list as new successor does not include previous one. "
"Looks suspicious to me.")
self.successor.revert_update()
# Notify our successor here to accelerate the stabilization
yield from self.update_neighbors()
break
else:
                # Try next successor as the current one does not respond appropriately
self.log.info("Successor ID %d not responding. Trying next.", self.successor.get()["node_id"])
if len(self.successor.list) > 1:
self.successor.delete_first()
else:
self.log.warn("No evidence of any other peers alive. Going over to act as bootstrap for others")
                    self.successor.set(self.as_dict())

def check_predecessor(self):
"""Verifies this node's immediate predecessor's live.
If it is lost, remove reference to give new nodes a chance to repair it.
"""
if self.predecessor is None or self.predecessor["node_id"] == self.id:
return
predecessor, status = yield from self.run_rpc_safe(self.predecessor["node_address"],
"rpc_get_node_info")
print("[check_predecessor] Connected to pred: %s" % predecessor)
print("[check_predecessor] Previous pred was: %s" % self.predecessor)
if status != 0 or \
(status == 0 and predecessor["successor"]["node_address"] != self.node_address):
# Predecessor not reachable anymore or our predecessor does not reference us -> Clean up.
self.predecessor = None
            self.log.warn("Removing invalid predecessor reference.")

def find_successor_trace(self, node_id):
"""Wrapper for :func:`find_successor_rec` with trace log enabled for intermediate hops.
:param node_id:
Key ``node_id`` whose responsible successor is interesting.
:return:
Responsible successor node for given key ``node_id``.
:rtype: dict or None
"""
result = yield from self.find_successor_rec(node_id, tracing=True)
result = filter_node_response(result, trace_log=True)
        return result

def find_successor_rec(self, node_id, with_neighbors=False, tracing=False):
"""Recursively locate the responsible node for a given ``node_id`` (key).
This function is the heart of the Chord DHT.
It is used locally and by remote peers.
:param node_id:
Key ``node_id`` whose responsible successor is interesting.
:param with_neighbors:
If ``True``, the immediate successor and predecessor nodes augment the result of
the responsible successor.
This is useful if the predecessor of the responsible node is needed.
:return:
Responsible successor node for given key ``node_id``.
"""
successor = self.successor.get()
if in_interval(node_id, self.id, successor["node_id"], inclusive_right=True):
            # Check liveness of the successor node and augment its information with successor and predecessor links
# if required
successor_details = successor.copy()
successor_neighborhood, status = yield from self.run_rpc_safe(successor["node_address"], "rpc_get_node_info",
additional_data=tracing)
if status == 0:
# Successor node is alive
if with_neighbors:
successor_details.update(filter_node_response(successor_neighborhood, immediate_neighbors=True))
successor_details["status"] = 0
else:
# Successor node is dead
successor_details.update({"status": 1, "message": "last hop not responding"})
# Add list for tracing
if tracing:
last_hop = successor.copy()
last_hop.update({"additional_data": successor_neighborhood.get("additional_data", {}) if successor_neighborhood else {}})
successor_details["trace"] = [last_hop]
# Include our own additional data to be integrated by our preceding hop
successor_details["additional_data"] = self.additional_data
return successor_details
else:
# Find closest finger to node_id and forward recursive query.
# If the current finger's node does not respond, try a less optimal one -> requires more hops.
# TODO: remember faulty nodes and replace if it happens too often
this_node = self.as_dict()
i = 1
next_hop = self.get_closest_preceding_finger(node_id, fall_back=0)
while next_hop != this_node:
print("[find_successor_rec] Closest finger node for %d: %s" % (node_id, next_hop))
# TODO: validate and check for None
peer_data, status = yield from self.run_rpc_safe(next_hop["node_address"], "rpc_find_successor_rec",
node_id, with_neighbors=with_neighbors, tracing=tracing)
if status == 0:
print("[find_successor_rec] Remote result for id %d: %s" % (node_id, peer_data))
# Tracing
# If the recursion tree is built completely, the touched peers are inserted in a trace list on
# the way back.
                    # The preceding node inserts its next hop in the trace. This provides basic protection:
                    # a malicious node cannot avoid being visible in the list.
# Regarding the order, the goal peer is at position 0 in the list and the first hop from the sender
# is at the last position n-1 (n describes all involved nodes).
if tracing:
if peer_data is None:
peer_data = {"status": 1, "message": "trace incomplete."}
successor_node = next_hop.copy()
successor_node["additional_data"] = peer_data["additional_data"]
peer_data["trace"].append(successor_node)
return peer_data
print("[find_successor_rec] Remote id %d with '%s' failed. Try next [%d]." %
(next_hop["node_id"], next_hop["node_address"], i))
next_hop = self.get_closest_preceding_finger(node_id, fall_back=i)
i += 1
# Already reached end of unique peers in our finger table: we are isolated right now
self.log.info("No suitable alternatives as next hop.")
            return {"status": 1, "message": "no suitable alternatives found, giving up."}
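# Illustration only: a toy model of how the trace list described above is
# assembled. Each hop appends the peer it forwarded the query to while the
# recursion unwinds, so the resolved target sits at index 0 and the first hop
# taken by the sender at the last index. The hop names are made up.
def toy_lookup(hops, target, position=0):
    if position == len(hops) - 1:
        # The last hop owns the key and starts the trace with the target itself.
        return {"node": target, "trace": [target]}
    result = toy_lookup(hops, target, position + 1)
    result["trace"].append(hops[position + 1])  # record the next hop on the way back
    return result

print(toy_lookup(["sender", "hop-A", "hop-B"], "target-node"))
# {'node': 'target-node', 'trace': ['target-node', 'hop-B', 'hop-A']}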

def drive(self, travel_range: float):
"""Return charge level after driving
        Parameters:
travel_range (float): distance which should be traveled
Returns:
battery_level (str): current charge level
"""
battery_discharge_percent = travel_range * self.__efficiency
if self.__battery_charge - battery_discharge_percent >= 0:
self.__battery_charge -= battery_discharge_percent
return self.check_battery_level()
else:
            return self.check_battery_level()

def namelist_block(config, namelist):
"""Return the namelist block as a string."""
if namelist not in config:
return ''
block = namelist + '\n'
for key in config[namelist]:
if config[namelist][key]:
value = config[namelist][key].strip()
block += '{} = {}\n'.format(key, value)
block += '/\n'
    return block
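# Illustration only: the block produced by namelist_block() above for a small,
# made-up config dictionary. Keys with empty values are skipped and the trailing
# '/' closes the Fortran namelist.
demo_config = {
    '&control': {
        'prefix': "'MoS2'",
        'calculation': "'scf'",
        'outdir': '',            # empty value -> omitted from the block
    },
}
print(namelist_block(demo_config, '&control'))
# &control
# prefix = 'MoS2'
# calculation = 'scf'
# /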

def card_block(config, card):
"""Return the card block as a string."""
if card not in config:
return ''
block = card
if 'option' in config[card]:
block += ' ' + config[card]['option']
block += '\n'
value = config[card]['value'].strip()
if value:
block += value + '\n'
    return block
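# Illustration only: the card produced by card_block() above for a made-up
# K_POINTS entry.
demo_card_config = {
    'K_POINTS': {
        'option': 'automatic',
        'value': '6 6 1 0 0 0',
    },
}
print(card_block(demo_card_config, 'K_POINTS'))
# K_POINTS automatic
# 6 6 1 0 0 0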

def create_kgrid_in(config, dirname='.'):
"""Create a kgrid.in input file following the config."""
file_in = os.path.join(dirname, 'kgrid.in')
nat = str(helpers.num_lines(config['ATOMIC_POSITIONS']['value']))
k_points = config['K_POINTS']['value'].strip().split()
nk = ' '.join(k_points[:3])
dk = ' '.join([str(float(k) * 0.5) for k in k_points[3:]])
atomic_species = config['ATOMIC_SPECIES']['value'].strip().splitlines()
elements = [s.split()[0] for s in atomic_species]
positions = ''
for atom in config['ATOMIC_POSITIONS']['value'].strip().splitlines():
atom_split = atom.split()
element = atom_split[0]
positions += '{:d} {}\n'.format(elements.index(element) + 1,
' '.join(atom_split[1:]))
with open(file_in, 'a') as f:
f.write(nk + '\n')
f.write(dk + '\n')
f.write(config['kgrid']['q-shift'].strip() + '\n\n')
f.write(config['kgrid']['cell'].strip() + '\n')
f.write(nat + '\n')
f.write(positions)
f.write('20 20 20\n')
        f.write('.false.\n')
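# Illustration only: how the K_POINTS value is split by create_kgrid_in() above.
# The first three numbers are the grid divisions (nk); the last three are the
# shift flags, which are halved to give the offsets written to kgrid.in (dk).
# The sample value is made up.
k_points = '6 6 1 1 1 1'.split()
nk = ' '.join(k_points[:3])
dk = ' '.join([str(float(k) * 0.5) for k in k_points[3:]])
print(nk)  # 6 6 1
print(dk)  # 0.5 0.5 0.5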

def create_in(config, dirname='.'):
"""Create an 'in' input file following the config."""
file_in = os.path.join(dirname, 'in')
nat = str(helpers.num_lines(config['ATOMIC_POSITIONS']['value']))
ntyp = str(helpers.num_lines(config['ATOMIC_SPECIES']['value']))
if '&system' in config:
config['&system']['nat'] = nat
config['&system']['ntyp'] = ntyp
with open(file_in, 'a') as f:
f.write(namelist_block(config, '&control'))
f.write(namelist_block(config, '&system'))
f.write(namelist_block(config, '&electrons'))
f.write(card_block(config, 'CELL_PARAMETERS'))
f.write(card_block(config, 'ATOMIC_SPECIES'))
f.write(card_block(config, 'ATOMIC_POSITIONS'))
        f.write(card_block(config, 'K_POINTS'))

def create_pp_in(config, dirname='.', wfng_kgrid=True):
"""Create an 'pp_in' input file following the config."""
file_in = os.path.join(dirname, 'pp_in')
k_points = config['K_POINTS']['value'].strip().split()
q_shift = config['kgrid']['q-shift'].strip().split()
nk = k_points[:3]
dk = [float(k) * 0.5 for k in k_points[3:]]
dk = [str(dk[i] + float(q_shift[i]) * float(nk[i])) for i in range(3)]
with open(file_in, 'a') as f:
f.write('&input_pw2bgw\n')
f.write('prefix = {}\n'.format(config['&control']['prefix']))
f.write('wfng_flag = .true.\n')
if wfng_kgrid:
f.write('wfng_kgrid = .true.\n')
f.write('wfng_nk1 = {}\n'.format(nk[0]))
f.write('wfng_nk2 = {}\n'.format(nk[1]))
f.write('wfng_nk3 = {}\n'.format(nk[2]))
f.write('wfng_dk1 = {}\n'.format(dk[0]))
f.write('wfng_dk2 = {}\n'.format(dk[1]))
f.write('wfng_dk3 = {}\n'.format(dk[2]))
if 'pp_in' in config:
for key in config['pp_in']:
if config['pp_in'][key]:
f.write('{} = {}\n'.format(key, config['pp_in'][key]))
        f.write('/\n')
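# Illustration only: how the wfng_dk* values are derived in create_pp_in() above,
# namely the halved shift flag plus the q-shift expressed in grid units
# (q_shift[i] * nk[i]). The sample numbers are made up and the printed strings are
# subject to ordinary floating-point rounding.
k_points = '6 6 1 0 0 0'.split()
q_shift = '0.0 0.001 0.0'.split()
nk = k_points[:3]
dk = [float(k) * 0.5 for k in k_points[3:]]
dk = [str(dk[i] + float(q_shift[i]) * float(nk[i])) for i in range(3)]
print(nk)  # ['6', '6', '1']
print(dk)  # roughly ['0.0', '0.006', '0.0']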

def create_scf(config, dirname='.'):
"""Create 1-scf directory and its input files."""
dirpath = os.path.join(dirname, '1-scf')
clean = os.path.join(dirpath, 'clean')
override = {
'&control': {
'calculation': '\'scf\'',
},
'&system': {
'nbnd': '',
},
'K_POINTS': {
'option': 'automatic',
},
}
os.makedirs(dirpath)
create_in(helpers.deep_merge(config, override), dirpath)
with open(clean, 'w') as f:
f.write('#!/bin/bash\n'
'rm -rf CRASH *out* charge* *.{igk,mix,wfc,save}* 2> '
'/dev/null\n'
)
    helpers.make_executable(clean)

def create_wfn(config, dirname='.'):
"""Create 2-wfn directory and its input files."""
dirpath = os.path.join(dirname, '2-wfn')
get_kgrid = os.path.join(dirpath, 'get-kgrid')
clean = os.path.join(dirpath, 'clean')
override = {
'&control': {
'calculation': '\'bands\'',
'wf_collect': '.true.',
},
'K_POINTS': {
'option': 'crystal',
'value': '',
},
}
kgrid_override = {
'kgrid': {
'q-shift': '0.0 0.0 0.0',
},
}
pp_in_config = helpers.deep_merge(config, kgrid_override)
pp_in_config.pop('pp_in', None)
os.makedirs(dirpath)
create_kgrid_in(helpers.deep_merge(config, kgrid_override), dirpath)
create_pp_in(pp_in_config, dirpath)
create_in(helpers.deep_merge(config, override), dirpath)
with open(get_kgrid, 'w') as f:
f.write('#!/bin/bash\n'
'kgrid.x kgrid.in kgrid.out kgrid.log\n'
'sed -n 2,99999p kgrid.out >> in\n')
with open(clean, 'w') as f:
f.write('#!/bin/bash\n'
'rm -rf CRASH *{paw,igk,wfc,log,out,.xmgr}* MoS2.save/K00* '
'MoS2.save/*.UPF RHO \\\n'
' WFN vxc.dat *.ps bands.dat* 2> /dev/null\n'
)
helpers.make_executable(get_kgrid)
    helpers.make_executable(clean)

def create_wfnq(config, dirname='.'):
"""Create 3-wfnq directory and its input files."""
dirpath = os.path.join(dirname, '3-wfnq')
override = {
'&control': {
'calculation': '\'bands\'',
'wf_collect': '.true.',
},
'&system': {
'nbnd': '',
},
'K_POINTS': {
'option': 'crystal',
'value': '',
},
}
pp_in_config = helpers.deep_merge(config, {})
pp_in_config.pop('pp_in', None)
os.makedirs(dirpath)
create_kgrid_in(config, dirpath)
create_pp_in(pp_in_config, dirpath)
    create_in(helpers.deep_merge(config, override), dirpath)

def create_wfn_co(config, dirname='.'):
"""Create 4-wfn_co directory and its input files."""
dirpath = os.path.join(dirname, '4-wfn_co')
k_points = config['K_POINTS']['value'].strip().split()[:3]
override = {
'&control': {
'calculation': '\'bands\'',
'wf_collect': '.true.',
},
'K_POINTS': {
'option': 'crystal',
'value': '',
},
}
kgrid_override = {
'K_POINTS': {
'value': '{} 0 0 0'.format(' '.join([str(k) for k in k_points]))
},
'kgrid': {
'q-shift': '0.0 0.0 0.0',
},
'pp_in': {
'rhog_flag': '.true.',
'vxc_flag': '.true.',
},
}
os.makedirs(dirpath)
create_kgrid_in(helpers.deep_merge(config, kgrid_override), dirpath)
create_pp_in(helpers.deep_merge(config, kgrid_override), dirpath)
    create_in(helpers.deep_merge(config, override), dirpath)

def create_wfn_fi(config, dirname='.'):
"""Create 5-wfn_fi directory and its input files."""
dirpath = os.path.join(dirname, '5-wfn_fi')
k_points = config['K_POINTS']['value'].strip().split()
nk = [int(int(k_points[i]) * 2) if int(k_points[i]) != 1 else
int(k_points[i]) for i in range(3)]
override = {
'&control': {
'calculation': '\'bands\'',
'wf_collect': '.true.',
},
'K_POINTS': {
'option': 'crystal',
'value': '',
},
}
kgrid_override = {
'K_POINTS': {
'value': '{} 0 0 0'.format(' '.join([str(k) for k in nk]))
},
'kgrid': {
'q-shift': '0.0 0.0 0.0',
},
}
pp_in_config = helpers.deep_merge(config, kgrid_override)
pp_in_config.pop('pp_in', None)
os.makedirs(dirpath)
create_kgrid_in(helpers.deep_merge(config, kgrid_override), dirpath)
create_pp_in(pp_in_config, dirpath)
    create_in(helpers.deep_merge(config, override), dirpath)
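
# A standalone sketch of the grid-doubling rule used above, assuming a coarse
# grid of '6 6 1' (a dimension of 1 marks a non-periodic direction).
k_points = '6 6 1'.split()
nk = [int(k) * 2 if int(k) != 1 else int(k) for k in k_points]
print(nk)   # -> [12, 12, 1]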
Python | def create_wfnq_fi(config, dirname='.'):
"""Create 6-wfnq_fi directory and its input files."""
dirpath = os.path.join(dirname, '6-wfnq_fi')
k_points = config['K_POINTS']['value'].strip().split()
    q_shift = config['kgrid']['q-shift'].strip().split()
    # Collapse the q-shift vector to its first non-zero component; this scalar
    # is used to offset the doubled fine grid below.
    for q in q_shift:
        if float(q) > 0:
            q_shift = float(q)
            break
    # Double the coarse grid along periodic directions, as in create_wfn_fi.
    nk = [int(int(k_points[i]) * 2) if int(k_points[i]) != 1 else
          int(k_points[i]) for i in range(3)]
override = {
'&control': {
'calculation': '\'bands\'',
'wf_collect': '.true.',
},
'&system': {
'nbnd': '',
},
'K_POINTS': {
'option': 'crystal',
'value': '',
},
}
kgrid_override = {
'K_POINTS': {
'value': '{} {}'.format(
' '.join([str(k) for k in nk]),
' '.join([str(2 * k * q_shift) if k != 1 else
'0' for k in nk]))
},
'kgrid': {
'q-shift': '0.0 0.0 0.0',
},
}
pp_in_config = helpers.deep_merge(config, kgrid_override)
pp_in_config.pop('pp_in', None)
os.makedirs(dirpath)
create_kgrid_in(helpers.deep_merge(config, kgrid_override), dirpath)
create_pp_in(pp_in_config, dirpath)
    create_in(helpers.deep_merge(config, override), dirpath)
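
# A standalone sketch of the shifted fine-grid K_POINTS value built above;
# the grid '6 6 1' and q-shift '0.0 0.0 0.001' are illustrative assumptions.
k_points = '6 6 1'.split()
q_components = '0.0 0.0 0.001'.split()
q = next(float(x) for x in q_components if float(x) > 0)   # first non-zero component

nk = [int(k) * 2 if int(k) != 1 else int(k) for k in k_points]
shift = [str(2 * k * q) if k != 1 else '0' for k in nk]
print('{} {}'.format(' '.join(str(k) for k in nk), ' '.join(shift)))
# -> 12 12 1 0.024 0.024 0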
Python | def create_bands(config, dirname='.'):
"""Create 7-bands directory and its input files."""
dirpath = os.path.join(dirname, '7-bands')
override = {
'&control': {
'calculation': '\'bands\'',
'wf_collect': '.true.',
},
'K_POINTS': config['K_POINTS_bands'],
}
in_config = helpers.deep_merge(config, override)
in_config.pop('pp_in', None)
os.makedirs(dirpath)
create_pp_in(in_config, dirpath, False)
    create_in(in_config, dirpath)
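
# A hedged illustration of what config['K_POINTS_bands'] could contain: a
# pw.x K_POINTS card in 'crystal_b' form describing a band path. The path
# and point counts are assumptions for illustration, not project values.
k_points_bands = {
    'option': 'crystal_b',
    'value': '3\n'
             '0.000 0.000 0.000 20\n'   # Gamma
             '0.500 0.000 0.000 20\n'   # example zone-boundary point
             '0.000 0.000 0.000 1\n',   # back to Gamma
}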
Python | def create_qe(config, dirname='.'):
"""Create a new directory '1-qe' and all its directories."""
dirpath = os.path.join(dirname, '1-qe')
os.makedirs(dirpath)
create_link_files(config, dirpath)
create_scf(config, dirpath)
create_wfn(config, dirpath)
create_wfnq(config, dirpath)
create_wfn_co(config, dirpath)
create_wfn_fi(config, dirpath)
create_wfnq_fi(config, dirpath)
    create_bands(config, dirpath)
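
# A minimal usage sketch for the driver above. The config.json path is a
# hypothetical placeholder; the file must hold the nested sections
# ('&control', '&system', 'K_POINTS', 'K_POINTS_bands', 'kgrid', 'pp_in', ...)
# that the create_* helpers index.
import json

if __name__ == '__main__':
    with open('config.json') as f:
        config = json.load(f)
    create_qe(config, dirname='.')   # builds ./1-qe and its subdirectories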
Python | def input_block(config, section):
"""Return the input block as a string."""
block = ''
if section not in config:
return ''
for key in config[section]:
value = config[section][key].strip()
if value:
block += '{} {}\n'.format(key, value)
else:
block += key + '\n'
    return block
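
# A small worked example of input_block; the 'kernel' section and its keys
# are illustrative assumptions.
sample = {
    'kernel': {
        'number_val_bands': '4',
        'number_cond_bands': '4',
        'use_symmetries_coarse_grid': '',   # flag-style key with an empty value
    },
}
print(input_block(sample, 'kernel'))
# number_val_bands 4
# number_cond_bands 4
# use_symmetries_coarse_grid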
Python | def create_epsilon(config, dirname='.'):
"""Create 1-epsilon directory and its input files."""
dirpath = os.path.join(dirname, '1-epsilon')
setup = os.path.join(dirpath, '0-setup.sh')
qpoints = os.path.join(dirpath, 'qpoints')
os.makedirs(dirpath)
with open(setup, 'a') as f:
f.writelines([
'#!/bin/bash\n'
'num_kp=$(cat qpoints | wc -l)\n',
'\n',
'# Create epsilon.inp for every kpoints inside the qpoints file\n',
'for i in $(seq 1 $num_kp); do\n',
' dir="eps$(seq -f "%02g" $i $i)"\n',
'\n',
' if [[ -z $1 ]]; then\n',
' mkdir ${dir}\n',
' cd $dir\n',
'\n',
' cat > epsilon.inp <<- EOM\n',
])
        # Prefix each line with tabs so the '<<- EOM' heredoc in 0-setup.sh
        # strips the indentation when the input block is expanded.
        f.writelines([
            '\t\t\t{}\n'.format(x) for x in input_block(
                config, 'epsilon').split('\n') if x.strip()
        ])
f.writelines([
'\n',
' begin qpoints\n',
' $(sed -n ${i}p ../qpoints)\n',
' end\n',
' EOM\n',
'\n',
' ln -s ../WFN .\n',
' ln -s ../WFNq .\n',
'\n',
' cd ..\n',
' elif [[ $1 == "clean" ]]; then\n',
' rm -rf ${dir}\n',
' fi\n',
'done\n',
'\n',
'# Create an epsmat merge folder\n',
'if [[ -z $1 ]]; then\n',
' nkp=$((num_kp-1))\n',
'\n',
' mkdir merge\n',
' cd merge\n',
'\n',
' echo "{} $nkp" > epsmat_merge.inp\n'.format(
config['epsilon']['epsilon_cutoff']),
'\n',
' for i in $(seq 2 $num_kp); do\n',
' kpoint=$(sed -n ${i}p ../qpoints)\n',
' echo "${kpoint%?}" >> epsmat_merge.inp\n',
'\n',
' dir="eps$(seq -f "%02g" $i $i)"\n',
' epsmat="epsmat$(seq -f "%02g" $i $i)"\n',
' ln -s ../$dir/epsmat $epsmat\n',
' done\n',
'\n',
' echo "$nkp" >> epsmat_merge.inp\n',
'\n',
' for i in $(seq 2 $num_kp); do\n',
' epsmat="epsmat$(seq -f "%02g" $i $i)"\n',
' echo "$epsmat 1" >> epsmat_merge.inp\n',
' done\n',
' cd ..\n',
'elif [[ $1 == "clean" ]]; then\n',
' rm -rf merge\n',
'fi\n',
'\n',
'ln -sf eps01/eps0mat .\n',
'ln -sf merge/epsmat .\n',
])
with open(qpoints, 'a') as f:
f.write('# Replace this file with all the qpoints for epsilon.inp\n')
    helpers.make_executable(setup)
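
# The generated 0-setup.sh expects a hand-filled qpoints file with one
# epsilon.inp q-point entry per line. A sketch of writing a placeholder file;
# the numbers and trailing flags are illustrative assumptions, not real
# k-grid output.
sample_qpoints = [
    '0.000000000  0.000000000  0.001000000  1.0  1',   # assumed q->0 entry
    '0.000000000  0.166666667  0.000000000  1.0  0',
    '0.166666667  0.166666667  0.000000000  1.0  0',
]
with open('qpoints', 'w') as f:
    f.write('\n'.join(sample_qpoints) + '\n')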
Python | def create_sigma(config, dirname='.'):
"""Create 2-sigma directory and its input files."""
dirpath = os.path.join(dirname, '2-sigma')
setup = os.path.join(dirpath, '0-setup.sh')
kpoints = os.path.join(dirpath, 'kpoints')
merge = os.path.join(dirpath, '2-merge.sh')
override = {
'sigma': {
'band_index_min': config['pp_in']['vxc_diag_nmin'],
'band_index_max': config['pp_in']['vxc_diag_nmax'],
},
}
config = helpers.deep_merge(config, override)
os.makedirs(dirpath)
with open(setup, 'a') as f:
f.writelines([
'#!/bin/bash\n'
'num_kp=$(cat kpoints | wc -l)\n',
'\n',
'# Create sigma.inp for every kpoints inside the kpoints file\n',
'for i in $(seq 1 $num_kp); do\n',
' dir="sig$(seq -f "%02g" $i $i)"\n',
'\n',
' if [[ -z $1 ]]; then\n',
' mkdir ${dir}\n',
' cd $dir\n',
'\n',
' cat > sigma.inp <<- EOM\n',
])
        # Prefix each line with tabs so the '<<- EOM' heredoc in 0-setup.sh
        # strips the indentation when the input block is expanded.
        f.writelines([
            '\t\t\t{}\n'.format(x) for x in input_block(
                config, 'sigma').split('\n') if x.strip()
        ])
f.writelines([
'\n',
' begin kpoints\n',
' $(sed -n ${i}p ../kpoints)\n',
' end\n',
' EOM\n',
'\n',
' ln -s ../RHO .\n',
' ln -s ../WFN_inner .\n',
' ln -s ../eps0mat .\n',
' ln -s ../epsmat .\n',
' ln -s ../vxc.dat .\n',
'\n',
' cd ..\n',
' elif [[ $1 == "clean" ]]; then\n',
' rm -rf ${dir}\n',
' fi\n',
'done\n',
])
with open(kpoints, 'a') as f:
f.write('# Replace this file with all the kpoints for sigma.inp\n')
with open(merge, 'a') as f:
f.writelines([
'#!/bin/bash\n',
'num_kp=$(cat kpoints | wc -l)\n',
'\n',
'for i in $(seq 1 $num_kp); do\n',
' dir="sig$(seq -f "%02g" $i $i)"\n',
' cat $dir/eqp0.dat >> eqp0.dat\n',
' cat $dir/eqp1.dat >> eqp1.dat\n',
'done\n',
])
helpers.make_executable(setup)
    helpers.make_executable(merge)
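
# A pure-Python sketch of what the generated 2-merge.sh does: concatenate the
# per-k-point eqp files from the sig01, sig02, ... directories in order.
import glob

for name in ('eqp0.dat', 'eqp1.dat'):
    with open(name, 'w') as out:
        for part in sorted(glob.glob('sig*/{}'.format(name))):
            with open(part) as f:
                out.write(f.read())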
Python | def create_kernel(config, dirname='.'):
"""Create 3-kernel directory and its input files."""
dirpath = os.path.join(dirname, '3-kernel')
inp = os.path.join(dirpath, 'kernel.inp')
clean = os.path.join(dirpath, 'clean')
os.makedirs(dirpath)
with open(inp, 'a') as f:
f.write(input_block(config, 'kernel'))
with open(clean, 'w') as f:
f.write('#!/bin/bash\n'
'rm bse* *.log *.out 2> /dev/null\n'
)
    helpers.make_executable(clean)
Python | def create_absorption(config, dirname='.'):
"""Create 4-absorption directory and its input files."""
dirpath = os.path.join(dirname, '4-absorption')
inp = os.path.join(dirpath, 'absorption.inp')
clean = os.path.join(dirpath, 'clean')
override = {
'absorption': {
'number_val_bands_coarse': config['kernel']['number_val_bands'],
'number_cond_bands_coarse': config['kernel']['number_cond_bands'],
},
}
config = helpers.deep_merge(config, override)
os.makedirs(dirpath)
with open(inp, 'a') as f:
f.write(input_block(config, 'absorption'))
with open(clean, 'w') as f:
f.write('#!/bin/bash\n'
'rm *.log *.out absorption_noeh.dat bandstructure.dat '
'dtmat* \\\n'
' eigenvalues_noeh.dat dcmat_norm.dat dvmat_norm.dat '
'epsdiag.dat vmtxel \\\n'
' eqp.dat eqp_q.dat absorption_eh.dat eigenvalues.dat '
'eigenvectors 2> /dev/null\n'
)
    helpers.make_executable(clean)
Python | def create_inteqp(config, dirname='.'):
"""Create 5-inteqp directory and its input files."""
dirpath = os.path.join(dirname, '5-inteqp')
inp = os.path.join(dirpath, 'inteqp.inp')
clean = os.path.join(dirpath, 'clean')
os.makedirs(dirpath)
with open(inp, 'a') as f:
f.write(input_block(config, 'inteqp'))
with open(clean, 'w') as f:
f.write('#!/bin/bash\n'
'rm *.log *.out bandstructure.dat dtmat* dcmat_norm.dat '
'dvmat_norm.dat \\\n'
' eqp.dat eqp_q.dat 2> /dev/null\n'
)
    helpers.make_executable(clean)