Dataset columns (per record):
repository_name: string, length 5 to 67
func_path_in_repository: string, length 4 to 234
func_name: string, length 0 to 314
whole_func_string: string, length 52 to 3.87M
language: string, 6 classes
func_code_string: string, length 52 to 3.87M
func_documentation_string: string, length 1 to 47.2k
func_code_url: string, length 85 to 339
manolomartinez/greg
greg/aux_functions.py
check_directory
python
def check_directory(placeholders):
    """
    Find out, and create if needed, the directory in which the feed
    will be downloaded
    """
    feed = placeholders.feed
    args = feed.args
    placeholders.directory = "This very directory"  # wink, wink
    placeholders.fullpath = os.path.join(
        placeholders.directory, placeholders.filename)
    try:
        if args["downloaddirectory"]:
            ensure_dir(args["downloaddirectory"])
            placeholders.directory = args["downloaddirectory"]
    except KeyError:
        pass
    download_path = os.path.expanduser(
        feed.retrieve_config("Download Directory", "~/Podcasts"))
    subdirectory = feed.retrieve_config(
        "Create subdirectory", "no")
    if "no" in subdirectory:
        placeholders.directory = download_path
    elif "yes" in subdirectory:
        subdnametemplate = feed.retrieve_config(
            "subdirectory_name", "{podcasttitle}")
        subdname = substitute_placeholders(
            subdnametemplate, placeholders)
        placeholders.directory = os.path.join(download_path, subdname)
    ensure_dir(placeholders.directory)
    placeholders.fullpath = os.path.join(
        placeholders.directory, placeholders.filename)
    return placeholders
Find out, and create if needed, the directory in which the feed will be downloaded
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L116-L147
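The helper ensure_dir is not included in this record. A minimal stand-in, assuming it simply creates the directory tree when missing (this is a sketch, not greg's actual implementation), could look like this:

import os
import tempfile

def ensure_dir(path):
    # Create the directory (and any parents) if it does not already exist.
    os.makedirs(os.path.expanduser(path), exist_ok=True)

# Exercise it against a throwaway location rather than a real podcast directory.
ensure_dir(os.path.join(tempfile.gettempdir(), "greg-demo", "My Show"))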
manolomartinez/greg
greg/aux_functions.py
parse_for_download
python
def parse_for_download(args):
    """
    Turn an argument such as 4, 6-8, 10 into a list such as [4,6,7,8,10]
    """
    single_arg = ""
    # in the first bit we put all arguments together and take out any extra spaces
    list_of_feeds = []
    for arg in args["number"]:
        single_arg = ''.join([single_arg, " ", arg])
    single_arg = single_arg.translate({32: None})  # eliminates spaces
    for group in single_arg.split(sep=","):
        if not ("-" in group):
            list_of_feeds.append(group)
        else:
            extremes = group.split(sep="-")
            list_of_feeds = list_of_feeds + [str(x) for x in range(
                eval(extremes[0]), eval(extremes[1]) + 1)]
    return list_of_feeds
Turn an argument such as 4, 6-8, 10 into a list such as [4,6,7,8,10]
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L150-L168
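The range endpoints above go through eval(), which will execute any expression passed on the command line. A minimal sketch of the same "4, 6-8, 10" grammar using int() instead (function name and argument shape here are illustrative, not part of greg):

def parse_ranges(tokens):
    # Same grammar as parse_for_download, but endpoints are converted with
    # int() instead of eval(), so arbitrary expressions are rejected rather
    # than executed.
    flat = "".join(tokens).replace(" ", "")
    selected = []
    for group in flat.split(","):
        if "-" in group:
            lo, hi = group.split("-")
            selected += [str(x) for x in range(int(lo), int(hi) + 1)]
        else:
            selected.append(group)
    return selected

print(parse_ranges(["4,", "6-8,", "10"]))  # ['4', '6', '7', '8', '10']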
manolomartinez/greg
greg/aux_functions.py
tag
python
def tag(placeholders):
    """
    Tag the file at podpath with the information in podcast and entry
    """
    # We first recover the name of the file to be tagged...
    template = placeholders.feed.retrieve_config("file_to_tag", "{filename}")
    filename = substitute_placeholders(template, placeholders)
    podpath = os.path.join(placeholders.directory, filename)
    # ... and this is it

    # now we create a dictionary of tags and values
    tagdict = placeholders.feed.defaulttagdict  # these are the defaults
    try:  # We do as if there was a section with potential tag info
        feedoptions = placeholders.feed.config.options(placeholders.name)
        # this monstruous concatenation of classes... surely a bad idea.
        tags = [[option.replace("tag_", ""), placeholders.feed.config[
            placeholders.name][option]] for option in feedoptions
            if "tag_" in option]  # these are the tags to be filled
        if tags:
            for tag in tags:
                tagdict[tag[0]] = tag[1]
    except configparser.NoSectionError:
        pass
    for tag in tagdict:
        metadata = substitute_placeholders(
            tagdict[tag], placeholders)
        if metadata:
            stagger.util.set_frames(podpath, {tag: metadata})
        else:
            stagger.util.remove_frames(podpath, tag)
Tag the file at podpath with the information in podcast and entry
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L171-L200
manolomartinez/greg
greg/aux_functions.py
download_handler
python
def download_handler(feed, placeholders):
    import shlex
    """
    Parse and execute the download handler
    """
    value = feed.retrieve_config('downloadhandler', 'greg')
    if value == 'greg':
        while os.path.isfile(placeholders.fullpath):
            placeholders.fullpath = placeholders.fullpath + '_'
            placeholders.filename = placeholders.filename + '_'
        urlretrieve(placeholders.link, placeholders.fullpath)
    else:
        value_list = shlex.split(value)
        instruction_list = [substitute_placeholders(part, placeholders)
                            for part in value_list]
        returncode = subprocess.call(instruction_list)
        if returncode:
            raise URLError
Parse and execute the download handler
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L214-L231
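A standalone sketch of the non-greg branch: split a handler string with shlex, substitute placeholder fields token by token, and run it with subprocess. The command, placeholder names, URL, and path below are purely illustrative (and the example assumes curl is installed):

import shlex
import subprocess

handler = "curl -L {link} -o {fullpath}"
fields = {"link": "https://example.com/episode.mp3",
          "fullpath": "/tmp/episode.mp3"}

parts = shlex.split(handler)                         # ['curl', '-L', '{link}', '-o', '{fullpath}']
command = [part.format(**fields) for part in parts]  # per-token substitution, as download_handler does
returncode = subprocess.call(command)
if returncode:
    raise RuntimeError("download handler failed with exit code %d" % returncode)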
manolomartinez/greg
greg/aux_functions.py
parse_feed_info
python
def parse_feed_info(infofile):
    """
    Take a feed file in .local/share/greg/data and return a list of links
    and of dates
    """
    entrylinks = []
    linkdates = []
    try:
        with open(infofile, 'r') as previous:
            for line in previous:
                entrylinks.append(line.split(sep=' ')[0])
                # This is the list of already downloaded entry links
                linkdates.append(eval(line.split(sep=' ', maxsplit=1)[1]))
                # This is the list of already downloaded entry dates
                # Note that entrydates are lists, converted from a
                # time.struct_time() object
    except FileNotFoundError:
        pass
    return entrylinks, linkdates
Take a feed file in .local/share/greg/data and return a list of links and of dates
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L234-L252
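The date portion of each line is parsed with eval(); since it is a plain list literal, ast.literal_eval handles the same format without running arbitrary code. A small sketch, assuming the "<link> <date-as-list>" layout the function expects (the concrete values are made up):

import ast

line = "http://example.com/ep1.mp3 [2021, 5, 1, 12, 0, 0, 5, 121, 0]\n"

link, date_repr = line.split(sep=' ', maxsplit=1)
date = ast.literal_eval(date_repr)   # parses the list literal without executing code
print(link, date[:3])                # http://example.com/ep1.mp3 [2021, 5, 1]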
manolomartinez/greg
greg/aux_functions.py
pretty_print
python
def pretty_print(session, feed):
    """
    Print the dictionary entry of a feed in a nice way.
    """
    if feed in session.feeds:
        print()
        feed_info = os.path.join(session.data_dir, feed)
        entrylinks, linkdates = parse_feed_info(feed_info)
        print(feed)
        print("-" * len(feed))
        print(''.join([" url: ", session.feeds[feed]["url"]]))
        if linkdates != []:
            print(''.join([" Next sync will download from: ", time.strftime(
                "%d %b %Y %H:%M:%S", tuple(max(linkdates))), "."]))
    else:
        print("You don't have a feed called {}.".format(feed),
              file=sys.stderr, flush=True)
Print the dictionary entry of a feed in a nice way.
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L255-L271
manolomartinez/greg
greg/aux_functions.py
substitute_placeholders
python
def substitute_placeholders(inputstring, placeholders):
    """
    Take a string with placeholders, and return the strings with
    substitutions.
    """
    newst = inputstring.format(link=placeholders.link,
                               filename=placeholders.filename,
                               directory=placeholders.directory,
                               fullpath=placeholders.fullpath,
                               title=placeholders.title,
                               filename_title=placeholders.filename_title,
                               date=placeholders.date_string(),
                               podcasttitle=placeholders.podcasttitle,
                               filename_podcasttitle=
                               placeholders.filename_podcasttitle,
                               name=placeholders.name,
                               subtitle=placeholders.sanitizedsubtitle,
                               entrysummary=placeholders.entrysummary)
    return newst
Take a string with placeholders, and return the strings with substitutions.
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L274-L291
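Under the hood this is plain str.format with named fields drawn from the placeholders object. A tiny illustration with a stand-in namespace (only two of the many real attributes are modelled here):

from types import SimpleNamespace

# A stand-in for greg's placeholders object, carrying only the fields this
# example needs.
ph = SimpleNamespace(podcasttitle="My Show", title="Episode 1")

template = "{podcasttitle} - {title}.mp3"
print(template.format(podcasttitle=ph.podcasttitle, title=ph.title))
# My Show - Episode 1.mp3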
bwesterb/py-tarjan
src/__init__.py
_tarjan_head
python
def _tarjan_head(ctx, v):
    """ Used by @tarjan and @tarjan_iter.  This is the head of the main iteration """
    ctx.index[v] = len(ctx.index)
    ctx.lowlink[v] = ctx.index[v]
    ctx.S.append(v)
    ctx.S_set.add(v)
    it = iter(ctx.g.get(v, ()))
    ctx.T.append((it, False, v, None))
Used by @tarjan and @tarjan_iter. This is the head of the main iteration
https://github.com/bwesterb/py-tarjan/blob/60b0e3a1a7b925514fdce2ffbd84e1e246aba6d8/src/__init__.py#L18-L26
bwesterb/py-tarjan
src/__init__.py
_tarjan_body
python
def _tarjan_body(ctx, it, v):
    """ Used by @tarjan and @tarjan_iter.  This is the body of the main iteration """
    for w in it:
        if w not in ctx.index:
            ctx.T.append((it, True, v, w))
            _tarjan_head(ctx, w)
            return
        if w in ctx.S_set:
            ctx.lowlink[v] = min(ctx.lowlink[v], ctx.index[w])
    if ctx.lowlink[v] == ctx.index[v]:
        scc = []
        w = None
        while v != w:
            w = ctx.S.pop()
            scc.append(w)
            ctx.S_set.remove(w)
        ctx.ret.append(scc)
Used by @tarjan and @tarjan_iter. This is the body of the main iteration
https://github.com/bwesterb/py-tarjan/blob/60b0e3a1a7b925514fdce2ffbd84e1e246aba6d8/src/__init__.py#L28-L45
bwesterb/py-tarjan
src/__init__.py
tarjan_iter
python
def tarjan_iter(g):
    """ Returns the strongly connected components of the graph @g
        in a topological order.
        @g is the graph represented as a dictionary
            { <vertex> : <successors of vertex> }.
        This function does not recurse.  It returns an iterator. """
    ctx = TarjanContext(
            g=g,
            S=[],
            S_set=set(),
            index={},
            lowlink={},
            T=[],
            ret=[])
    main_iter = iter(g)
    while True:
        try:
            v = next(main_iter)
        except StopIteration:
            return
        if v not in ctx.index:
            _tarjan_head(ctx, v)
        while ctx.T:
            it, inside, v, w = ctx.T.pop()
            if inside:
                ctx.lowlink[v] = min(ctx.lowlink[w], ctx.lowlink[v])
            _tarjan_body(ctx, it, v)
            if ctx.ret:
                assert len(ctx.ret) == 1
                yield ctx.ret.pop()
Returns the strongly connected components of the graph @g in a topological order. @g is the graph represented as a dictionary { <vertex> : <successors of vertex> }. This function does not recurse. It returns an iterator.
https://github.com/bwesterb/py-tarjan/blob/60b0e3a1a7b925514fdce2ffbd84e1e246aba6d8/src/__init__.py#L47-L79
bwesterb/py-tarjan
src/__init__.py
tarjan_recursive
python
def tarjan_recursive(g):
    """ Returns the strongly connected components of the graph @g
        in a topological order.
        @g is the graph represented as a dictionary
            { <vertex> : <successors of vertex> }.
        This function recurses --- large graphs may cause a stack overflow. """
    S = []
    S_set = set()
    index = {}
    lowlink = {}
    ret = []

    def visit(v):
        index[v] = len(index)
        lowlink[v] = index[v]
        S.append(v)
        S_set.add(v)
        for w in g.get(v, ()):
            if w not in index:
                visit(w)
                lowlink[v] = min(lowlink[w], lowlink[v])
            elif w in S_set:
                lowlink[v] = min(lowlink[v], index[w])
        if lowlink[v] == index[v]:
            scc = []
            w = None
            while v != w:
                w = S.pop()
                scc.append(w)
                S_set.remove(w)
            ret.append(scc)

    for v in g:
        if v not in index:
            visit(v)
    return ret
Returns the strongly connected components of the graph @g in a topological order. @g is the graph represented as a dictionary { <vertex> : <successors of vertex> }. This function recurses --- large graphs may cause a stack overflow.
https://github.com/bwesterb/py-tarjan/blob/60b0e3a1a7b925514fdce2ffbd84e1e246aba6d8/src/__init__.py#L112-L150
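A small usage sketch, assuming the tarjan_recursive definition above is in scope. The graph is an ordinary dictionary, so no other machinery from the package is needed:

# A three-vertex graph: 1 and 2 form a cycle, 3 points into that cycle.
g = {1: [2], 2: [1], 3: [2]}

print(tarjan_recursive(g))
# [[2, 1], [3]] -- the {1, 2} component is emitted before [3],
# since every edge out of 3 leads into an earlier component.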
mattjj/pylds
pylds/util.py
symm_block_tridiag_matmul
python
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
    """
    Compute matrix-vector product with a symmetric block
    tridiagonal matrix H and vector v.

    :param H_diag: block diagonal terms of H
    :param H_upper_diag: upper block diagonal terms of H
    :param v: vector to multiply
    :return: H * v
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T-1, D, D)
    assert v.shape == (T, D)

    out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
    out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
    out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1),
                         v[:-1][:, :, None])[:, :, 0]
    return out
Compute matrix-vector product with a symmetric block tridiagonal matrix H and vector v. :param H_diag: block diagonal terms of H :param H_upper_diag: upper block diagonal terms of H :param v: vector to multiply :return: H * v
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L21-L39
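A quick correctness check, assuming numpy is imported as np and the function above is in scope: assemble the dense block-tridiagonal matrix explicitly and compare its ordinary matrix-vector product with the blocked routine.

import numpy as np

T, D = 4, 2
rng = np.random.RandomState(0)
H_diag = rng.randn(T, D, D)
H_upper_diag = rng.randn(T - 1, D, D)
v = rng.randn(T, D)

# Dense assembly: diagonal blocks, upper blocks, and their transposes below.
H_dense = np.zeros((T * D, T * D))
for t in range(T):
    H_dense[t*D:(t+1)*D, t*D:(t+1)*D] = H_diag[t]
for t in range(T - 1):
    H_dense[t*D:(t+1)*D, (t+1)*D:(t+2)*D] = H_upper_diag[t]
    H_dense[(t+1)*D:(t+2)*D, t*D:(t+1)*D] = H_upper_diag[t].T

expected = (H_dense @ v.ravel()).reshape(T, D)
assert np.allclose(symm_block_tridiag_matmul(H_diag, H_upper_diag, v), expected)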
mattjj/pylds
pylds/util.py
convert_block_tridiag_to_banded
python
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
    """
    convert blocks to banded matrix representation required for scipy.
    we are using the "lower form."
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)

    ab = np.zeros((2 * D, T * D))

    # Fill in blocks along the diagonal
    for d in range(D):
        # Get indices of (-d)-th diagonal of H_diag
        i = np.arange(d, D)
        j = np.arange(0, D - d)
        h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
        ab[d] = h.ravel()

    # Fill in lower left corner of blocks below the diagonal
    for d in range(0, D):
        # Get indices of (-d)-th diagonal of H_diag
        i = np.arange(d, D)
        j = np.arange(0, D - d)
        h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
        ab[D + d, :D * (T - 1)] = h.ravel()

    # Fill in upper corner of blocks below the diagonal
    for d in range(1, D):
        # Get indices of (+d)-th diagonal of H_lower_diag
        i = np.arange(0, D - d)
        j = np.arange(d, D)
        h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
        ab[D - d, :D * (T - 1)] += h.ravel()

    return ab if lower else transpose_lower_banded_matrix(ab)
convert blocks to banded matrix representation required for scipy. we are using the "lower form." see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L64-L101
mattjj/pylds
pylds/util.py
scipy_solve_symm_block_tridiag
python
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
    """
    use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
    """
    from scipy.linalg import solveh_banded
    ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
        if ab is None else ab
    x = solveh_banded(ab, v.ravel(), lower=True)
    return x.reshape(v.shape)
use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L114-L124
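A round-trip sanity check, assuming numpy/scipy are available and the two functions above (symm_block_tridiag_matmul and scipy_solve_symm_block_tridiag) are in scope: build a strictly diagonally dominant, hence positive definite, symmetric block-tridiagonal H, solve H x = v, and multiply back.

import numpy as np

T, D = 5, 3
rng = np.random.RandomState(1)
H_diag = np.tile(4.0 * np.eye(D), (T, 1, 1))               # diagonal blocks 4*I
H_upper_diag = rng.uniform(-0.5, 0.5, size=(T - 1, D, D))  # small coupling blocks
v = rng.randn(T, D)

x = scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v)
assert np.allclose(symm_block_tridiag_matmul(H_diag, H_upper_diag, x), v)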
mattjj/pylds
pylds/util.py
sample_block_tridiag
python
def sample_block_tridiag(H_diag, H_upper_diag):
    """
    helper function for sampling block tridiag gaussians.
    this is only for speed comparison with the solve approach.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))

    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))

    y = info_sample(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
                    J_node, h_node, np.zeros(T))
    return y
helper function for sampling block tridiag gaussians. this is only for speed comparison with the solve approach.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L141-L160
mattjj/pylds
pylds/util.py
logdet_symm_block_tridiag
python
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
    """
    compute the log determinant of a positive definite,
    symmetric block tridiag matrix.  Use the Kalman
    info filter to do so.  Specifically, the KF computes
    the normalizer:

        log Z = 1/2 h^T J^{-1} h - 1/2 log |J| + n/2 log 2 \pi

    We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from this
    we solve for log |J|.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))
    log_Z_init = 0

    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    log_Z_pair = 0

    J_node = H_diag
    h_node = np.zeros((T, D))
    log_Z_node = 0

    logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
                                    J_11, J_21, J_22, h_1, h_2, log_Z_pair,
                                    J_node, h_node, log_Z_node)

    # logZ = -1/2 log |J| + n/2 log 2 \pi
    logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
    return logdetJ
compute the log determinant of a positive definite, symmetric block tridiag matrix. Use the Kalman info filter to do so. Specifically, the KF computes the normalizer: log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from this we solve for log |J|.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L163-L196
mattjj/pylds
pylds/util.py
compute_symm_block_tridiag_covariances
python
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
    """
    use the info smoother to solve a symmetric block tridiagonal system
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))

    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))

    _, _, sigmas, E_xt_xtp1 = \
        info_E_step(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
                    J_node, h_node, np.zeros(T))
    return sigmas, E_xt_xtp1
use the info smoother to solve a symmetric block tridiagonal system
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L199-L218
mattjj/pylds
pylds/states.py
LDSStatesZeroInflatedCountData.resample_zeroinflation_variables
python
def resample_zeroinflation_variables(self):
    """
    There's no way around the fact that we have to look at every
    data point, even the zeros here.
    """
    # TODO: move this to cython?
    T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
    indptr = [0]
    indices = []
    vals = []
    offset = 0
    X = np.hstack((self.gaussian_states, self.inputs))
    for t in range(T):
        # Evaluate probability of data
        y_t = np.zeros(N)
        ns_t = self.data.indices[self.data.indptr[t]:self.data.indptr[t+1]]
        y_t[ns_t] = self.data.data[self.data.indptr[t]:self.data.indptr[t+1]]
        ll = self.emission_distn._elementwise_log_likelihood((X[t], y_t))
        ll = ll.ravel()

        # Evaluate the probability that each emission was "exposed",
        # i.e. p(z_tn = 1 | y_tn, x_tn)
        log_p_exposed = np.log(self.rho) + ll
        log_p_exposed -= np.log(np.exp(log_p_exposed) + (1-self.rho) * (y_t == 0))

        # Sample zero inflation mask
        z_t = np.random.rand(N) < np.exp(log_p_exposed)

        # Construct the sparse matrix
        t_inds = np.where(z_t)[0]
        indices.append(t_inds)
        vals.append(y_t[t_inds])
        offset += t_inds.size
        indptr.append(offset)

    # Construct a sparse matrix
    vals = np.concatenate(vals)
    indices = np.concatenate(indices)
    indptr = np.array(indptr)
    self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N))
There's no way around the fact that we have to look at every data point, even the zeros here.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/states.py#L905-L944
mattjj/pylds
pylds/distributions.py
PoissonRegression.expected_log_likelihood
python
def expected_log_likelihood(self, mus, sigmas, y):
    """
    Compute the expected log likelihood for a mean and
    covariance of x and an observed value of y.
    """
    # Flatten the covariance
    T = mus.shape[0]
    D = self.D_in
    sigs_vec = sigmas.reshape((T, D ** 2))

    # Compute the log likelihood of each column
    ll = np.zeros((T, self.D_out))
    for n in range(self.D_out):
        an = self.A[n]

        E_loglmbda = np.dot(mus, an)
        ll[:, n] += y[:, n] * E_loglmbda

        # Vectorized log likelihood calculation: subtract E[lambda_n]
        aa_vec = np.outer(an, an).reshape((D ** 2,))
        ll[:, n] -= np.exp(E_loglmbda + 0.5 * np.dot(sigs_vec, aa_vec))

    return ll
Compute the expected log likelihood for a mean and covariance of x and an observed value of y.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/distributions.py#L55-L79
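The 0.5 * np.dot(sigs_vec, aa_vec) term comes from the log-normal moment identity E[exp(a^T x)] = exp(a^T mu + 1/2 a^T Sigma a) for x ~ N(mu, Sigma). A standalone Monte Carlo check of that identity (numpy only, values chosen arbitrarily):

import numpy as np

rng = np.random.RandomState(0)
D = 3
a = 0.3 * rng.randn(D)
mu = rng.randn(D)
L = 0.3 * rng.randn(D, D)
Sigma = L @ L.T + 0.1 * np.eye(D)

samples = rng.multivariate_normal(mu, Sigma, size=200000)
mc = np.mean(np.exp(samples @ a))                     # Monte Carlo estimate of E[exp(a^T x)]
closed_form = np.exp(a @ mu + 0.5 * a @ Sigma @ a)    # the identity used in the code above
print(mc, closed_form)                                # agree to within Monte Carlo error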
mattjj/pylds
pylds/distributions.py
BernoulliRegression.max_likelihood
python
def max_likelihood(self, data, weights=None, stats=None):
    """
    Maximize the likelihood for given data

    :param data:
    :param weights:
    :param stats:
    :return:
    """
    if isinstance(data, list):
        x = np.vstack([d[0] for d in data])
        y = np.vstack([d[1] for d in data])
    elif isinstance(data, tuple):
        assert len(data) == 2
        x, y = data
    elif isinstance(data, np.ndarray):
        x, y = data[:, :self.D_in], data[:, self.D_in:]
    else:
        raise Exception("Invalid data type")

    from sklearn.linear_model import LogisticRegression
    for n in progprint_xrange(self.D_out):
        lr = LogisticRegression(fit_intercept=False)
        lr.fit(x, y[:, n])
        self.A[n] = lr.coef_
Maximize the likelihood for given data :param data: :param weights: :param stats: :return:
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/distributions.py#L204-L226
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase.log_conditional_likelihood
python
def log_conditional_likelihood(self, x):
    """
    likelihood \sum_t log p(y_t | x_t)
    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    ll = 0
    for t in range(self.T):
        ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
    return ll
likelihood \sum_t log p(y_t | x_t) Optionally override this in base classes
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L26-L37
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase.grad_local_log_likelihood
python
def grad_local_log_likelihood(self, x):
    """
    return d/dxt log p(yt | xt)  evaluated at xt
    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    gfun = grad(self.local_log_likelihood)
    g = np.zeros((T, D))
    for t in range(T):
        g[t] += gfun(x[t], self.data[t], self.inputs[t])
    return g
return d/dxt log p(yt | xt) evaluated at xt Optionally override this in base classes
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L39-L51
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase.hessian_local_log_likelihood
python
def hessian_local_log_likelihood(self, x):
    """
    return d^2/dxt^2 log p(y | x) for each time bin
    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    hfun = hessian(self.local_log_likelihood)
    H_diag = np.zeros((T, D, D))
    for t in range(T):
        H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
    return H_diag
return d^2/dxt^2 log p(y | x) for each time bin Optionally override this in base classes
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L53-L65
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase.log_joint
python
def log_joint(self, x):
    """
    Compute the log joint probability p(x, y)
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # prior log p(x) -- quadratic terms
    J_diag, J_upper_diag = self.sparse_J_prior
    lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))

    # prior log p(x) -- linear terms
    _, h_init, log_Z_init = self.info_init_params
    _, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
    lp += x[0].dot(h_init)
    lp += np.sum(x[:-1] * h1)
    lp += np.sum(x[1:] * h2)

    # prior log p(x) -- normalization constants
    lp += log_Z_init
    lp += np.sum(log_Z_dyn)

    # likelihood log p(y | x)
    lp += self.log_conditional_likelihood(x)

    return lp
Compute the log joint probability p(x, y)
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L82-L107
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase.sparse_hessian_log_joint
python
def sparse_hessian_log_joint(self, x):
    """
    The Hessian includes the quadratic terms of the Gaussian LDS prior
    as well as the Hessian of the local log likelihood.
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Collect the Gaussian LDS prior terms
    J_diag, J_upper_diag = self.sparse_J_prior
    H_diag, H_upper_diag = -J_diag, -J_upper_diag

    # Collect the likelihood terms
    H_diag += self.hessian_local_log_likelihood(x)

    # Subtract a little bit to ensure negative definiteness
    H_diag -= 1e-8 * np.eye(D)

    return H_diag, H_upper_diag
The Hessian includes the quadratic terms of the Gaussian LDS prior as well as the Hessian of the local log likelihood.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L109-L127
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase.gradient_log_joint
python
def gradient_log_joint(self, x):
    """
    The gradient of the log joint probability.

    For the Gaussian terms, this is

        d/dx [-1/2 x^T J x + h^T x] = -Jx + h.

    For the likelihood terms, we have for each time t

        d/dx log p(yt | xt)
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Collect the Gaussian LDS prior terms
    _, h_init, _ = self.info_init_params
    _, _, _, h1, h2, _ = self.info_dynamics_params
    H_diag, H_upper_diag = self.sparse_J_prior

    # Compute the gradient from the prior
    g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
    g[0] += h_init
    g[:-1] += h1
    g[1:] += h2

    # Compute gradient from the likelihood terms
    g += self.grad_local_log_likelihood(x)

    return g
The gradient of the log joint probability. For the Gaussian terms, this is d/dx [-1/2 x^T J x + h^T x] = -Jx + h. For the likelihood terms, we have for each time t d/dx log p(yt | xt)
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L133-L162
mattjj/pylds
pylds/laplace.py
_LaplaceApproxLDSStatesBase._laplace_approximation_newton
python
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
    """
    Solve a block tridiagonal system with message passing.
    """
    from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
    scale = self.T * self.D_emission

    def newton_step(x, stepsz):
        assert 0 <= stepsz <= 1
        g = self.gradient_log_joint(x)
        H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
        Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
                                                 -H_upper_diag / scale,
                                                 g / scale)
        return x - stepsz * Hinv_g

    if verbose:
        print("Fitting Laplace approximation")

    itr = [0]

    def cbk(x):
        print("Iteration: ", itr[0],
              "\tObjective: ", (self.log_joint(x) / scale).round(4),
              "\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
        itr[0] += 1

    # Solve for optimal x with Newton's method
    x = self.gaussian_states
    dx = np.inf
    while dx >= tol:
        xnew = newton_step(x, stepsz)
        dx = np.mean(abs(xnew - x))
        x = xnew
        if verbose:
            cbk(x)

    assert np.all(np.isfinite(x))
    if verbose:
        print("Done")

    return x
Solve a block tridiagonal system with message passing.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L212-L253
mattjj/pylds
pylds/laplace.py
LaplaceApproxPoissonLDSStates.grad_local_log_likelihood
python
def grad_local_log_likelihood(self, x):
    """
    d/dx  y^T Cx + y^T d - exp(Cx+d)
        = y^T C - exp(Cx+d)^T C
        = (y - lmbda)^T C
    """
    # Observation likelihoods
    lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
    return (self.data - lmbda).dot(self.C)
d/dx y^T Cx + y^T d - exp(Cx+d) = y^T C - exp(Cx+d)^T C = (y - lmbda)^T C
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L349-L357
mattjj/pylds
pylds/laplace.py
LaplaceApproxPoissonLDSStates.hessian_local_log_likelihood
python
def hessian_local_log_likelihood(self, x):
    """
    d/dx  (y - lmbda)^T C
        = d/dx -exp(Cx + d)^T C
        = -C^T exp(Cx + d)^T C
    """
    # Observation likelihoods
    lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
    return np.einsum('tn, ni, nj ->tij', -lmbda, self.C, self.C)
d/dx (y - lmbda)^T C = d/dx -exp(Cx + d)^T C = -C^T exp(Cx + d)^T C
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L359-L366
mattjj/pylds
pylds/laplace.py
LaplaceApproxBernoulliLDSStates.grad_local_log_likelihood
python
def grad_local_log_likelihood(self, x):
    """
    d/d\psi  y \psi - log (1 + exp(\psi))
        = y - exp(\psi) / (1 + exp(\psi))
        = y - sigma(psi)
        = y - p

    d\psi/dx = C

    d/dx = (y - sigma(psi)) * C
    """
    C, D, u, y = self.C, self.D, self.inputs, self.data
    psi = x.dot(C.T) + u.dot(D.T)
    p = 1. / (1 + np.exp(-psi))
    return (y - p).dot(C)
d/d \psi y \psi - log (1 + exp(\psi)) = y - exp(\psi) / (1 + exp(\psi)) = y - sigma(psi) = y - p d \psi / dx = C d / dx = (y - sigma(psi)) * C
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L458-L472
mattjj/pylds
pylds/laplace.py
LaplaceApproxBernoulliLDSStates.hessian_local_log_likelihood
python
def hessian_local_log_likelihood(self, x):
    """
    d/dx  (y - p) * C
        = -dpsi/dx (dp/d\psi) C
        = -C p (1-p) C
    """
    C, D, u, y = self.C, self.D, self.inputs, self.data
    psi = x.dot(C.T) + u.dot(D.T)
    p = 1. / (1 + np.exp(-psi))
    dp_dpsi = p * (1 - p)
    return np.einsum('tn, ni, nj ->tij', -dp_dpsi, self.C, self.C)
d/dx (y - p) * C = -dpsi/dx (dp/d\psi) C = -C p (1-p) C
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L474-L484
mattjj/pylds
pylds/models.py
ZeroInflatedCountLDS.resample_emission_distn
python
def resample_emission_distn(self):
    """
    Now for the expensive part... the data is stored in a sparse row
    format, which is good for updating the latent states (since we
    primarily rely on dot products with the data, which can be
    efficiently performed for CSR matrices).

    However, in order to update the n-th row of the emission matrix,
    we need to know which counts are observed in the n-th column of
    data.  This involves converting the data to a sparse column format,
    which can require (time) intensive re-indexing.
    """
    masked_datas = [s.masked_data.tocsc() for s in self.states_list]
    xs = [np.hstack((s.gaussian_states, s.inputs)) for s in self.states_list]

    for n in range(self.D_obs):
        # Get the nonzero values of the nth column
        rowns = [md.indices[md.indptr[n]:md.indptr[n+1]] for md in masked_datas]
        xns = [x[r] for x, r in zip(xs, rowns)]
        yns = [s.masked_data.getcol(n).data for s in self.states_list]
        maskns = [np.ones_like(y, dtype=bool) for y in yns]
        omegans = [s.omega.getcol(n).data for s in self.states_list]
        self.emission_distn._resample_row_of_emission_matrix(n, xns, yns, maskns, omegans)
Now for the expensive part... the data is stored in a sparse row format, which is good for updating the latent states (since we primarily rely on dot products with the data, which can be efficiently performed for CSR matrices). However, in order to update the n-th row of the emission matrix, we need to know which counts are observed in the n-th column of data. This involves converting the data to a sparse column format, which can require (time) intensive re-indexing.
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/models.py#L434-L456
FirefighterBlu3/python-pam
pam.py
pam.authenticate
python
def authenticate(self, username, password, service='login', encoding='utf-8',
                 resetcreds=True):
    """username and password authentication for the given service.

       Returns True for success, or False for failure.

       self.code (integer) and self.reason (string) are always stored
       and may be referenced for the reason why authentication failed.
       0/'Success' will be stored for success.

       Python3 expects bytes() for ctypes inputs.  This function will make
       necessary conversions using the supplied encoding.

    Inputs:
      username: username to authenticate
      password: password in plain text
      service:  PAM service to authenticate against, defaults to 'login'

    Returns:
      success:  True
      failure:  False
    """

    @conv_func
    def my_conv(n_messages, messages, p_response, app_data):
        """Simple conversation function that responds to any
           prompt where the echo is off with the supplied password"""
        # Create an array of n_messages response objects
        addr = calloc(n_messages, sizeof(PamResponse))
        response = cast(addr, POINTER(PamResponse))
        p_response[0] = response
        for i in range(n_messages):
            if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
                dst = calloc(len(password)+1, sizeof(c_char))
                memmove(dst, cpassword, len(password))
                response[i].resp = dst
                response[i].resp_retcode = 0
        return 0

    # python3 ctypes prefers bytes
    if sys.version_info >= (3,):
        if isinstance(username, str):
            username = username.encode(encoding)
        if isinstance(password, str):
            password = password.encode(encoding)
        if isinstance(service, str):
            service = service.encode(encoding)
    else:
        if isinstance(username, unicode):
            username = username.encode(encoding)
        if isinstance(password, unicode):
            password = password.encode(encoding)
        if isinstance(service, unicode):
            service = service.encode(encoding)

    if b'\x00' in username or b'\x00' in password or b'\x00' in service:
        self.code = 4  # PAM_SYSTEM_ERR in Linux-PAM
        self.reason = 'strings may not contain NUL'
        return False

    # do this up front so we can safely throw an exception if there's
    # anything wrong with it
    cpassword = c_char_p(password)

    handle = PamHandle()
    conv = PamConv(my_conv, 0)
    retval = pam_start(service, username, byref(conv), byref(handle))

    if retval != 0:
        # This is not an authentication error, something has gone wrong starting up PAM
        self.code = retval
        self.reason = "pam_start() failed"
        return False

    retval = pam_authenticate(handle, 0)
    auth_success = retval == 0

    if auth_success and resetcreds:
        retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)

    # store information to inform the caller why we failed
    self.code = retval
    self.reason = pam_strerror(handle, retval)
    if sys.version_info >= (3,):
        self.reason = self.reason.decode(encoding)

    if hasattr(libpam, 'pam_end'):
        pam_end(handle, retval)

    return auth_success
username and password authentication for the given service. Returns True for success, or False for failure. self.code (integer) and self.reason (string) are always stored and may be referenced for the reason why authentication failed. 0/'Success' will be stored for success. Python3 expects bytes() for ctypes inputs. This function will make necessary conversions using the supplied encoding. Inputs: username: username to authenticate password: password in plain text service: PAM service to authenticate against, defaults to 'login' Returns: success: True failure: False
https://github.com/FirefighterBlu3/python-pam/blob/fe44b334970f421635d9e373b563c9e6566613bd/pam.py#L106-L191
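Typical usage, assuming the module is importable as pam and exposes the pam class as in this project's README; the username and password below are placeholders:

import pam

p = pam.pam()
# On success authenticate() returns True; otherwise p.code and p.reason
# describe the PAM error.
ok = p.authenticate('someuser', 'not-the-real-password', service='login')
print(ok, p.code, p.reason)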
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/ssh_config.py
ssh_config.names
python
def names(self):
    """
    The names defined in ~/.ssh/config

    :return: the names
    """
    found_names = []
    with open(self.filename) as f:
        content = f.readlines()
    for line in content:
        line = line.strip()
        if " " in line:
            attribute, value = line.split(" ", 1)
            attribute = attribute.strip()
            value = value.strip()
            if attribute.lower() in ['host']:
                found_names.append(value)
    return found_names
The names defined in ~/.ssh/config :return: the names
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/ssh_config.py#L29-L45
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/ssh_config.py
ssh_config.load
def load(self): """list the hosts defined in the ssh config file""" with open(self.filename) as f: content = f.readlines() content = [" ".join(x.split()).strip('\n').lstrip().split(' ', 1) for x in content] # removes duplicated spaces, and splits in two fields, removes leading spaces hosts = {} host = "NA" for line in content: if line[0].startswith('#') or line[0] is '': pass # ignore line else: attribute = line[0] value = line[1] if attribute.lower().strip() in ['Host', 'host']: host = value hosts[host] = {'host': host} else: # In case of special configuration lines, such as port # forwarding, # there would be no 'Host india' line. if host in hosts: hosts[host][attribute] = value # pass self.hosts = hosts
python
def load(self): """list the hosts defined in the ssh config file""" with open(self.filename) as f: content = f.readlines() content = [" ".join(x.split()).strip('\n').lstrip().split(' ', 1) for x in content] # removes duplicated spaces, and splits in two fields, removes leading spaces hosts = {} host = "NA" for line in content: if line[0].startswith('#') or line[0] is '': pass # ignore line else: attribute = line[0] value = line[1] if attribute.lower().strip() in ['Host', 'host']: host = value hosts[host] = {'host': host} else: # In case of special configuration lines, such as port # forwarding, # there would be no 'Host india' line. if host in hosts: hosts[host][attribute] = value # pass self.hosts = hosts
list the hosts defined in the ssh config file
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/ssh_config.py#L48-L73
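The Host-line parsing used by names() and load() can be exercised standalone; the sketch below applies the same splitting logic to an invented in-memory config rather than ~/.ssh/config.

# Standalone sketch of the Host-line parsing shown above; the sample config is invented.
sample = """\
Host india
    Hostname india.futuresystems.org
    User albert

Host web
    Hostname example.org
"""

names = []
for line in sample.splitlines():
    line = line.strip()
    if " " in line:
        attribute, value = line.split(" ", 1)
        if attribute.strip().lower() == "host":
            names.append(value.strip())

print(names)   # -> ['india', 'web']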
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/ssh_config.py
ssh_config.execute
def execute(self, name, command): """ execute the command on the named host :param name: the name of the host in config :param command: the command to be executed :return: """ if name in ["localhost"]: r = '\n'.join(Shell.sh("-c", command).split()[-1:]) else: r = '\n'.join(Shell.ssh(name, command).split()[-1:]) return r
python
def execute(self, name, command): """ execute the command on the named host :param name: the name of the host in config :param command: the command to be executed :return: """ if name in ["localhost"]: r = '\n'.join(Shell.sh("-c", command).split()[-1:]) else: r = '\n'.join(Shell.ssh(name, command).split()[-1:]) return r
execute the command on the named host :param name: the name of the host in config :param command: the command to be executed :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/ssh_config.py#L104-L115
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/ssh_config.py
ssh_config.generate
def generate(self, key="india", host="india.futuresystems.org", username=None, force=False, verbose=False): """ adds a host to the config file with given parameters. #TODO: make sure this is better documented :param key: the key :param host: the host :param username: the username :param force: not used :param verbose: prints debug messages :return: """ data = { "host": host, "key": key, "username": username } if verbose and key in self.names(): Console.error("{key} already in ~/.ssh/config".format(**data), traceflag=False) return "" else: entry = dedent(""" Host {key} Hostname {host} User {username} """.format(**data)) try: with open(self.filename, "a") as config_ssh: config_ssh.write(entry) config_ssh.close() self.load() if verbose: Console.ok("Added india to ~/.ssh/config") except Exception as e: if verbose: Console.error(e.message)
python
def generate(self, key="india", host="india.futuresystems.org", username=None, force=False, verbose=False): """ adds a host to the config file with given parameters. #TODO: make sure this is better documented :param key: the key :param host: the host :param username: the username :param force: not used :param verbose: prints debug messages :return: """ data = { "host": host, "key": key, "username": username } if verbose and key in self.names(): Console.error("{key} already in ~/.ssh/config".format(**data), traceflag=False) return "" else: entry = dedent(""" Host {key} Hostname {host} User {username} """.format(**data)) try: with open(self.filename, "a") as config_ssh: config_ssh.write(entry) config_ssh.close() self.load() if verbose: Console.ok("Added india to ~/.ssh/config") except Exception as e: if verbose: Console.error(e.message)
adds a host to the config file with given parameters. #TODO: make sure this is better documented :param key: the key :param host: the host :param username: the username :param force: not used :param verbose: prints debug messages :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/ssh_config.py#L136-L174
cloudmesh/cloudmesh-common
cloudmesh/common/logger.py
LOGGER
def LOGGER(filename): """creates a logger with the given name. You can use it as follows:: log = cloudmesh.common.LOGGER(__file__) log.error("this is an error") log.info("this is an info") log.warning("this is a warning") """ pwd = os.getcwd() name = filename.replace(pwd, "$PWD") try: (first, name) = name.split("site-packages") name += "... site" except: pass loglevel = logging.CRITICAL try: level = grep("loglevel:", config_file( "/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower() if level.upper() == "DEBUG": loglevel = logging.DEBUG elif level.upper() == "INFO": loglevel = logging.INFO elif level.upper() == "WARNING": loglevel = logging.WARNING elif level.upper() == "ERROR": loglevel = logging.ERROR else: level = logging.CRITICAL except: # print "LOGLEVEL NOT FOUND" loglevel = logging.DEBUG log = logging.getLogger(name) log.setLevel(loglevel) formatter = logging.Formatter( 'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name)) # formatter = logging.Formatter( # 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name)) handler = logging.StreamHandler() handler.setFormatter(formatter) log.addHandler(handler) return log
python
def LOGGER(filename): """creates a logger with the given name. You can use it as follows:: log = cloudmesh.common.LOGGER(__file__) log.error("this is an error") log.info("this is an info") log.warning("this is a warning") """ pwd = os.getcwd() name = filename.replace(pwd, "$PWD") try: (first, name) = name.split("site-packages") name += "... site" except: pass loglevel = logging.CRITICAL try: level = grep("loglevel:", config_file( "/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower() if level.upper() == "DEBUG": loglevel = logging.DEBUG elif level.upper() == "INFO": loglevel = logging.INFO elif level.upper() == "WARNING": loglevel = logging.WARNING elif level.upper() == "ERROR": loglevel = logging.ERROR else: level = logging.CRITICAL except: # print "LOGLEVEL NOT FOUND" loglevel = logging.DEBUG log = logging.getLogger(name) log.setLevel(loglevel) formatter = logging.Formatter( 'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name)) # formatter = logging.Formatter( # 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name)) handler = logging.StreamHandler() handler.setFormatter(formatter) log.addHandler(handler) return log
creates a logger with the given name. You can use it as follows:: log = cloudmesh.common.LOGGER(__file__) log.error("this is an error") log.info("this is an info") log.warning("this is a warning")
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/logger.py#L12-L61
cloudmesh/cloudmesh-common
cloudmesh/common/FlatDict.py
key_prefix_replace
def key_prefix_replace(d, prefix, new_prefix=""): """ replaces the list of prefix in keys of a flattened dict :param d: the flattened dict :param prefix: a list of prefixes that are replaced with a new prefix. Typically this will be "" :type prefix: list of str :param new_prefix: The new prefix. By default it is set to "" :return: the dict with the keys replaced as specified """ items = [] for k, v in d.items(): new_key = k for p in prefix: new_key = new_key.replace(p, new_prefix, 1) items.append((new_key, v)) return dict(items)
python
def key_prefix_replace(d, prefix, new_prefix=""): """ replaces the list of prefix in keys of a flattened dict :param d: the flattened dict :param prefix: a list of prefixes that are replaced with a new prefix. Typically this will be "" :type prefix: list of str :param new_prefix: The new prefix. By default it is set to "" :return: the dict with the keys replaced as specified """ items = [] for k, v in d.items(): new_key = k for p in prefix: new_key = new_key.replace(p, new_prefix, 1) items.append((new_key, v)) return dict(items)
replaces the list of prefix in keys of a flattened dict :param d: the flattened dict :param prefix: a list of prefixes that are replaced with a new prefix. Typically this will be "" :type prefix: list of str :param new_prefix: The new prefix. By default it is set to "" :return: the dict with the keys replaced as specified
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L14-L31
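A short usage sketch for key_prefix_replace(), assuming the function can be imported from cloudmesh.common.FlatDict as the file path above indicates and that the package is installed as cloudmesh-common.

# Usage sketch; the input dictionary and the cm_ prefix are invented for illustration.
from cloudmesh.common.FlatDict import key_prefix_replace

d = {"cm_host": "india", "cm_user": "albert", "image": "ubuntu"}
print(key_prefix_replace(d, prefix=["cm_"]))
# -> {'host': 'india', 'user': 'albert', 'image': 'ubuntu'}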
cloudmesh/cloudmesh-common
cloudmesh/common/FlatDict.py
flatten
def flatten(d, parent_key='', sep='__'): """ flattens the dict into a one dimensional dictionary :param d: multidimensional dict :param parent_key: replaces from the parent key :param sep: the separation character used when fattening. the default is __ :return: the flattened dict """ # http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys if type(d) == list: flat = [] for entry in d: flat.append(flatten(entry, parent_key=parent_key, sep=sep)) return flat else: items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collectionsAbc.MutableMapping): items.extend(flatten(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items)
python
def flatten(d, parent_key='', sep='__'): """ flattens the dict into a one dimensional dictionary :param d: multidimensional dict :param parent_key: replaces from the parent key :param sep: the separation character used when fattening. the default is __ :return: the flattened dict """ # http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys if type(d) == list: flat = [] for entry in d: flat.append(flatten(entry, parent_key=parent_key, sep=sep)) return flat else: items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collectionsAbc.MutableMapping): items.extend(flatten(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items)
flattens the dict into a one-dimensional dictionary :param d: multidimensional dict :param parent_key: replaces from the parent key :param sep: the separation character used when flattening. the default is __ :return: the flattened dict
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L41-L67
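A usage sketch for flatten(), again assuming it is importable from cloudmesh.common.FlatDict; the nested dictionary is made up.

# Usage sketch; the nested dictionary is invented.
from cloudmesh.common.FlatDict import flatten

nested = {"cloud": {"name": "chameleon", "credentials": {"user": "albert"}}}
print(flatten(nested))
# -> {'cloud__name': 'chameleon', 'cloud__credentials__user': 'albert'}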
cloudmesh/cloudmesh-common
cloudmesh/common/FlatDict.py
FlatDict2.convert
def convert(cls, obj, flatten=True): """ This function converts object into a Dict optionally Flattening it :param obj: Object to be converted :param flatten: boolean to specify if the dict has to be flattened :return dict: the dict of the object (Flattened or Un-flattened) """ dict_result = cls.object_to_dict(obj) if flatten: dict_result = FlatDict(dict_result) return dict_result
python
def convert(cls, obj, flatten=True): """ This function converts object into a Dict optionally Flattening it :param obj: Object to be converted :param flatten: boolean to specify if the dict has to be flattened :return dict: the dict of the object (Flattened or Un-flattened) """ dict_result = cls.object_to_dict(obj) if flatten: dict_result = FlatDict(dict_result) return dict_result
This function converts object into a Dict optionally Flattening it :param obj: Object to be converted :param flatten: boolean to specify if the dict has to be flattened :return dict: the dict of the object (Flattened or Un-flattened)
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L143-L153
cloudmesh/cloudmesh-common
cloudmesh/common/FlatDict.py
FlatDict2.object_to_dict
def object_to_dict(cls, obj): """ This function converts Objects into Dictionary """ dict_obj = dict() if obj is not None: if type(obj) == list: dict_list = [] for inst in obj: dict_list.append(cls.object_to_dict(inst)) dict_obj["list"] = dict_list elif not cls.is_primitive(obj): for key in obj.__dict__: # is an object if type(obj.__dict__[key]) == list: dict_list = [] for inst in obj.__dict__[key]: dict_list.append(cls.object_to_dict(inst)) dict_obj[key] = dict_list elif not cls.is_primitive(obj.__dict__[key]): temp_dict = cls.object_to_dict(obj.__dict__[key]) dict_obj[key] = temp_dict else: dict_obj[key] = obj.__dict__[key] elif cls.is_primitive(obj): return obj return dict_obj
python
def object_to_dict(cls, obj): """ This function converts Objects into Dictionary """ dict_obj = dict() if obj is not None: if type(obj) == list: dict_list = [] for inst in obj: dict_list.append(cls.object_to_dict(inst)) dict_obj["list"] = dict_list elif not cls.is_primitive(obj): for key in obj.__dict__: # is an object if type(obj.__dict__[key]) == list: dict_list = [] for inst in obj.__dict__[key]: dict_list.append(cls.object_to_dict(inst)) dict_obj[key] = dict_list elif not cls.is_primitive(obj.__dict__[key]): temp_dict = cls.object_to_dict(obj.__dict__[key]) dict_obj[key] = temp_dict else: dict_obj[key] = obj.__dict__[key] elif cls.is_primitive(obj): return obj return dict_obj
This function converts Objects into Dictionary
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L156-L183
cloudmesh/cloudmesh-common
cloudmesh/common/error.py
Error.msg
def msg(cls, error=None, debug=True, trace=True): """ prints the error message :param error: the error message :param debug: only prints it if debug is set to true :param trace: if true prints the trace :return: """ if debug and error is not None: print(error) # TODO: BUG: trace should only be printed if debug is true if trace: print(traceback.format_exc())
python
def msg(cls, error=None, debug=True, trace=True): """ prints the error message :param error: the error message :param debug: only prints it if debug is set to true :param trace: if true prints the trace :return: """ if debug and error is not None: print(error) # TODO: BUG: trace should only be printed if debug is true if trace: print(traceback.format_exc())
prints the error message :param error: the error message :param debug: only prints it if debug is set to true :param trace: if true prints the trace :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/error.py#L20-L32
cloudmesh/cloudmesh-common
cloudmesh/common/error.py
Error.traceback
def traceback(cls, error=None, debug=True, trace=True): """ prints the trace :param error: a message preceding the trace :param debug: prints it if debug is set to true :param trace: :return: """ # TODO: if debug: Error.msg(error=error, debug=debug, trace=trace)
python
def traceback(cls, error=None, debug=True, trace=True): """ prints the trace :param error: a message preceding the trace :param debug: prints it if debug is set to true :param trace: :return: """ # TODO: if debug: Error.msg(error=error, debug=debug, trace=trace)
prints the trace :param error: a message preceding the trace :param debug: prints it if debug is set to true :param trace: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/error.py#L35-L44
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
indent
def indent(text, indent=2, width=128): """ indents the given text by the indent specified and wrapping to the given width :param text: the text to print :param indent: indent characters :param width: the width of the text :return: """ return "\n".join( textwrap.wrap(text, width=width, initial_indent=" " * indent, subsequent_indent=" " * indent))
python
def indent(text, indent=2, width=128): """ indents the given text by the indent specified and wrapping to the given width :param text: the text to print :param indent: indent characters :param width: the width of the text :return: """ return "\n".join( textwrap.wrap(text, width=width, initial_indent=" " * indent, subsequent_indent=" " * indent))
indents the given text by the indent specified and wrapping to the given width :param text: the text to print :param indent: the number of characters to indent by :param width: the width of the text :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L15-L28
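The indent() helper above is a thin wrapper around textwrap; an equivalent standalone call, with an invented message, looks like this.

# Standalone sketch using only the standard library; the text is invented.
import textwrap

text = "a long status message that should be wrapped and indented " * 3
print("\n".join(textwrap.wrap(text,
                              width=60,
                              initial_indent=" " * 4,
                              subsequent_indent=" " * 4)))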
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.set_theme
def set_theme(color=True): """ defines if the console messages are printed in color :param color: if True its printed in color :return: """ if color: Console.theme = Console.theme_color else: Console.theme = Console.theme_bw Console.color = color
python
def set_theme(color=True): """ defines if the console messages are printed in color :param color: if True its printed in color :return: """ if color: Console.theme = Console.theme_color else: Console.theme = Console.theme_bw Console.color = color
defines if the console messages are printed in color :param color: if True its printed in color :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L100-L110
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.error
def error(cls, message, prefix=True, traceflag=False): """ prints an error message :param message: the message :param prefix: a prefix for the message :param traceflag: if true the stack trace is retrieved and printed :return: """ # print (message, prefix) message = message or "" if prefix: text = "ERROR: " else: text = "" if cls.color: cls.cprint('FAIL', text, str(message)) else: print(cls.txt_msg(text + str(message))) if traceflag and cls.debug: trace = traceback.format_exc().strip() if trace: print() print("Trace:") print("\n ".join(str(trace).splitlines())) print()
python
def error(cls, message, prefix=True, traceflag=False): """ prints an error message :param message: the message :param prefix: a prefix for the message :param traceflag: if true the stack trace is retrieved and printed :return: """ # print (message, prefix) message = message or "" if prefix: text = "ERROR: " else: text = "" if cls.color: cls.cprint('FAIL', text, str(message)) else: print(cls.txt_msg(text + str(message))) if traceflag and cls.debug: trace = traceback.format_exc().strip() if trace: print() print("Trace:") print("\n ".join(str(trace).splitlines())) print()
prints an error message :param message: the message :param prefix: a prefix for the message :param traceflag: if true the stack trace is retrieved and printed :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L145-L170
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.TODO
def TODO(message, prefix=True, traceflag=True): """ prints an TODO message :param message: the message :param prefix: if set to true it prints TODO: as prefix :param traceflag: if true the stack trace is retrieved and printed :return: """ message = message or "" if prefix: text = "TODO: " else: text = "" if Console.color: Console.cprint('FAIL', text, str(message)) else: print(Console.msg(text + str(message))) trace = traceback.format_exc().strip() if traceflag and trace != "None": print() print("\n".join(str(trace).splitlines())) print()
python
def TODO(message, prefix=True, traceflag=True): """ prints an TODO message :param message: the message :param prefix: if set to true it prints TODO: as prefix :param traceflag: if true the stack trace is retrieved and printed :return: """ message = message or "" if prefix: text = "TODO: " else: text = "" if Console.color: Console.cprint('FAIL', text, str(message)) else: print(Console.msg(text + str(message))) trace = traceback.format_exc().strip() if traceflag and trace != "None": print() print("\n".join(str(trace).splitlines())) print()
prints a TODO message :param message: the message :param prefix: if set to true it prints TODO: as prefix :param traceflag: if true the stack trace is retrieved and printed :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L173-L196
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.debug_msg
def debug_msg(message): """ print a debug message :param message: the message :return: """ message = message or "" if Console.color: Console.cprint('RED', 'DEBUG: ', message) else: print(Console.msg('DEBUG: ' + message))
python
def debug_msg(message): """ print a debug message :param message: the message :return: """ message = message or "" if Console.color: Console.cprint('RED', 'DEBUG: ', message) else: print(Console.msg('DEBUG: ' + message))
print a debug message :param message: the message :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L199-L209
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.info
def info(message): """ prints an informational message :param message: the message :return: """ message = message or "" if Console.color: Console.cprint('OKBLUE', "INFO: ", message) else: print(Console.msg("INFO: " + message))
python
def info(message): """ prints an informational message :param message: the message :return: """ message = message or "" if Console.color: Console.cprint('OKBLUE', "INFO: ", message) else: print(Console.msg("INFO: " + message))
prints an informational message :param message: the message :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L212-L222
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.warning
def warning(message): """ prints a warning :param message: the message :return: """ message = message or "" if Console.color: Console.cprint('WARNING', "WARNING: ", message) else: print(Console.msg("WARNING: " + message))
python
def warning(message): """ prints a warning :param message: the message :return: """ message = message or "" if Console.color: Console.cprint('WARNING', "WARNING: ", message) else: print(Console.msg("WARNING: " + message))
prints a warning :param message: the message :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L225-L235
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.ok
def ok(message):
    """
    prints an ok message

    :param message: the message
    :return:
    """
    message = message or ""
    if Console.color:
        Console.cprint('OKGREEN', "", message)
    else:
        print(Console.msg(message))
python
def ok(message):
    """
    prints an ok message

    :param message: the message
    :return:
    """
    message = message or ""
    if Console.color:
        Console.cprint('OKGREEN', "", message)
    else:
        print(Console.msg(message))
prints an ok message :param message: the message :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L238-L248
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
Console.cprint
def cprint(color, prefix, message): """ prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return: """ message = message or "" prefix = prefix or "" print((Console.theme[color] + prefix + message + Console.theme['ENDC']))
python
def cprint(color, prefix, message): """ prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return: """ message = message or "" prefix = prefix or "" print((Console.theme[color] + prefix + message + Console.theme['ENDC']))
prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L251-L264
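Typical use of the Console helpers listed above, assuming the module is importable as cloudmesh.common.console per the recorded file path.

# Usage sketch for the Console helpers; the messages are invented.
from cloudmesh.common.console import Console

Console.set_theme(color=False)    # plain output instead of ANSI colors
Console.info("starting the deployment")
Console.warning("config file not found, using defaults")
Console.error("could not reach the host", traceflag=False)
Console.ok("done")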
cloudmesh/cloudmesh-common
cloudmesh/common/StopWatch.py
StopWatch.start
def start(cls, name):
    """
    starts a timer with the given name.

    :param name: the name of the timer
    :type name: string
    """
    if cls.debug:
        print("Timer", name, "started ...")
    cls.timer_start[name] = time.time()
python
def start(cls, name):
    """
    starts a timer with the given name.

    :param name: the name of the timer
    :type name: string
    """
    if cls.debug:
        print("Timer", name, "started ...")
    cls.timer_start[name] = time.time()
starts a timer with the given name. :param name: the name of the timer :type name: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/StopWatch.py#L33-L42
cloudmesh/cloudmesh-common
cloudmesh/common/StopWatch.py
StopWatch.stop
def stop(cls, name):
    """
    stops the timer with a given name.

    :param name: the name of the timer
    :type name: string
    """
    cls.timer_end[name] = time.time()
    if cls.debug:
        print("Timer", name, "stopped ...")
python
def stop(cls, name):
    """
    stops the timer with a given name.

    :param name: the name of the timer
    :type name: string
    """
    cls.timer_end[name] = time.time()
    if cls.debug:
        print("Timer", name, "stopped ...")
stops the timer with a given name. :param name: the name of the timer :type name: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/StopWatch.py#L45-L54
cloudmesh/cloudmesh-common
cloudmesh/common/StopWatch.py
StopWatch.get
def get(cls, name):
    """
    returns the time of the timer.

    :param name: the name of the timer
    :type name: string
    :rtype: the elapsed time
    """
    if name in cls.timer_end:
        cls.timer_elapsed[name] = cls.timer_end[name] - \
                                  cls.timer_start[name]
        return cls.timer_elapsed[name]
    else:
        return "undefined"
python
def get(cls, name):
    """
    returns the time of the timer.

    :param name: the name of the timer
    :type name: string
    :rtype: the elapsed time
    """
    if name in cls.timer_end:
        cls.timer_elapsed[name] = cls.timer_end[name] - \
                                  cls.timer_start[name]
        return cls.timer_elapsed[name]
    else:
        return "undefined"
returns the time of the timer. :param name: the name of the timer :type name: string :rtype: the elapsed time
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/StopWatch.py#L57-L70
cloudmesh/cloudmesh-common
cloudmesh/common/StopWatch.py
StopWatch.print
def print(cls, *args): """ prints a timer. The first argument is the label if it exists, the last is the timer :param args: label, name :return: """ if cls.verbose: if len(args) == 2: print(args[0], str("{0:.2f}".format(cls.get(args[1]))), "s") else: raise Exception("StopWatch: wrong number of arguments")
python
def print(cls, *args): """ prints a timer. The first argument is the label if it exists, the last is the timer :param args: label, name :return: """ if cls.verbose: if len(args) == 2: print(args[0], str("{0:.2f}".format(cls.get(args[1]))), "s") else: raise Exception("StopWatch: wrong number of arguments")
prints a timer. The first argument is the label if it exists, the last is the timer :param args: label, name :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/StopWatch.py#L81-L91
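A usage sketch for the StopWatch timers above, assuming the class is importable from cloudmesh.common.StopWatch; the timed work is a placeholder sleep.

# Usage sketch; the sleep stands in for real work.
import time
from cloudmesh.common.StopWatch import StopWatch

StopWatch.start("download")
time.sleep(0.1)
StopWatch.stop("download")
print(StopWatch.get("download"))               # elapsed seconds as a float
StopWatch.print("download took", "download")   # printed only when StopWatch.verbose is set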
cloudmesh/cloudmesh-common
cloudmesh/common/StopWatch.py
StopWatch.benchmark
def benchmark(cls, sysinfo=True): """ prints out all timers in a convenient benchmark tabble :return: :rtype: """ # # PRINT PLATFORM # if sysinfo: data_platform = systeminfo() print(Printer.attribute(data_platform, ["Machine Arribute", "Time/s"])) # # PRINT TIMERS # timers = StopWatch.keys() data_timers = {} for timer in timers: data_timers[timer] = { 'time': round(StopWatch.get(timer), 2), 'timer': timer } for attribute in ["node", "system", "machine", "mac_version", "win_version"]: data_timers[timer][attribute] = data_platform[attribute] # print(Printer.attribute(data_timers, header=["Command", "Time/s"])) print(Printer.write( data_timers, order=["timer", "time", "node", "system", "mac_version", "win_version"] )) print() print(Printer.write( data_timers, order=["timer", "time", "node", "system", "mac_version", "win_version"], output="csv" ))
python
def benchmark(cls, sysinfo=True): """ prints out all timers in a convenient benchmark tabble :return: :rtype: """ # # PRINT PLATFORM # if sysinfo: data_platform = systeminfo() print(Printer.attribute(data_platform, ["Machine Arribute", "Time/s"])) # # PRINT TIMERS # timers = StopWatch.keys() data_timers = {} for timer in timers: data_timers[timer] = { 'time': round(StopWatch.get(timer), 2), 'timer': timer } for attribute in ["node", "system", "machine", "mac_version", "win_version"]: data_timers[timer][attribute] = data_platform[attribute] # print(Printer.attribute(data_timers, header=["Command", "Time/s"])) print(Printer.write( data_timers, order=["timer", "time", "node", "system", "mac_version", "win_version"] )) print() print(Printer.write( data_timers, order=["timer", "time", "node", "system", "mac_version", "win_version"], output="csv" ))
prints out all timers in a convenient benchmark table :return: :rtype:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/StopWatch.py#L106-L149
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
dprint
def dprint(OD, mode='dict', s="", indent=' ' * 4, level=0): """ a recursive dict printer method that adds indentations TODO: needs better explanation and test example :param OD: the ordered dict :param mode: the mode is dict :param s: TODO :param indent: the indentation characters. default is 4 :param level: the level :return: """ def is_number(s): """ checks if the type of s is a float :param s: :return: """ try: float(s) return True except ValueError: return False def fstr(s): """ return a string or number :param s: teh string or number :return: """ return s if is_number(s) else '"%s"' % s if mode != 'dict': kv_tpl = '("%s", %s)' ST = 'OrderedDict([\n' END = '])' else: kv_tpl = '"%s": %s' ST = '{\n' END = '}' for i, k in enumerate(OD.keys()): if type(OD[k]) in [dict, OrderedDict]: level += 1 s += (level - 1) * indent + kv_tpl % ( k, ST + dprint(OD[k], mode=mode, indent=indent, level=level) + ( level - 1) * indent + END) level -= 1 else: s += level * indent + kv_tpl % (k, fstr(OD[k])) if i != len(OD) - 1: s += "," s += "\n" return s
python
def dprint(OD, mode='dict', s="", indent=' ' * 4, level=0): """ a recursive dict printer method that adds indentations TODO: needs better explanation and test example :param OD: the ordered dict :param mode: the mode is dict :param s: TODO :param indent: the indentation characters. default is 4 :param level: the level :return: """ def is_number(s): """ checks if the type of s is a float :param s: :return: """ try: float(s) return True except ValueError: return False def fstr(s): """ return a string or number :param s: teh string or number :return: """ return s if is_number(s) else '"%s"' % s if mode != 'dict': kv_tpl = '("%s", %s)' ST = 'OrderedDict([\n' END = '])' else: kv_tpl = '"%s": %s' ST = '{\n' END = '}' for i, k in enumerate(OD.keys()): if type(OD[k]) in [dict, OrderedDict]: level += 1 s += (level - 1) * indent + kv_tpl % ( k, ST + dprint(OD[k], mode=mode, indent=indent, level=level) + ( level - 1) * indent + END) level -= 1 else: s += level * indent + kv_tpl % (k, fstr(OD[k])) if i != len(OD) - 1: s += "," s += "\n" return s
a recursive dict printer method that adds indentations TODO: needs better explanation and test example :param OD: the ordered dict :param mode: the mode is dict :param s: TODO :param indent: the indentation characters. default is 4 :param level: the level :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L68-L121
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
main
def main(): """ TODO: A test which should actually be moved into a nosetest :return: """ d = ConfigDict("cloudmesh.yaml") print(d, end='') d.info() print(d["meta"]) print(d["meta.kind"]) print(d["meta"]["kind"]) # this does not yet work d.data["cloudmesh"]["profile"]["firstname"] = 'ABC' print(d) d.save() import os os.system("cat cmd3.yaml") print(d.json) print(d.filename) print("YAML") print(d.yaml)
python
def main(): """ TODO: A test which should actually be moved into a nosetest :return: """ d = ConfigDict("cloudmesh.yaml") print(d, end='') d.info() print(d["meta"]) print(d["meta.kind"]) print(d["meta"]["kind"]) # this does not yet work d.data["cloudmesh"]["profile"]["firstname"] = 'ABC' print(d) d.save() import os os.system("cat cmd3.yaml") print(d.json) print(d.filename) print("YAML") print(d.yaml)
TODO: A test which should actually be moved into a nosetest :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L478-L503
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
Config.check_file_for_tabs
def check_file_for_tabs(cls, filename, verbose=True): """identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints issues :param filename: the filename :type filename: str :rtype: True if there are tabs in the file """ filename = path_expand(filename) file_contains_tabs = False with open(filename, 'r') as f: lines = f.read().split("\n") line_no = 1 for line in lines: if "\t" in line: file_contains_tabs = True location = [ i for i in range(len(line)) if line.startswith('\t', i)] if verbose: print("Tab found in line", line_no, "and column(s)", location) line_no += 1 return file_contains_tabs
python
def check_file_for_tabs(cls, filename, verbose=True): """identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints issues :param filename: the filename :type filename: str :rtype: True if there are tabs in the file """ filename = path_expand(filename) file_contains_tabs = False with open(filename, 'r') as f: lines = f.read().split("\n") line_no = 1 for line in lines: if "\t" in line: file_contains_tabs = True location = [ i for i in range(len(line)) if line.startswith('\t', i)] if verbose: print("Tab found in line", line_no, "and column(s)", location) line_no += 1 return file_contains_tabs
identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints issues :param filename: the filename :type filename: str :rtype: True if there are tabs in the file
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L130-L156
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
Config.path_expand
def path_expand(cls, path): """ expands the path while replacing environment variables, ./, and ~/ :param path: the path to be expanded :type path: string :return:the new path :rtype: string """ current_dir = "." + os.path.sep if path.startswith(current_dir): cwd = str(os.getcwd()) path = path.replace(current_dir, cwd, 1) location = os.path.expandvars(os.path.expanduser(path)) return location
python
def path_expand(cls, path): """ expands the path while replacing environment variables, ./, and ~/ :param path: the path to be expanded :type path: string :return:the new path :rtype: string """ current_dir = "." + os.path.sep if path.startswith(current_dir): cwd = str(os.getcwd()) path = path.replace(current_dir, cwd, 1) location = os.path.expandvars(os.path.expanduser(path)) return location
expands the path while replacing environment variables, ./, and ~/ :param path: the path to be expanded :type path: string :return: the new path :rtype: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L159-L172
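A minimal call sketch for Config.path_expand(), assuming the Config class is importable from cloudmesh.common.ConfigDict as the path above suggests and that path_expand is exposed as a classmethod (the cls parameter indicates it is).

# Usage sketch; the example paths are invented.
from cloudmesh.common.ConfigDict import Config

print(Config.path_expand("~/.cloudmesh/cloudmesh.yaml"))   # ~ expanded to the home directory
print(Config.path_expand("$HOME/.ssh/config"))             # environment variables expanded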
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
Config.find_file
def find_file(cls, filename, load_order=None, verbose=False): """ find the specified file in the list of directories that are given in the array load_order :param filename: the file name :type filename: str :param load_order: an array with path names in with the filename is looked for. :type load_order: list of str :param verbose: :type verbose: bool :return: file name if successful :rtype: string if the file exists or None otherwise """ if load_order is None: load_order = [".", os.path.join("~", ".cloudmesh")] for path in load_order: name = Config.path_expand(path + os.path.sep + filename) if verbose: print("try finding file", name) if os.path.isfile(name): if verbose: print("Found File", name) return name return None
python
def find_file(cls, filename, load_order=None, verbose=False): """ find the specified file in the list of directories that are given in the array load_order :param filename: the file name :type filename: str :param load_order: an array with path names in with the filename is looked for. :type load_order: list of str :param verbose: :type verbose: bool :return: file name if successful :rtype: string if the file exists or None otherwise """ if load_order is None: load_order = [".", os.path.join("~", ".cloudmesh")] for path in load_order: name = Config.path_expand(path + os.path.sep + filename) if verbose: print("try finding file", name) if os.path.isfile(name): if verbose: print("Found File", name) return name return None
find the specified file in the list of directories that are given in the array load_order :param filename: the file name :type filename: str :param load_order: an array with path names in which the filename is looked for. :type load_order: list of str :param verbose: :type verbose: bool :return: file name if successful :rtype: string if the file exists or None otherwise
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L175-L198
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
ConfigDict.load
def load(self, filename): """ loads the configuration from the yaml filename :param filename: :type filename: string :return: """ # print ("LOAD CONFIGDICT", filename) self.data = BaseConfigDict(filename=Config.path_expand(filename)) try: version = str(self.data["meta"]["version"]) if version not in self.versions: Console.error("The yaml file version must be {}".format( ', '.join(self.versions))) sys.exit(1) except Exception as e: Console.error( "Your yaml file ~/.cloudmesh/cloudmesh.yaml is not up to date.", traceflag=False) Console.error(e.message, traceflag=False) sys.exit(1)
python
def load(self, filename): """ loads the configuration from the yaml filename :param filename: :type filename: string :return: """ # print ("LOAD CONFIGDICT", filename) self.data = BaseConfigDict(filename=Config.path_expand(filename)) try: version = str(self.data["meta"]["version"]) if version not in self.versions: Console.error("The yaml file version must be {}".format( ', '.join(self.versions))) sys.exit(1) except Exception as e: Console.error( "Your yaml file ~/.cloudmesh/cloudmesh.yaml is not up to date.", traceflag=False) Console.error(e.message, traceflag=False) sys.exit(1)
loads the configuration from the yaml filename :param filename: :type filename: string :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L264-L285
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
ConfigDict.write
def write(self, filename=None, output="dict", attribute_indent=4): """ This method writes the dict into various output formats. This includes a dict, json, and yaml :param filename: the file in which the dict is written :param output: is a string that is either "dict", "json", "yaml" :param attribute_indent: character indentation of nested attributes in """ if filename is not None: location = path_expand(filename) else: location = self['meta']['location'] # with open('data.yml', 'w') as outfile: # outfile.write( yaml.dump(data, default_flow_style=True) ) # Make a backup self.make_a_copy(location) f = os.open(location, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, stat.S_IRUSR | stat.S_IWUSR) if output == "json": os.write(f, self.json()) elif output in ['yml', 'yaml']: # d = dict(self) # os.write(f, yaml.dump(d, default_flow_style=False)) os.write(f, ordered_dump(OrderedDict(self), Dumper=yaml.SafeDumper, default_flow_style=False, indent=attribute_indent)) elif output == "print": os.write(f, str(custom_print(self, attribute_indent))) else: os.write(f, self.dump()) os.close(f)
python
def write(self, filename=None, output="dict", attribute_indent=4): """ This method writes the dict into various output formats. This includes a dict, json, and yaml :param filename: the file in which the dict is written :param output: is a string that is either "dict", "json", "yaml" :param attribute_indent: character indentation of nested attributes in """ if filename is not None: location = path_expand(filename) else: location = self['meta']['location'] # with open('data.yml', 'w') as outfile: # outfile.write( yaml.dump(data, default_flow_style=True) ) # Make a backup self.make_a_copy(location) f = os.open(location, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, stat.S_IRUSR | stat.S_IWUSR) if output == "json": os.write(f, self.json()) elif output in ['yml', 'yaml']: # d = dict(self) # os.write(f, yaml.dump(d, default_flow_style=False)) os.write(f, ordered_dump(OrderedDict(self), Dumper=yaml.SafeDumper, default_flow_style=False, indent=attribute_indent)) elif output == "print": os.write(f, str(custom_print(self, attribute_indent))) else: os.write(f, self.dump()) os.close(f)
This method writes the dict into various output formats. This includes a dict, json, and yaml :param filename: the file in which the dict is written :param output: is a string that is either "dict", "json", "yaml" :param attribute_indent: character indentation of nested attributes in
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L288-L322
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
ConfigDict.make_a_copy
def make_a_copy(self, location=None): """ Creates a backup of the file specified in the location. The backup filename appends a .bak.NO where number is a number that is not yet used in the backup directory. TODO: This function should be moved to another file maybe XShell :param location: the location of the file to be backed up """ import shutil destination = backup_name(location) shutil.copyfile(location, destination)
python
def make_a_copy(self, location=None): """ Creates a backup of the file specified in the location. The backup filename appends a .bak.NO where number is a number that is not yet used in the backup directory. TODO: This function should be moved to another file maybe XShell :param location: the location of the file to be backed up """ import shutil destination = backup_name(location) shutil.copyfile(location, destination)
Creates a backup of the file specified in the location. The backup filename appends a .bak.NO where number is a number that is not yet used in the backup directory. TODO: This function should be moved to another file maybe XShell :param location: the location of the file to be backed up
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L324-L334
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
ConfigDict.save
def save(self, filename=None):
    """
    saves the configuration in the given filename,
    if it is none the filename at load time is used.

    :param filename: the file name
    :type filename: string
    :return:
    """
    content = self.data.yaml()
    # honor the filename argument; fall back to the filename used at load time
    location = filename or ConfigDict.filename
    with open(Config.path_expand(location), 'w') as f:
        f.write(content)
python
def save(self, filename=None):
    """
    saves the configuration in the given filename,
    if it is none the filename at load time is used.

    :param filename: the file name
    :type filename: string
    :return:
    """
    content = self.data.yaml()
    # honor the filename argument; fall back to the filename used at load time
    location = filename or ConfigDict.filename
    with open(Config.path_expand(location), 'w') as f:
        f.write(content)
saves the configuration in the given filename, if it is none the filename at load time is used. :param filename: the file name :type filename: string :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L336-L346
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
ConfigDict.json
def json(self, start=None):
    """
    :param start: start key in dot notation

    returns the dict in json format

    :return: json string version
    :rtype: string
    """
    # use the sub-dict at 'start' when given instead of silently ignoring it
    data = self.data if start is None else self.data[start]
    return json.dumps(data, indent=4)
python
def json(self, start=None):
    """
    :param start: start key in dot notation

    returns the dict in json format

    :return: json string version
    :rtype: string
    """
    # use the sub-dict at 'start' when given instead of silently ignoring it
    data = self.data if start is None else self.data[start]
    return json.dumps(data, indent=4)
:param start: start key in dot notation returns the dict in json format :return: json string version :rtype: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L411-L420
cloudmesh/cloudmesh-common
cloudmesh/common/ConfigDict.py
ConfigDict.getUser
def getUser(cls, cloud): """ gets the username for a specified cloud. TODO: works currently only for opensatck. :param cloud: the name of the cloud :return: """ try: config = d = ConfigDict("cloudmesh.yaml") d = ConfigDict("cloudmesh.yaml") # # bug: cloud is none when adding a group # config = d["cloudmesh"]["clouds"][cloud] credentials = config["credentials"] cloud_type = config["cm_type"] if cloud_type == "openstack": return credentials["OS_USERNAME"] else: raise ValueError("getUser for this cloud type not yet " "supported: {}".format(cloud)) except Exception as ex: Console.error("problem getting user")
python
def getUser(cls, cloud): """ gets the username for a specified cloud. TODO: works currently only for opensatck. :param cloud: the name of the cloud :return: """ try: config = d = ConfigDict("cloudmesh.yaml") d = ConfigDict("cloudmesh.yaml") # # bug: cloud is none when adding a group # config = d["cloudmesh"]["clouds"][cloud] credentials = config["credentials"] cloud_type = config["cm_type"] if cloud_type == "openstack": return credentials["OS_USERNAME"] else: raise ValueError("getUser for this cloud type not yet " "supported: {}".format(cloud)) except Exception as ex: Console.error("problem getting user")
gets the username for a specified cloud. TODO: works currently only for openstack. :param cloud: the name of the cloud :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L434-L460
cloudmesh/cloudmesh-common
cloudmesh/common/locations.py
config_dir_setup
def config_dir_setup(filename): """ sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return: """ path = os.path.dirname(filename) if not os.path.isdir(path): Shell.mkdir(path)
python
def config_dir_setup(filename): """ sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return: """ path = os.path.dirname(filename) if not os.path.isdir(path): Shell.mkdir(path)
sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/locations.py#L37-L45
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
main
def main(): """ a test that should actually be added into a nosetest :return: """ shell = Shell() print(shell.terminal_type()) r = shell.execute('pwd') # copy line replace print(r) # shell.list() # print json.dumps(shell.command, indent=4) # test some commands without args """ for cmd in ['whoami', 'pwd']: r = shell._execute(cmd) print ("---------------------") print ("Command: {:}".format(cmd)) print ("{:}".format(r)) print ("---------------------") """ r = shell.execute('ls', ["-l", "-a"]) print(r) r = shell.execute('ls', "-l -a") print(r) r = shell.ls("-aux") print(r) r = shell.ls("-a", "-u", "-x") print(r) r = shell.pwd() print(r)
python
def main(): """ a test that should actually be added into a nosetest :return: """ shell = Shell() print(shell.terminal_type()) r = shell.execute('pwd') # copy line replace print(r) # shell.list() # print json.dumps(shell.command, indent=4) # test some commands without args """ for cmd in ['whoami', 'pwd']: r = shell._execute(cmd) print ("---------------------") print ("Command: {:}".format(cmd)) print ("{:}".format(r)) print ("---------------------") """ r = shell.execute('ls', ["-l", "-a"]) print(r) r = shell.execute('ls', "-l -a") print(r) r = shell.ls("-aux") print(r) r = shell.ls("-a", "-u", "-x") print(r) r = shell.pwd() print(r)
a test that should actually be added into a nosetest :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L790-L828
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.get_python
def get_python(cls): """ returns the python and pip version :return: python version, pip version """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] python_version_s = '.'.join(v_string) # pip_version = pip.__version__ pip_version = Shell.pip("--version").split()[1] return python_version_s, pip_version
python
def get_python(cls): """ returns the python and pip version :return: python version, pip version """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] python_version_s = '.'.join(v_string) # pip_version = pip.__version__ pip_version = Shell.pip("--version").split()[1] return python_version_s, pip_version
returns the python and pip version :return: python version, pip version
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L213-L224
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.check_python
def check_python(cls): """ checks if the python version is supported :return: True if it is supported """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] if python_version[0] == 2: python_version_s = '.'.join(v_string) if (python_version[0] == 2) and (python_version[1] >= 7) and ( python_version[2] >= 9): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") elif python_version[0] == 3: python_version_s = '.'.join(v_string) if (python_version[0] == 3) and (python_version[1] >= 7) and ( python_version[2] >= 0): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") # pip_version = pip.__version__ python_version, pip_version = cls.get_python() if int(pip_version.split(".")[0]) >= 18: print("You are running a supported version of pip: " + str( pip_version)) else: print("WARNING: You are running an old version of pip: " + str( pip_version)) print(" We recommend you update your pip with \n") print(" pip install -U pip\n")
python
def check_python(cls): """ checks if the python version is supported :return: True if it is supported """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] if python_version[0] == 2: python_version_s = '.'.join(v_string) if (python_version[0] == 2) and (python_version[1] >= 7) and ( python_version[2] >= 9): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") elif python_version[0] == 3: python_version_s = '.'.join(v_string) if (python_version[0] == 3) and (python_version[1] >= 7) and ( python_version[2] >= 0): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") # pip_version = pip.__version__ python_version, pip_version = cls.get_python() if int(pip_version.split(".")[0]) >= 18: print("You are running a supported version of pip: " + str( pip_version)) else: print("WARNING: You are running an old version of pip: " + str( pip_version)) print(" We recommend you update your pip with \n") print(" pip install -U pip\n")
checks if the python version is supported :return: True if it is supported
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L227-L276
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.VBoxManage
def VBoxManage(cls, *args): """ executes VboxManage with the given arguments :param args: :return: """ if platform.system().lower() == "darwin": command = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage" else: command = 'VBoxManage' return cls.execute(command, args)
python
def VBoxManage(cls, *args): """ executes VboxManage with the given arguments :param args: :return: """ if platform.system().lower() == "darwin": command = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage" else: command = 'VBoxManage' return cls.execute(command, args)
executes VboxManage with the given arguments :param args: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L340-L351
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.ping
def ping(cls, host=None, count=1): """ execute ping :param host: the host to ping :param count: the number of pings :return: """ option = '-n' if platform.system().lower() == 'windows' else '-c' return cls.execute('ping', "{option} {count} {host}".format(option=option, count=count, host=host))
python
def ping(cls, host=None, count=1): """ execute ping :param host: the host to ping :param count: the number of pings :return: """ option = '-n' if platform.system().lower() == 'windows' else '-c' return cls.execute('ping', "{option} {count} {host}".format(option=option, count=count, host=host))
execute ping :param host: the host to ping :param count: the number of pings :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L435-L446
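A usage sketch for the Shell wrappers, assuming Shell is importable from cloudmesh.common.Shell as the recorded path suggests; the host is a local example.

# Usage sketch; 127.0.0.1 is used so the example has no external dependency.
from cloudmesh.common.Shell import Shell

print(Shell.terminal_type())                  # e.g. 'linux' or 'darwin'
print(Shell.ping(host="127.0.0.1", count=1))
print(Shell.execute("ls", ["-l", "-a"]))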
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.remove_line_with
def remove_line_with(cls, lines, what): """ returns all lines that do not contain what :param lines: :param what: :return: """ result = [] for line in lines: if what not in line: result = result + [line] return result
python
def remove_line_with(cls, lines, what): """ returns all lines that do not contain what :param lines: :param what: :return: """ result = [] for line in lines: if what not in line: result = result + [line] return result
returns all lines that do not contain what :param lines: :param what: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L584-L595
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.find_lines_with
def find_lines_with(cls, lines, what): """ returns all lines that contain what :param lines: :param what: :return: """ result = [] for line in lines: if what in line: result = result + [line] return result
python
def find_lines_with(cls, lines, what): """ returns all lines that contain what :param lines: :param what: :return: """ result = [] for line in lines: if what in line: result = result + [line] return result
returns all lines that contain what :param lines: :param what: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L598-L609
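The helpers remove_line_with (above) and find_lines_with are complementary filters; a small sketch, with the import path assumed from the file path:
# Hypothetical example: keep or drop lines by substring.
from cloudmesh.common.Shell import Shell

lines = ["alpha 1", "beta 2", "alpha 3"]
print(Shell.find_lines_with(lines, "alpha"))    # ['alpha 1', 'alpha 3']
print(Shell.remove_line_with(lines, "alpha"))   # ['beta 2']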
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.find_cygwin_executables
def find_cygwin_executables(cls):
    """
    find the executables in cygwin
    """
    exe_paths = glob.glob(cls.cygwin_path + r'\*.exe')
    # print cls.cygwin_path
    # list all *.exe in cygwin path, use glob
    for c in exe_paths:
        exe = c.split('\\')
        name = exe[1].split('.')[0]
        # command['windows'][name] = "{:}\{:}.exe".format(cygwin_path, c)
        cls.command['windows'][name] = c
python
def find_cygwin_executables(cls):
    """
    find the executables in cygwin
    """
    exe_paths = glob.glob(cls.cygwin_path + r'\*.exe')
    # print cls.cygwin_path
    # list all *.exe in cygwin path, use glob
    for c in exe_paths:
        exe = c.split('\\')
        name = exe[1].split('.')[0]
        # command['windows'][name] = "{:}\{:}.exe".format(cygwin_path, c)
        cls.command['windows'][name] = c
find the executables in cygwin
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L622-L633
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.terminal_type
def terminal_type(cls):
    """
    returns darwin, cygwin, cmd, or linux
    """
    what = sys.platform

    kind = 'UNDEFINED_TERMINAL_TYPE'
    if 'linux' in what:
        kind = 'linux'
    elif 'darwin' in what:
        kind = 'darwin'
    elif 'cygwin' in what:
        kind = 'cygwin'
    elif 'windows' in what:
        kind = 'windows'

    return kind
python
def terminal_type(cls):
    """
    returns darwin, cygwin, cmd, or linux
    """
    what = sys.platform

    kind = 'UNDEFINED_TERMINAL_TYPE'
    if 'linux' in what:
        kind = 'linux'
    elif 'darwin' in what:
        kind = 'darwin'
    elif 'cygwin' in what:
        kind = 'cygwin'
    elif 'windows' in what:
        kind = 'windows'

    return kind
returns darwin, cygwin, cmd, or linux
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L636-L652
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.execute
def execute(cls, cmd, arguments="", shell=False, cwd=None,
            traceflag=True, witherror=True):
    """Run Shell command

    :param witherror: if set to False the error will not be printed
    :param traceflag: if set to true the trace is printed in case of an error
    :param cwd: the current working directory in whcih the command is
                supposed to be executed.
    :param shell: if set to true the subprocess is called as part of a shell
    :param cmd: command to run
    :param arguments: we do not know yet
    :return:
    """
    # print "--------------"
    result = None
    terminal = cls.terminal_type()
    # print cls.command
    os_command = [cmd]
    if terminal in ['linux', 'windows']:
        os_command = [cmd]
    elif 'cygwin' in terminal:
        if not cls.command_exists(cmd):
            print("ERROR: the command could not be found", cmd)
            return
        else:
            os_command = [cls.command[cls.operating_system()][cmd]]

    if isinstance(arguments, list):
        os_command = os_command + arguments
    elif isinstance(arguments, tuple):
        os_command = os_command + list(arguments)
    elif isinstance(arguments, str):
        os_command = os_command + arguments.split()
    else:
        print("ERROR: Wrong parameter type", type(arguments))

    if cwd is None:
        cwd = os.getcwd()
    try:
        if shell:
            result = subprocess.check_output(
                os_command,
                stderr=subprocess.STDOUT,
                shell=True,
                cwd=cwd)
        else:
            result = subprocess.check_output(
                os_command,
                # shell=True,
                stderr=subprocess.STDOUT,
                cwd=cwd)
    except:
        if witherror:
            Console.error("problem executing subprocess",
                          traceflag=traceflag)
    if result is not None:
        result = result.strip().decode()
    return result
python
def execute(cls, cmd, arguments="", shell=False, cwd=None,
            traceflag=True, witherror=True):
    """Run Shell command

    :param witherror: if set to False the error will not be printed
    :param traceflag: if set to true the trace is printed in case of an error
    :param cwd: the current working directory in whcih the command is
                supposed to be executed.
    :param shell: if set to true the subprocess is called as part of a shell
    :param cmd: command to run
    :param arguments: we do not know yet
    :return:
    """
    # print "--------------"
    result = None
    terminal = cls.terminal_type()
    # print cls.command
    os_command = [cmd]
    if terminal in ['linux', 'windows']:
        os_command = [cmd]
    elif 'cygwin' in terminal:
        if not cls.command_exists(cmd):
            print("ERROR: the command could not be found", cmd)
            return
        else:
            os_command = [cls.command[cls.operating_system()][cmd]]

    if isinstance(arguments, list):
        os_command = os_command + arguments
    elif isinstance(arguments, tuple):
        os_command = os_command + list(arguments)
    elif isinstance(arguments, str):
        os_command = os_command + arguments.split()
    else:
        print("ERROR: Wrong parameter type", type(arguments))

    if cwd is None:
        cwd = os.getcwd()
    try:
        if shell:
            result = subprocess.check_output(
                os_command,
                stderr=subprocess.STDOUT,
                shell=True,
                cwd=cwd)
        else:
            result = subprocess.check_output(
                os_command,
                # shell=True,
                stderr=subprocess.STDOUT,
                cwd=cwd)
    except:
        if witherror:
            Console.error("problem executing subprocess",
                          traceflag=traceflag)
    if result is not None:
        result = result.strip().decode()
    return result
Run Shell command :param witherror: if set to False the error will not be printed :param traceflag: if set to true the trace is printed in case of an error :param cwd: the current working directory in whcih the command is supposed to be executed. :param shell: if set to true the subprocess is called as part of a shell :param cmd: command to run :param arguments: we do not know yet :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L681-L742
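A usage sketch for Shell.execute on a POSIX system; the command and working directory are example values and the import path is assumed from the file path above:
# Hypothetical example: run `ls -a` in /tmp and capture its output.
from cloudmesh.common.Shell import Shell

listing = Shell.execute("ls", ["-a"], cwd="/tmp")
print(listing)

# A plain string also works; it is split on whitespace before the call.
kernel = Shell.execute("uname", "-r")
print(kernel)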
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
Shell.mkdir
def mkdir(cls, directory):
    """
    creates a directory with all its parents in ots name

    :param directory: the path of the directory
    :return:
    """
    directory = path_expand(directory)
    try:
        os.makedirs(directory)
    except OSError as e:
        # EEXIST (errno 17) occurs under two conditions when the path exists:
        # - it is a file
        # - it is a directory
        #
        # if it is a file, this is a valid error, otherwise, all
        # is fine.
        if e.errno == errno.EEXIST and os.path.isdir(directory):
            pass
        else:
            raise
python
def mkdir(cls, directory):
    """
    creates a directory with all its parents in ots name

    :param directory: the path of the directory
    :return:
    """
    directory = path_expand(directory)
    try:
        os.makedirs(directory)
    except OSError as e:
        # EEXIST (errno 17) occurs under two conditions when the path exists:
        # - it is a file
        # - it is a directory
        #
        # if it is a file, this is a valid error, otherwise, all
        # is fine.
        if e.errno == errno.EEXIST and os.path.isdir(directory):
            pass
        else:
            raise
creates a directory with all its parents in ots name :param directory: the path of the directory :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L745-L765
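A usage sketch for Shell.mkdir; the path is made up, and the call is safe to repeat because EEXIST on an existing directory is swallowed:
# Hypothetical example: create a nested directory, then call again without error.
from cloudmesh.common.Shell import Shell

Shell.mkdir("~/tmp/cloudmesh/test")   # ~ is expanded via path_expand
Shell.mkdir("~/tmp/cloudmesh/test")   # directory already exists, nothing happens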
cloudmesh/cloudmesh-common
cloudmesh/common/TableParser.py
TableParser.clean
def clean(self, line):
    """
    :param line: cleans the string
    :return:
    """
    # print ("-" + line + "-")
    if line == '':
        line = 'None'
    if self.is_lower:
        line = line.lower()
    if line == "user ":  # for slurm which has "user" and "user "
        line = "userid"
    for convert in self.change:
        line = line.replace(convert[0], convert[1])
    if self.is_strip:
        line = line.strip()
    return line.strip(' ')
python
def clean(self, line):
    """
    :param line: cleans the string
    :return:
    """
    # print ("-" + line + "-")
    if line == '':
        line = 'None'
    if self.is_lower:
        line = line.lower()
    if line == "user ":  # for slurm which has "user" and "user "
        line = "userid"
    for convert in self.change:
        line = line.replace(convert[0], convert[1])
    if self.is_strip:
        line = line.strip()
    return line.strip(' ')
:param line: cleans the string :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/TableParser.py#L76-L92
cloudmesh/cloudmesh-common
cloudmesh/common/TableParser.py
TableParser._get_headers
def _get_headers(self):
    """
    assumes comment have been stripped with extract

    :return:
    """
    header = self.lines[0]
    self.lines = self.lines[1:]
    self.headers = \
        [self.clean(h) for h in header.split(self.seperator)]
    if self.is_strip:
        self.headers = self.headers[1:-1]
    return self.headers
python
def _get_headers(self):
    """
    assumes comment have been stripped with extract

    :return:
    """
    header = self.lines[0]
    self.lines = self.lines[1:]
    self.headers = \
        [self.clean(h) for h in header.split(self.seperator)]
    if self.is_strip:
        self.headers = self.headers[1:-1]
    return self.headers
assumes comment have been stripped with extract :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/TableParser.py#L104-L117
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
tempdir
def tempdir(*args, **kwargs):
    """A contextmanager to work in an auto-removed temporary directory

    Arguments are passed through to tempfile.mkdtemp

    example:

    >>> with tempdir() as path:
    ...    pass
    """
    d = tempfile.mkdtemp(*args, **kwargs)
    try:
        yield d
    finally:
        shutil.rmtree(d)
python
def tempdir(*args, **kwargs):
    """A contextmanager to work in an auto-removed temporary directory

    Arguments are passed through to tempfile.mkdtemp

    example:

    >>> with tempdir() as path:
    ...    pass
    """
    d = tempfile.mkdtemp(*args, **kwargs)
    try:
        yield d
    finally:
        shutil.rmtree(d)
A contextmanager to work in an auto-removed temporary directory Arguments are passed through to tempfile.mkdtemp example: >>> with tempdir() as path: ... pass
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L34-L49
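A slightly fuller sketch than the doctest above, assuming the helper is importable from cloudmesh.common.util:
# Hypothetical example: the directory exists inside the block and is removed on exit.
import os
from cloudmesh.common.util import tempdir

with tempdir(prefix="cloudmesh-") as path:     # extra args go to tempfile.mkdtemp
    with open(os.path.join(path, "scratch.txt"), "w") as f:
        f.write("temporary data")
print(os.path.exists(path))                    # False: the tree was deleted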
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
exponential_backoff
def exponential_backoff(fn, sleeptime_s_max=30 * 60):
    """Calls `fn` until it returns True, with an exponentially increasing
    wait time between calls"""
    sleeptime_ms = 500
    while True:
        if fn():
            return True
        else:
            print('Sleeping {} ms'.format(sleeptime_ms))
            time.sleep(sleeptime_ms / 1000.0)
            sleeptime_ms *= 2
            if sleeptime_ms / 1000.0 > sleeptime_s_max:
                return False
python
def exponential_backoff(fn, sleeptime_s_max=30 * 60):
    """Calls `fn` until it returns True, with an exponentially increasing
    wait time between calls"""
    sleeptime_ms = 500
    while True:
        if fn():
            return True
        else:
            print('Sleeping {} ms'.format(sleeptime_ms))
            time.sleep(sleeptime_ms / 1000.0)
            sleeptime_ms *= 2
            if sleeptime_ms / 1000.0 > sleeptime_s_max:
                return False
Calls `fn` until it returns True, with an exponentially increasing wait time between calls
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L52-L65
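A self-contained sketch; the probe function is invented for illustration and the import path is assumed:
# Hypothetical example: a probe that succeeds on its third call; waits 0.5 s, then 1 s, between tries.
from cloudmesh.common.util import exponential_backoff

attempts = []

def probe():
    attempts.append(1)
    return len(attempts) >= 3      # pretend the resource appears on the third check

ok = exponential_backoff(probe, sleeptime_s_max=60)
print(ok, "after", len(attempts), "attempts")   # True after 3 attempts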
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
search
def search(lines, pattern):
    """
    return all lines that match the pattern
    #TODO: we need an example

    :param lines:
    :param pattern:
    :return:
    """
    p = pattern.replace("*", ".*")
    test = re.compile(p)
    result = []
    for l in lines:
        if test.search(l):
            result.append(l)
    return result
python
def search(lines, pattern):
    """
    return all lines that match the pattern
    #TODO: we need an example

    :param lines:
    :param pattern:
    :return:
    """
    p = pattern.replace("*", ".*")
    test = re.compile(p)
    result = []
    for l in lines:
        if test.search(l):
            result.append(l)
    return result
return all lines that match the pattern #TODO: we need an example :param lines: :param pattern: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L68-L83
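Since the docstring still asks for an example, here is a small one (import path assumed):
# Hypothetical example: '*' in the pattern is rewritten to the regex '.*'.
from cloudmesh.common.util import search

lines = ["cm-burn", "cloudmesh-common", "cloudmesh-cmd5"]
print(search(lines, "cloudmesh*cmd"))   # ['cloudmesh-cmd5']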
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
grep
def grep(pattern, filename):
    """Very simple grep that returns the first matching line in a file.
    String matching only, does not do REs as currently implemented.
    """
    try:
        # for line in file
        # if line matches pattern:
        #    return line
        return next((L for L in open(filename) if L.find(pattern) >= 0))
    except StopIteration:
        return ''
python
def grep(pattern, filename):
    """Very simple grep that returns the first matching line in a file.
    String matching only, does not do REs as currently implemented.
    """
    try:
        # for line in file
        # if line matches pattern:
        #    return line
        return next((L for L in open(filename) if L.find(pattern) >= 0))
    except StopIteration:
        return ''
Very simple grep that returns the first matching line in a file. String matching only, does not do REs as currently implemented.
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L86-L96
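A self-contained sketch; the file name and contents are made up for illustration:
# Hypothetical example: return the first line of a file that contains 'Port'.
from cloudmesh.common.util import grep, writefile

writefile("/tmp/sshd_sample.conf", "HostKey /etc/ssh/key\nPort 22\n")
print(grep("Port", "/tmp/sshd_sample.conf").strip())   # -> Port 22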
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
path_expand
def path_expand(text):
    """ returns a string with expanded variable.

    :param text: the path to be expanded, which can include ~ and environment $ variables
    :param text: string
    """
    result = os.path.expandvars(os.path.expanduser(text))
    # template = Template(text)
    # result = template.substitute(os.environ)
    if result.startswith("."):
        result = result.replace(".", os.getcwd(), 1)
    return result
python
def path_expand(text):
    """ returns a string with expanded variable.

    :param text: the path to be expanded, which can include ~ and environment $ variables
    :param text: string
    """
    result = os.path.expandvars(os.path.expanduser(text))
    # template = Template(text)
    # result = template.substitute(os.environ)
    if result.startswith("."):
        result = result.replace(".", os.getcwd(), 1)
    return result
returns a string with expanded variable. :param text: the path to be expanded, which can include ~ and environment $ variables :param text: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L99-L114
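A usage sketch; the environment variable and the paths are example values:
# Hypothetical example: user, environment variable, and leading '.' expansion.
import os
from cloudmesh.common.util import path_expand

os.environ["PROJECT"] = "cloudmesh"
print(path_expand("~/.cloudmesh/$PROJECT.yaml"))   # e.g. /home/albert/.cloudmesh/cloudmesh.yaml
print(path_expand("./data"))                       # current working directory + '/data'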
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
convert_from_unicode
def convert_from_unicode(data):
    """
    converts unicode data to a string
    :param data: the data to convert
    :return:
    """
    # if isinstance(data, basestring):
    if isinstance(data, str):
        return str(data)
    elif isinstance(data, collectionsAbc.Mapping):
        return dict(map(convert_from_unicode, data.items()))
    elif isinstance(data, collectionsAbc.Iterable):
        return type(data)(map(convert_from_unicode, data))
    else:
        return data
python
def convert_from_unicode(data):
    """
    converts unicode data to a string
    :param data: the data to convert
    :return:
    """
    # if isinstance(data, basestring):
    if isinstance(data, str):
        return str(data)
    elif isinstance(data, collectionsAbc.Mapping):
        return dict(map(convert_from_unicode, data.items()))
    elif isinstance(data, collectionsAbc.Iterable):
        return type(data)(map(convert_from_unicode, data))
    else:
        return data
converts unicode data to a string :param data: the data to convert :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L117-L132
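A usage sketch with an invented nested structure (import path assumed):
# Hypothetical example: normalize a mapping that may still hold unicode objects.
from cloudmesh.common.util import convert_from_unicode

data = {u"name": u"node-1", u"tags": [u"gpu", u"west"]}
print(convert_from_unicode(data))   # {'name': 'node-1', 'tags': ['gpu', 'west']}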
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
yn_choice
def yn_choice(message, default='y', tries=None):
    """asks for a yes/no question.

    :param tries: the number of tries
    :param message: the message containing the question
    :param default: the default answer
    """
    # http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input"""
    choices = 'Y/n' if default.lower() in ('y', 'yes') else 'y/N'
    if tries is None:
        choice = input("%s (%s) " % (message, choices))
        values = ('y', 'yes', '') if default == 'y' else ('y', 'yes')
        return True if choice.strip().lower() in values else False
    else:
        while tries > 0:
            choice = input(
                "%s (%s) (%s)" % (message, choices, "'q' to discard"))
            choice = choice.strip().lower()
            if choice in ['y', 'yes']:
                return True
            elif choice in ['n', 'no', 'q']:
                return False
            else:
                print("Invalid input...")
                tries -= 1
python
def yn_choice(message, default='y', tries=None):
    """asks for a yes/no question.

    :param tries: the number of tries
    :param message: the message containing the question
    :param default: the default answer
    """
    # http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input"""
    choices = 'Y/n' if default.lower() in ('y', 'yes') else 'y/N'
    if tries is None:
        choice = input("%s (%s) " % (message, choices))
        values = ('y', 'yes', '') if default == 'y' else ('y', 'yes')
        return True if choice.strip().lower() in values else False
    else:
        while tries > 0:
            choice = input(
                "%s (%s) (%s)" % (message, choices, "'q' to discard"))
            choice = choice.strip().lower()
            if choice in ['y', 'yes']:
                return True
            elif choice in ['n', 'no', 'q']:
                return False
            else:
                print("Invalid input...")
                tries -= 1
asks for a yes/no question. :param tries: the number of tries :param message: the message containing the question :param default: the default answer
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L135-L159
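An interactive sketch; the prompt texts are invented:
# Hypothetical example: a defaulted prompt and a limited-retry prompt.
from cloudmesh.common.util import yn_choice

if yn_choice("Delete the VM?", default="n"):
    print("deleting ...")

answer = yn_choice("Proceed with upload?", default="y", tries=3)
print(answer)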
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
banner
def banner(txt=None, c="#", debug=True, label=None, color=None): """prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param color: prints in the given color :param label: adds a label :param debug: prints only if debug is true :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character """ output = "" if debug: output = "\n" output += "# " + 70 * c + "\n" if label is not None: output += "# " + label + "\n" output += "# " + 70 * c + "\n" if txt is not None: for line in txt.split("\n"): output += "# " + line + "\n" output += "# " + 70 * c + "\n" if color is None: color = "BLUE" Console.cprint(color, "", output)
python
def banner(txt=None, c="#", debug=True, label=None, color=None): """prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param color: prints in the given color :param label: adds a label :param debug: prints only if debug is true :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character """ output = "" if debug: output = "\n" output += "# " + 70 * c + "\n" if label is not None: output += "# " + label + "\n" output += "# " + 70 * c + "\n" if txt is not None: for line in txt.split("\n"): output += "# " + line + "\n" output += "# " + 70 * c + "\n" if color is None: color = "BLUE" Console.cprint(color, "", output)
prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param color: prints in the given color :param label: adds a label :param debug: prints only if debug is true :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L162-L191
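A usage sketch; the label text is invented and the color name assumes Console accepts 'RED':
# Hypothetical example: print a framed status message.
from cloudmesh.common.util import banner

banner("deploying 3 workers", label="STATUS", color="RED")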
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
str_banner
def str_banner(txt=None, c="#", debug=True): """prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param debug: return "" if not in debug :type debug: boolean :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character """ line = "" if debug: line += "\n" line += "# " + str(70 * c) if txt is not None: line += "# " + txt line += "# " + str(70 * c) return line
python
def str_banner(txt=None, c="#", debug=True): """prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param debug: return "" if not in debug :type debug: boolean :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character """ line = "" if debug: line += "\n" line += "# " + str(70 * c) if txt is not None: line += "# " + txt line += "# " + str(70 * c) return line
prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param debug: return "" if not in debug :type debug: boolean :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L194-L215
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
HEADING
def HEADING(txt=None, c="#"): """ Prints a message to stdout with #### surrounding it. This is useful for nosetests to better distinguish them. :param c: uses the given char to wrap the header :param txt: a text message to be printed :type txt: string """ frame = inspect.getouterframes(inspect.currentframe()) filename = frame[1][1].replace(os.getcwd(), "") line = frame[1][2] - 1 method = frame[1][3] msg = "{}\n# {} {} {}".format(txt, method, filename, line) print() banner(msg, c=c)
python
def HEADING(txt=None, c="#"): """ Prints a message to stdout with #### surrounding it. This is useful for nosetests to better distinguish them. :param c: uses the given char to wrap the header :param txt: a text message to be printed :type txt: string """ frame = inspect.getouterframes(inspect.currentframe()) filename = frame[1][1].replace(os.getcwd(), "") line = frame[1][2] - 1 method = frame[1][3] msg = "{}\n# {} {} {}".format(txt, method, filename, line) print() banner(msg, c=c)
Prints a message to stdout with #### surrounding it. This is useful for nosetests to better distinguish them. :param c: uses the given char to wrap the header :param txt: a text message to be printed :type txt: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L219-L236
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
backup_name
def backup_name(filename):
    """
    :param filename: given a filename creates a backup name of the form
                     filename.bak.1. If the filename already exists
                     the number will be increased as much as needed so
                     the file does not exist in the given location.
                     The filename can consists a path and is expanded
                     with ~ and environment variables.
    :type filename: string
    :rtype: string
    """
    location = path_expand(filename)
    n = 0
    found = True
    backup = None
    while found:
        n += 1
        backup = "{0}.bak.{1}".format(location, n)
        found = os.path.isfile(backup)
    return backup
python
def backup_name(filename):
    """
    :param filename: given a filename creates a backup name of the form
                     filename.bak.1. If the filename already exists
                     the number will be increased as much as needed so
                     the file does not exist in the given location.
                     The filename can consists a path and is expanded
                     with ~ and environment variables.
    :type filename: string
    :rtype: string
    """
    location = path_expand(filename)
    n = 0
    found = True
    backup = None
    while found:
        n += 1
        backup = "{0}.bak.{1}".format(location, n)
        found = os.path.isfile(backup)
    return backup
:param filename: given a filename creates a backup name of the form filename.bak.1. If the filename already exists the number will be increased as much as needed so the file does not exist in the given location. The filename can consists a path and is expanded with ~ and environment variables. :type filename: string :rtype: string
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L239-L258
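A usage sketch with a made-up filename:
# Hypothetical example: pick the first non-existing .bak.N name next to a config file.
from cloudmesh.common.util import backup_name

target = backup_name("~/.cloudmesh/cloudmesh.yaml")
print(target)   # e.g. /home/albert/.cloudmesh/cloudmesh.yaml.bak.1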
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
auto_create_version
def auto_create_version(class_name, version, filename="__init__.py"):
    """
    creates a version number in the __init__.py file.
    it can be accessed with __version__

    :param class_name:
    :param version:
    :param filename:
    :return:
    """
    version_filename = Path(
        "{classname}/{filename}".format(classname=class_name,
                                        filename=filename))
    with open(version_filename, "r") as f:
        content = f.read()

    if content != '__version__ = "{0}"'.format(version):
        banner("Updating version to {0}".format(version))
        with open(version_filename, "w") as text_file:
            text_file.write('__version__ = "{0:s}"'.format(version))
python
def auto_create_version(class_name, version, filename="__init__.py"):
    """
    creates a version number in the __init__.py file.
    it can be accessed with __version__

    :param class_name:
    :param version:
    :param filename:
    :return:
    """
    version_filename = Path(
        "{classname}/{filename}".format(classname=class_name,
                                        filename=filename))
    with open(version_filename, "r") as f:
        content = f.read()

    if content != '__version__ = "{0}"'.format(version):
        banner("Updating version to {0}".format(version))
        with open(version_filename, "w") as text_file:
            text_file.write('__version__ = "{0:s}"'.format(version))
creates a version number in the __init__.py file. it can be accessed with __version__ :param class_name: :param version: :param filename: :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L261-L279
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
readfile
def readfile(filename):
    """
    returns the content of a file
    :param filename: the filename
    :return:
    """
    with open(path_expand(filename), 'r') as f:
        content = f.read()
    return content
python
def readfile(filename):
    """
    returns the content of a file
    :param filename: the filename
    :return:
    """
    with open(path_expand(filename), 'r') as f:
        content = f.read()
    return content
returns the content of a file :param filename: the filename :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L319-L327
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
writefile
def writefile(filename, content):
    """
    writes the content into the file
    :param filename: the filename
    :param content: teh content
    :return:
    """
    with open(path_expand(filename), 'w') as outfile:
        outfile.write(content)
python
def writefile(filename, content):
    """
    writes the content into the file
    :param filename: the filename
    :param content: teh content
    :return:
    """
    with open(path_expand(filename), 'w') as outfile:
        outfile.write(content)
writes the content into the file :param filename: the filename :param content: teh content :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L330-L338
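readfile and writefile pair naturally; a round-trip sketch with an invented path:
# Hypothetical example: write a small text file and read it back.
from cloudmesh.common.util import readfile, writefile

writefile("/tmp/cloudmesh-note.txt", "hello cloudmesh\n")
print(readfile("/tmp/cloudmesh-note.txt"))   # -> hello cloudmesh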
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
generate_password
def generate_password(length=8, lower=True, upper=True, number=True):
    """
    generates a simple password. We should not really use this in production.
    :param length: the length of the password
    :param lower: True of lower case characters are allowed
    :param upper: True if upper case characters are allowed
    :param number: True if numbers are allowed
    :return:
    """
    lletters = "abcdefghijklmnopqrstuvwxyz"
    uletters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # This doesn't guarantee both lower and upper cases will show up
    alphabet = lletters + uletters
    digit = "0123456789"
    mypw = ""

    def _random_character(texts):
        return texts[random.randrange(len(texts))]

    if not lower:
        alphabet = uletters
    elif not upper:
        alphabet = lletters

    for i in range(length):
        # last half length will be filled with numbers
        if number and i >= int(length / 2):
            mypw = mypw + _random_character(digit)
        else:
            mypw = mypw + _random_character(alphabet)
    return mypw
python
def generate_password(length=8, lower=True, upper=True, number=True):
    """
    generates a simple password. We should not really use this in production.
    :param length: the length of the password
    :param lower: True of lower case characters are allowed
    :param upper: True if upper case characters are allowed
    :param number: True if numbers are allowed
    :return:
    """
    lletters = "abcdefghijklmnopqrstuvwxyz"
    uletters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # This doesn't guarantee both lower and upper cases will show up
    alphabet = lletters + uletters
    digit = "0123456789"
    mypw = ""

    def _random_character(texts):
        return texts[random.randrange(len(texts))]

    if not lower:
        alphabet = uletters
    elif not upper:
        alphabet = lletters

    for i in range(length):
        # last half length will be filled with numbers
        if number and i >= int(length / 2):
            mypw = mypw + _random_character(digit)
        else:
            mypw = mypw + _random_character(alphabet)
    return mypw
generates a simple password. We should not really use this in production. :param length: the length of the password :param lower: True of lower case characters are allowed :param upper: True if upper case characters are allowed :param number: True if numbers are allowed :return:
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L342-L372
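A usage sketch; the printed values are only examples since the output is random:
# Hypothetical example: letters fill the first half, digits the second half.
from cloudmesh.common.util import generate_password

print(generate_password(length=10))               # e.g. 'kQzrM83152'
print(generate_password(length=8, upper=False))   # lower-case letters and digits only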
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
check_file_for_tabs
def check_file_for_tabs(filename, verbose=True):
    """identifies if the file contains tabs and returns True if it
    does. It also prints the location of the lines and columns. If
    verbose is set to False, the location is not printed.

    :param verbose: if true prints information about issues
    :param filename: the filename
    :rtype: True if there are tabs in the file
    """
    file_contains_tabs = False
    with open(filename) as f:
        lines = f.read().split("\n")

    line_no = 1
    for line in lines:
        if "\t" in line:
            file_contains_tabs = True
            location = [
                i for i in range(len(line)) if line.startswith('\t', i)]
            if verbose:
                Console.error("Tab found in line {} and column(s) {}"
                              .format(line_no,
                                      str(location).replace("[", "").replace(
                                          "]", "")),
                              traceflag=False)
        line_no += 1
    return file_contains_tabs
python
def check_file_for_tabs(filename, verbose=True):
    """identifies if the file contains tabs and returns True if it
    does. It also prints the location of the lines and columns. If
    verbose is set to False, the location is not printed.

    :param verbose: if true prints information about issues
    :param filename: the filename
    :rtype: True if there are tabs in the file
    """
    file_contains_tabs = False
    with open(filename) as f:
        lines = f.read().split("\n")

    line_no = 1
    for line in lines:
        if "\t" in line:
            file_contains_tabs = True
            location = [
                i for i in range(len(line)) if line.startswith('\t', i)]
            if verbose:
                Console.error("Tab found in line {} and column(s) {}"
                              .format(line_no,
                                      str(location).replace("[", "").replace(
                                          "]", "")),
                              traceflag=False)
        line_no += 1
    return file_contains_tabs
identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints information about issues :param filename: the filename :rtype: True if there are tabs in the file
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L31-L57
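A self-contained sketch; the file is created on the fly and the import paths are assumed from the file paths above:
# Hypothetical example: a YAML file containing a tab is flagged and the location reported.
from cloudmesh.common.BaseConfigDict import check_file_for_tabs
from cloudmesh.common.util import writefile

writefile("/tmp/tabbed.yaml", "key:\n\tvalue: 1\n")
print(check_file_for_tabs("/tmp/tabbed.yaml"))   # True, with an error line per tab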
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
ordered_load
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """
    Loads an ordered dict into a yaml while preserving the order

    :param stream: the name of the stream
    :param Loader: the yam loader (such as yaml.SafeLoader)
    :param object_pairs_hook: the ordered dict
    """

    # noinspection PyClassHasNoInit
    class OrderedLoader(Loader):
        """
        A helper class to define an Ordered Loader
        """
        pass

    def construct_mapping(loader, node):
        """
        construct a flattened node mapping
        :param loader: the loader
        :param node: the node dict
        :return:
        """
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    return yaml.load(stream, OrderedLoader)
python
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """
    Loads an ordered dict into a yaml while preserving the order

    :param stream: the name of the stream
    :param Loader: the yam loader (such as yaml.SafeLoader)
    :param object_pairs_hook: the ordered dict
    """

    # noinspection PyClassHasNoInit
    class OrderedLoader(Loader):
        """
        A helper class to define an Ordered Loader
        """
        pass

    def construct_mapping(loader, node):
        """
        construct a flattened node mapping
        :param loader: the loader
        :param node: the node dict
        :return:
        """
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    return yaml.load(stream, OrderedLoader)
Loads an ordered dict into a yaml while preserving the order :param stream: the name of the stream :param Loader: the yam loader (such as yaml.SafeLoader) :param object_pairs_hook: the ordered dict
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L63-L92
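A usage sketch; the YAML snippet is invented and the import path is assumed from the file path above:
# Hypothetical example: keys come back in file order instead of being re-sorted.
import yaml
from cloudmesh.common.BaseConfigDict import ordered_load

document = """
profile:
  firstname: Albert
  lastname: Zweistein
"""
config = ordered_load(document, yaml.SafeLoader)
print(list(config["profile"].keys()))   # ['firstname', 'lastname']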