Dataset schema (string columns show min/max length):

    repo               string    7 / 55
    path               string    4 / 127
    func_name          string    1 / 88
    original_string    string    75 / 19.8k
    language           string    1 value
    code               string    75 / 19.8k
    code_tokens        list
    docstring          string    3 / 17.3k
    docstring_tokens   list
    sha                string    40 / 40
    url                string    87 / 242
    partition          string    1 value
JawboneHealth/jhhalchemy
jhhalchemy/model/__init__.py
Base.read_by
def read_by(cls, removed=False, **kwargs):
    """
    filter_by query helper that handles soft delete logic.

    If your query conditions require expressions, use read.

    :param removed: whether to include soft-deleted rows
    :param kwargs: where clause mappings to pass to filter_by
    :return: row object generator
    """
    if not removed:
        kwargs['time_removed'] = 0
    return cls.query.filter_by(**kwargs)
python
[ "def", "read_by", "(", "cls", ",", "removed", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "removed", ":", "kwargs", "[", "'time_removed'", "]", "=", "0", "return", "cls", ".", "query", ".", "filter_by", "(", "*", "*", "kwargs", ")" ]
filter_by query helper that handles soft delete logic.

If your query conditions require expressions, use read.

:param removed: whether to include soft-deleted rows
:param kwargs: where clause mappings to pass to filter_by
:return: row object generator
[ "filter_by", "query", "helper", "that", "handles", "soft", "delete", "logic", ".", "If", "your", "query", "conditions", "require", "expressions", "use", "read", "." ]
ca0011d644e404561a142c9d7f0a8a569f1f4f27
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/model/__init__.py#L45-L55
train
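A minimal usage sketch for read_by, assuming a hypothetical Device model that inherits from this Base and has the time_removed column the helper relies on (the model and its columns are illustrative, not from the repo):

    # Device is a hypothetical flask_sqlalchemy model built on the Base above.
    live = Device.read_by(name='thermostat')                       # soft-deleted rows excluded
    everything = Device.read_by(name='thermostat', removed=True)   # soft-deleted rows included
    for device in live:
        print(device)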
JawboneHealth/jhhalchemy
jhhalchemy/model/__init__.py
Base.read
def read(cls, *criteria, **kwargs):
    """
    filter query helper that handles soft delete logic.

    If your query conditions do not require expressions, consider using
    read_by.

    :param criteria: where clause conditions
    :param kwargs: set removed=True if you want soft-deleted rows
    :return: row object generator
    """
    if not kwargs.get('removed', False):
        return cls.query.filter(cls.time_removed == 0, *criteria)
    return cls.query.filter(*criteria)
python
[ "def", "read", "(", "cls", ",", "*", "criteria", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ".", "get", "(", "'removed'", ",", "False", ")", ":", "return", "cls", ".", "query", ".", "filter", "(", "cls", ".", "time_removed", "==", "0", ",", "*", "criteria", ")", "return", "cls", ".", "query", ".", "filter", "(", "*", "criteria", ")" ]
filter query helper that handles soft delete logic.

If your query conditions do not require expressions, consider using read_by.

:param criteria: where clause conditions
:param kwargs: set removed=True if you want soft-deleted rows
:return: row object generator
[ "filter", "query", "helper", "that", "handles", "soft", "delete", "logic", ".", "If", "your", "query", "conditions", "do", "not", "require", "expressions", "consider", "using", "read_by", "." ]
ca0011d644e404561a142c9d7f0a8a569f1f4f27
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/model/__init__.py#L58-L69
train
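read trades filter_by's keyword-equality mappings for full filter() expressions, which is what the docstring means by conditions that "require expressions". A sketch under the same hypothetical Device model (time_created is an assumed column):

    # Comparison operators need filter(), not filter_by().
    recent = Device.read(Device.time_created > 1500000000)
    # Pass removed=True to also get soft-deleted rows:
    recent_all = Device.read(Device.time_created > 1500000000, removed=True)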
JawboneHealth/jhhalchemy
jhhalchemy/model/__init__.py
Base.delete
def delete(self, session, commit=True, soft=True):
    """
    Delete a row from the DB.

    :param session: flask_sqlalchemy session object
    :param commit: whether to issue the commit
    :param soft: whether this is a soft delete (i.e., update time_removed)
    """
    if soft:
        self.time_removed = sqlalchemy.func.unix_timestamp()
    else:
        session.delete(self)
    if commit:
        session.commit()
python
[ "def", "delete", "(", "self", ",", "session", ",", "commit", "=", "True", ",", "soft", "=", "True", ")", ":", "if", "soft", ":", "self", ".", "time_removed", "=", "sqlalchemy", ".", "func", ".", "unix_timestamp", "(", ")", "else", ":", "session", ".", "delete", "(", "self", ")", "if", "commit", ":", "session", ".", "commit", "(", ")" ]
Delete a row from the DB.

:param session: flask_sqlalchemy session object
:param commit: whether to issue the commit
:param soft: whether this is a soft delete (i.e., update time_removed)
[ "Delete", "a", "row", "from", "the", "DB", "." ]
ca0011d644e404561a142c9d7f0a8a569f1f4f27
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/model/__init__.py#L71-L85
train
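A soft delete here writes MySQL's UNIX_TIMESTAMP() into time_removed, so the row stays in the table but read/read_by stop returning it. A hedged sketch with the same hypothetical model (session is the flask_sqlalchemy session the docstring names):

    device = Device.read_by(name='thermostat').first()
    device.delete(session)                   # soft delete: time_removed is set
    # Alternatives:
    # device.delete(session, soft=False)     # hard delete: row is removed
    # device.delete(session, commit=False)   # defer the commit to the caller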
gebn/wood
wood/entities.py
Entity.walk_paths
def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \
        -> Iterator[pathlib.PurePath]:
    """
    Recursively traverse all paths inside this entity, including the
    entity itself.

    :param base: The base path to prepend to the entity name.
    :return: An iterator of paths.
    """
    raise NotImplementedError()
python
[ "def", "walk_paths", "(", "self", ",", "base", ":", "Optional", "[", "pathlib", ".", "PurePath", "]", "=", "pathlib", ".", "PurePath", "(", ")", ")", "->", "Iterator", "[", "pathlib", ".", "PurePath", "]", ":", "raise", "NotImplementedError", "(", ")" ]
Recursively traverse all paths inside this entity, including the entity itself.

:param base: The base path to prepend to the entity name.
:return: An iterator of paths.
[ "Recursively", "traverse", "all", "paths", "inside", "this", "entity", "including", "the", "entity", "itself", "." ]
efc71879890dbd2f2d7a0b1a65ed22a0843139dd
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L27-L37
train
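walk_paths is abstract at this level; File and Directory are expected to implement it. As a sketch of the contract, a leaf entity could satisfy it by yielding a single path, assuming a subclass with a name attribute (this implementation is an assumption, not the repo's code):

    import pathlib
    from typing import Iterator, Optional

    class LeafEntity:
        def __init__(self, name: str):
            self.name = name

        def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \
                -> Iterator[pathlib.PurePath]:
            # A leaf contributes exactly one path: the base joined with its name.
            yield base / self.name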
gebn/wood
wood/entities.py
Entity._walk_paths
def _walk_paths(self, base: pathlib.PurePath) \
        -> Iterator[pathlib.PurePath]:
    """
    Internal helper for walking paths. This is required to exclude the
    name of the root entity from the walk.

    :param base: The base path to prepend to the entity name.
    :return: An iterator of paths.
    """
    return self.walk_paths(base)
python
[ "def", "_walk_paths", "(", "self", ",", "base", ":", "pathlib", ".", "PurePath", ")", "->", "Iterator", "[", "pathlib", ".", "PurePath", "]", ":", "return", "self", ".", "walk_paths", "(", "base", ")" ]
Internal helper for walking paths. This is required to exclude the name of the root entity from the walk.

:param base: The base path to prepend to the entity name.
:return: An iterator of paths.
[ "Internal", "helper", "for", "walking", "paths", ".", "This", "is", "required", "to", "exclude", "the", "name", "of", "the", "root", "entity", "from", "the", "walk", "." ]
efc71879890dbd2f2d7a0b1a65ed22a0843139dd
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L39-L48
train
gebn/wood
wood/entities.py
Entity.from_path
def from_path(cls, path: pathlib.Path) -> 'Entity':
    """
    Create an entity from a local path.

    :param path: The path to the entity, either a file or directory.
    :return: An entity instance representing the path.
    """
    if path.is_file():
        return File.from_path(path)
    return Directory.from_path(path)
python
[ "def", "from_path", "(", "cls", ",", "path", ":", "pathlib", ".", "Path", ")", "->", "'Entity'", ":", "if", "path", ".", "is_file", "(", ")", ":", "return", "File", ".", "from_path", "(", "path", ")", "return", "Directory", ".", "from_path", "(", "path", ")" ]
Create an entity from a local path.

:param path: The path to the entity, either a file or directory.
:return: An entity instance representing the path.
[ "Create", "an", "entity", "from", "a", "local", "path", "." ]
efc71879890dbd2f2d7a0b1a65ed22a0843139dd
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L61-L70
train
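Since from_path dispatches on path.is_file(), one call builds a whole entity tree: Directory.from_path (further down in this dump) recurses back into Entity.from_path for each child. Usage sketch:

    import pathlib

    tree = Entity.from_path(pathlib.Path('.'))  # a File or a Directory
    for p in tree.walk_paths():
        print(p)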
gebn/wood
wood/entities.py
File._md5
def _md5(path: pathlib.PurePath):
    """
    Calculate the MD5 checksum of a file.

    :param path: The path of the file whose checksum to calculate.
    :return: The lowercase hex representation of the file's MD5 checksum,
             exactly 32 chars long.
    """
    hash_ = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            hash_.update(chunk)
    return hash_.hexdigest()
python
[ "def", "_md5", "(", "path", ":", "pathlib", ".", "PurePath", ")", ":", "hash_", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "4096", ")", ",", "b''", ")", ":", "hash_", ".", "update", "(", "chunk", ")", "return", "hash_", ".", "hexdigest", "(", ")" ]
Calculate the MD5 checksum of a file.

:param path: The path of the file whose checksum to calculate.
:return: The lowercase hex representation of the file's MD5 checksum, exactly 32 chars long.
[ "Calculate", "the", "MD5", "checksum", "of", "a", "file", "." ]
efc71879890dbd2f2d7a0b1a65ed22a0843139dd
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L119-L131
train
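_md5 streams the file in 4096-byte chunks using two-argument iter() with b'' as the sentinel, so memory use stays flat regardless of file size. The same pattern works standalone:

    import hashlib

    def md5_of(path):
        hash_ = hashlib.md5()
        with open(path, 'rb') as f:
            # f.read(4096) returns b'' at EOF, which stops iter().
            for chunk in iter(lambda: f.read(4096), b''):
                hash_.update(chunk)
        return hash_.hexdigest()  # 32 lowercase hex characters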
gebn/wood
wood/entities.py
File.from_path
def from_path(cls, path: pathlib.Path) -> 'File':
    """
    Create a file entity from a file path.

    :param path: The path of the file.
    :return: A file entity instance representing the file.
    :raises ValueError: If the path does not point to a file.
    """
    if not path.is_file():
        raise ValueError('Path does not point to a file')
    return File(path.name, path.stat().st_size, cls._md5(path))
python
[ "def", "from_path", "(", "cls", ",", "path", ":", "pathlib", ".", "Path", ")", "->", "'File'", ":", "if", "not", "path", ".", "is_file", "(", ")", ":", "raise", "ValueError", "(", "'Path does not point to a file'", ")", "return", "File", "(", "path", ".", "name", ",", "path", ".", "stat", "(", ")", ".", "st_size", ",", "cls", ".", "_md5", "(", "path", ")", ")" ]
Create a file entity from a file path.

:param path: The path of the file.
:return: A file entity instance representing the file.
:raises ValueError: If the path does not point to a file.
[ "Create", "a", "file", "entity", "from", "a", "file", "path", "." ]
efc71879890dbd2f2d7a0b1a65ed22a0843139dd
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L134-L144
train
gebn/wood
wood/entities.py
Directory.from_path
def from_path(cls, path: pathlib.Path) -> 'Directory':
    """
    Create a directory entity from a directory path.

    :param path: The path of the directory.
    :return: A directory entity instance representing the directory.
    :raises ValueError: If the path does not point to a directory.
    """
    if not path.is_dir():
        raise ValueError('Path does not point to a directory')
    return Directory(path.name,
                     {entity.name: Entity.from_path(entity)
                      for entity in path.iterdir()})
python
[ "def", "from_path", "(", "cls", ",", "path", ":", "pathlib", ".", "Path", ")", "->", "'Directory'", ":", "if", "not", "path", ".", "is_dir", "(", ")", ":", "raise", "ValueError", "(", "'Path does not point to a directory'", ")", "return", "Directory", "(", "path", ".", "name", ",", "{", "entity", ".", "name", ":", "Entity", ".", "from_path", "(", "entity", ")", "for", "entity", "in", "path", ".", "iterdir", "(", ")", "}", ")" ]
Create a directory entity from a directory path.

:param path: The path of the directory.
:return: A directory entity instance representing the directory.
:raises ValueError: If the path does not point to a directory.
[ "Create", "a", "directory", "entity", "from", "a", "directory", "path", "." ]
efc71879890dbd2f2d7a0b1a65ed22a0843139dd
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L186-L197
train
rhayes777/PyAutoFit
autofit/optimize/grid_search.py
GridSearchResult.best_result
def best_result(self):
    """
    The best result of the grid search. That is, the result output by
    the non linear search that had the highest maximum figure of merit.

    Returns
    -------
    best_result: Result
    """
    best_result = None
    for result in self.results:
        if best_result is None or result.figure_of_merit > best_result.figure_of_merit:
            best_result = result
    return best_result
python
[ "def", "best_result", "(", "self", ")", ":", "best_result", "=", "None", "for", "result", "in", "self", ".", "results", ":", "if", "best_result", "is", "None", "or", "result", ".", "figure_of_merit", ">", "best_result", ".", "figure_of_merit", ":", "best_result", "=", "result", "return", "best_result" ]
The best result of the grid search. That is, the result output by the non linear search that had the highest maximum figure of merit.

Returns
-------
best_result: Result
[ "The", "best", "result", "of", "the", "grid", "search", ".", "That", "is", "the", "result", "output", "by", "the", "non", "linear", "search", "that", "had", "the", "highest", "maximum", "figure", "of", "merit", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/grid_search.py#L36-L49
train
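The loop in best_result is the hand-rolled form of max() keyed on figure_of_merit; the only behavioural difference is that the loop returns None for an empty results list, where max() would raise ValueError:

    # Equivalent for a non-empty self.results:
    best = max(self.results, key=lambda result: result.figure_of_merit)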
rhayes777/PyAutoFit
autofit/optimize/grid_search.py
GridSearch.make_lists
def make_lists(self, grid_priors):
    """
    Produces a list of lists of floats, where each list of floats
    represents the values in each dimension for one step of the grid
    search.

    Parameters
    ----------
    grid_priors: [p.Prior]
        A list of priors that are to be searched using the grid search.

    Returns
    -------
    lists: [[float]]
    """
    return optimizer.make_lists(len(grid_priors),
                                step_size=self.hyper_step_size,
                                centre_steps=False)
python
[ "def", "make_lists", "(", "self", ",", "grid_priors", ")", ":", "return", "optimizer", ".", "make_lists", "(", "len", "(", "grid_priors", ")", ",", "step_size", "=", "self", ".", "hyper_step_size", ",", "centre_steps", "=", "False", ")" ]
Produces a list of lists of floats, where each list of floats represents the values in each dimension for one step of the grid search.

Parameters
----------
grid_priors: [p.Prior]
    A list of priors that are to be searched using the grid search.

Returns
-------
lists: [[float]]
[ "Produces", "a", "list", "of", "lists", "of", "floats", "where", "each", "list", "of", "floats", "represents", "the", "values", "in", "each", "dimension", "for", "one", "step", "of", "the", "grid", "search", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/grid_search.py#L147-L161
train
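optimizer.make_lists itself is not part of this dump, but per the docstring it enumerates one list of per-dimension values for every grid step. A hedged sketch of what such a helper might look like, assuming centre_steps=False means values start at each cell's lower edge (the signature is taken from the call above; the body is an assumption):

    import itertools

    def make_lists(grid_size, step_size, centre_steps=False):
        # Build one axis of values, then take its Cartesian product
        # across all grid_size dimensions.
        offset = step_size / 2 if centre_steps else 0.0
        steps = int(round(1 / step_size))
        axis = [i * step_size + offset for i in range(steps)]
        return [list(values) for values in itertools.product(*[axis] * grid_size)]

    # make_lists(2, 0.5) -> [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]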
rhayes777/PyAutoFit
autofit/optimize/grid_search.py
GridSearch.fit
def fit(self, analysis, grid_priors):
    """
    Fit an analysis with a set of grid priors.

    The grid priors are priors associated with the model mapper of this
    instance that are replaced by uniform priors for each step of the
    grid search.

    Parameters
    ----------
    analysis: non_linear.Analysis
        An analysis used to determine the fitness of a given model
        instance
    grid_priors: [p.Prior]
        A list of priors to be substituted for uniform priors across
        the grid.

    Returns
    -------
    result: GridSearchResult
        An object that comprises the results from each individual fit
    """
    grid_priors = list(set(grid_priors))
    results = []
    lists = self.make_lists(grid_priors)

    results_list = [list(map(self.variable.name_for_prior, grid_priors))
                    + ["figure_of_merit"]]

    def write_results():
        with open("{}/results".format(self.phase_output_path), "w+") as f:
            f.write("\n".join(map(lambda ls: ", ".join(
                map(lambda value: "{:.2f}".format(value)
                    if isinstance(value, float) else str(value), ls)),
                results_list)))

    for values in lists:
        arguments = self.make_arguments(values, grid_priors)
        model_mapper = self.variable.mapper_from_partial_prior_arguments(arguments)
        labels = []
        for prior in arguments.values():
            labels.append("{}_{:.2f}_{:.2f}".format(
                model_mapper.name_for_prior(prior),
                prior.lower_limit,
                prior.upper_limit))
        name_path = "{}{}/{}".format(self.phase_name, self.phase_tag,
                                     "_".join(labels))
        optimizer_instance = self.optimizer_instance(model_mapper, name_path)
        optimizer_instance.constant = self.constant
        result = optimizer_instance.fit(analysis)
        results.append(result)
        results_list.append([*[prior.lower_limit for prior in arguments.values()],
                             result.figure_of_merit])
        write_results()

    return GridSearchResult(results, lists)
python
[ "def", "fit", "(", "self", ",", "analysis", ",", "grid_priors", ")", ":", "grid_priors", "=", "list", "(", "set", "(", "grid_priors", ")", ")", "results", "=", "[", "]", "lists", "=", "self", ".", "make_lists", "(", "grid_priors", ")", "results_list", "=", "[", "list", "(", "map", "(", "self", ".", "variable", ".", "name_for_prior", ",", "grid_priors", ")", ")", "+", "[", "\"figure_of_merit\"", "]", "]", "def", "write_results", "(", ")", ":", "with", "open", "(", "\"{}/results\"", ".", "format", "(", "self", ".", "phase_output_path", ")", ",", "\"w+\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "ls", ":", "\", \"", ".", "join", "(", "map", "(", "lambda", "value", ":", "\"{:.2f}\"", ".", "format", "(", "value", ")", "if", "isinstance", "(", "value", ",", "float", ")", "else", "str", "(", "value", ")", ",", "ls", ")", ")", ",", "results_list", ")", ")", ")", "for", "values", "in", "lists", ":", "arguments", "=", "self", ".", "make_arguments", "(", "values", ",", "grid_priors", ")", "model_mapper", "=", "self", ".", "variable", ".", "mapper_from_partial_prior_arguments", "(", "arguments", ")", "labels", "=", "[", "]", "for", "prior", "in", "arguments", ".", "values", "(", ")", ":", "labels", ".", "append", "(", "\"{}_{:.2f}_{:.2f}\"", ".", "format", "(", "model_mapper", ".", "name_for_prior", "(", "prior", ")", ",", "prior", ".", "lower_limit", ",", "prior", ".", "upper_limit", ")", ")", "name_path", "=", "\"{}{}/{}\"", ".", "format", "(", "self", ".", "phase_name", ",", "self", ".", "phase_tag", ",", "\"_\"", ".", "join", "(", "labels", ")", ")", "optimizer_instance", "=", "self", ".", "optimizer_instance", "(", "model_mapper", ",", "name_path", ")", "optimizer_instance", ".", "constant", "=", "self", ".", "constant", "result", "=", "optimizer_instance", ".", "fit", "(", "analysis", ")", "results", ".", "append", "(", "result", ")", "results_list", ".", "append", "(", "[", "*", "[", "prior", ".", "lower_limit", "for", "prior", "in", "arguments", ".", "values", "(", ")", "]", ",", "result", ".", "figure_of_merit", "]", ")", "write_results", "(", ")", "return", "GridSearchResult", "(", "results", ",", "lists", ")" ]
Fit an analysis with a set of grid priors. The grid priors are priors associated with the model mapper of this instance that are replaced by uniform priors for each step of the grid search.

Parameters
----------
analysis: non_linear.Analysis
    An analysis used to determine the fitness of a given model instance
grid_priors: [p.Prior]
    A list of priors to be substituted for uniform priors across the grid.

Returns
-------
result: GridSearchResult
    An object that comprises the results from each individual fit
[ "Fit", "an", "analysis", "with", "a", "set", "of", "grid", "priors", ".", "The", "grid", "priors", "are", "priors", "associated", "with", "the", "model", "mapper", "of", "this", "instance", "that", "are", "replaced", "by", "uniform", "priors", "for", "each", "step", "of", "the", "grid", "search", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/grid_search.py#L181-L229
train
Nic30/hwtGraph
hwtGraph/elk/fromHwt/resolveSharedConnections.py
portTryReduce
def portTryReduce(root: LNode, port: LPort):
    """
    Check if the majority of children are connected to the same port;
    if so, reduce the children and connect this port instead of the
    children.

    :note: use reduceUselessAssignments, extractSplits, flattenTrees
        before this function to maximize its effect
    """
    if not port.children:
        return

    for p in port.children:
        portTryReduce(root, p)

    target_nodes = {}
    ch_cnt = countDirectlyConnected(port, target_nodes)
    if not target_nodes:
        # disconnected port
        return

    new_target, children_edge_to_destroy = max(target_nodes.items(),
                                               key=lambda x: len(x[1]))
    cnt = len(children_edge_to_destroy)
    if cnt < ch_cnt / 2 or cnt == 1 and ch_cnt == 2:
        # too few shared connections to reduce
        return

    children_to_destroy = set()
    on_target_children_to_destroy = set()
    for child, edge in children_edge_to_destroy:
        if child.direction == PortType.OUTPUT:
            target_ch = edge.dsts
        elif child.direction == PortType.INPUT:
            target_ch = edge.srcs
        else:
            raise ValueError(child.direction)

        if len(target_ch) != 1:
            raise NotImplementedError("multiple connected nodes", target_ch)
        target_ch = target_ch[0]
        try:
            assert target_ch.parent is new_target, (
                target_ch, target_ch.parent, new_target)
        except AssertionError:
            print('Wrong target:\n', edge.src, "\n", edge.dst, "\n",
                  target_ch.parent, "\n", new_target)
            raise

        if child.direction == PortType.OUTPUT:
            edge.removeTarget(target_ch)
        elif child.direction == PortType.INPUT:
            edge.removeTarget(child)

        if not edge.srcs or not edge.dsts:
            edge.remove()

        if not target_ch.incomingEdges and not target_ch.outgoingEdges:
            # disconnect selected children from this port and target
            on_target_children_to_destroy.add(target_ch)

        if not child.incomingEdges and not child.outgoingEdges:
            children_to_destroy.add(child)

    # destroy children of new target and this port if possible
    port.children = [
        ch for ch in port.children if ch not in children_to_destroy]
    new_target.children = [
        ch for ch in new_target.children
        if ch not in on_target_children_to_destroy]

    # connect this port to new target as it was connected by children before
    # [TODO] names for new edges
    if port.direction == PortType.OUTPUT:
        root.addEdge(port, new_target)
    elif port.direction == PortType.INPUT:
        root.addEdge(new_target, port)
    else:
        raise NotImplementedError(port.direction)
python
[ "def", "portTryReduce", "(", "root", ":", "LNode", ",", "port", ":", "LPort", ")", ":", "if", "not", "port", ".", "children", ":", "return", "for", "p", "in", "port", ".", "children", ":", "portTryReduce", "(", "root", ",", "p", ")", "target_nodes", "=", "{", "}", "ch_cnt", "=", "countDirectlyConnected", "(", "port", ",", "target_nodes", ")", "if", "not", "target_nodes", ":", "# disconnected port", "return", "new_target", ",", "children_edge_to_destroy", "=", "max", "(", "target_nodes", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "1", "]", ")", ")", "cnt", "=", "len", "(", "children_edge_to_destroy", ")", "if", "cnt", "<", "ch_cnt", "/", "2", "or", "cnt", "==", "1", "and", "ch_cnt", "==", "2", ":", "# too small to few shared connection to reduce", "return", "children_to_destroy", "=", "set", "(", ")", "on_target_children_to_destroy", "=", "set", "(", ")", "for", "child", ",", "edge", "in", "children_edge_to_destroy", ":", "if", "child", ".", "direction", "==", "PortType", ".", "OUTPUT", ":", "target_ch", "=", "edge", ".", "dsts", "elif", "child", ".", "direction", "==", "PortType", ".", "INPUT", ":", "target_ch", "=", "edge", ".", "srcs", "else", ":", "raise", "ValueError", "(", "child", ".", "direction", ")", "if", "len", "(", "target_ch", ")", "!=", "1", ":", "raise", "NotImplementedError", "(", "\"multiple connected nodes\"", ",", "target_ch", ")", "target_ch", "=", "target_ch", "[", "0", "]", "try", ":", "assert", "target_ch", ".", "parent", "is", "new_target", ",", "(", "target_ch", ",", "target_ch", ".", "parent", ",", "new_target", ")", "except", "AssertionError", ":", "print", "(", "'Wrong target:\\n'", ",", "edge", ".", "src", ",", "\"\\n\"", ",", "edge", ".", "dst", ",", "\"\\n\"", ",", "target_ch", ".", "parent", ",", "\"\\n\"", ",", "new_target", ")", "raise", "if", "child", ".", "direction", "==", "PortType", ".", "OUTPUT", ":", "edge", ".", "removeTarget", "(", "target_ch", ")", "elif", "child", ".", "direction", "==", "PortType", ".", "INPUT", ":", "edge", ".", "removeTarget", "(", "child", ")", "if", "not", "edge", ".", "srcs", "or", "not", "edge", ".", "dsts", ":", "edge", ".", "remove", "(", ")", "if", "not", "target_ch", ".", "incomingEdges", "and", "not", "target_ch", ".", "outgoingEdges", ":", "# disconnect selected children from this port and target", "on_target_children_to_destroy", ".", "add", "(", "target_ch", ")", "if", "not", "child", ".", "incomingEdges", "and", "not", "child", ".", "outgoingEdges", ":", "children_to_destroy", ".", "add", "(", "child", ")", "# destroy children of new target and this port if possible", "port", ".", "children", "=", "[", "ch", "for", "ch", "in", "port", ".", "children", "if", "ch", "not", "in", "children_to_destroy", "]", "new_target", ".", "children", "=", "[", "ch", "for", "ch", "in", "new_target", ".", "children", "if", "ch", "not", "in", "on_target_children_to_destroy", "]", "# connect this port to new target as it was connected by children before", "# [TODO] names for new edges", "if", "port", ".", "direction", "==", "PortType", ".", "OUTPUT", ":", "root", ".", "addEdge", "(", "port", ",", "new_target", ")", "elif", "port", ".", "direction", "==", "PortType", ".", "INPUT", ":", "root", ".", "addEdge", "(", "new_target", ",", "port", ")", "else", ":", "raise", "NotImplementedError", "(", "port", ".", "direction", ")" ]
Check if the majority of children are connected to the same port; if so, reduce the children and connect this port instead of the children.

:note: use reduceUselessAssignments, extractSplits, flattenTrees before this function to maximize its effect
[ "Check", "if", "majority", "of", "children", "is", "connected", "to", "same", "port", "if", "it", "is", "the", "case", "reduce", "children", "and", "connect", "this", "port", "instead", "children" ]
6b7d4fdd759f263a0fdd2736f02f123e44e4354f
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/resolveSharedConnections.py#L6-L84
train
Nic30/hwtGraph
hwtGraph/elk/fromHwt/resolveSharedConnections.py
resolveSharedConnections
def resolveSharedConnections(root: LNode):
    """
    Walk all ports on all nodes and group subinterface connections
    into a single parent interface connection where possible.
    """
    for ch in root.children:
        resolveSharedConnections(ch)

    for ch in root.children:
        for p in ch.iterPorts():
            portTryReduce(root, p)
python
[ "def", "resolveSharedConnections", "(", "root", ":", "LNode", ")", ":", "for", "ch", "in", "root", ".", "children", ":", "resolveSharedConnections", "(", "ch", ")", "for", "ch", "in", "root", ".", "children", ":", "for", "p", "in", "ch", ".", "iterPorts", "(", ")", ":", "portTryReduce", "(", "root", ",", "p", ")" ]
Walk all ports on all nodes and group subinterface connections into a single parent interface connection where possible.
[ "Walk", "all", "ports", "on", "all", "nodes", "and", "group", "subinterface", "connections", "to", "only", "parent", "interface", "connection", "if", "it", "is", "possible" ]
6b7d4fdd759f263a0fdd2736f02f123e44e4354f
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/resolveSharedConnections.py#L87-L97
train
Nic30/hwtGraph
hwtGraph/elk/fromHwt/resolveSharedConnections.py
countDirectlyConnected
def countDirectlyConnected(port: LPort, result: dict) -> int:
    """
    Count how many ports are directly connected to other nodes

    :return: cumulative sum of port counts
    """
    inEdges = port.incomingEdges
    outEdges = port.outgoingEdges

    if port.children:
        ch_cnt = 0
        # try:
        #     assert not inEdges, (port, port.children, inEdges)
        #     assert not outEdges, (port, port.children, outEdges)
        # except AssertionError:
        #     raise
        for ch in port.children:
            ch_cnt += countDirectlyConnected(ch, result)
        return ch_cnt
    elif not inEdges and not outEdges:
        # this port is not connected, just check if it expected state
        if port.direction == PortType.INPUT:
            if port.originObj is not None:
                assert not port.originObj.src.drivers, port.originObj
            else:
                print("Warning", port, "not connected")
        return 0
    else:
        connectedElemCnt = 0
        for e in inEdges:
            connectedElemCnt += len(e.srcs)
            if connectedElemCnt > 1:
                return 0

        for e in outEdges:
            connectedElemCnt += len(e.dsts)
            if connectedElemCnt > 1:
                return 0

        if connectedElemCnt != 1:
            return 0

        if inEdges:
            e = inEdges[0]
        else:
            e = outEdges[0]

        # if is connected to different port
        if e.srcs[0].name != e.dsts[0].name:
            return 0

        if e.srcs[0] is port:
            p = e.dsts[0].parent
        else:
            # (can be hyperedge and then this does not have to be)
            # assert e.dsts[0] is port, (e, port)
            p = e.srcs[0].parent

        # if is part of interface which can be reduced
        if not isinstance(p, LNode):
            connections = result.get(p, [])
            connections.append((port, e))
            result[p] = connections

        return 1
python
[ "def", "countDirectlyConnected", "(", "port", ":", "LPort", ",", "result", ":", "dict", ")", "->", "int", ":", "inEdges", "=", "port", ".", "incomingEdges", "outEdges", "=", "port", ".", "outgoingEdges", "if", "port", ".", "children", ":", "ch_cnt", "=", "0", "# try:", "# assert not inEdges, (port, port.children, inEdges)", "# assert not outEdges, (port, port.children, outEdges)", "# except AssertionError:", "# raise", "for", "ch", "in", "port", ".", "children", ":", "ch_cnt", "+=", "countDirectlyConnected", "(", "ch", ",", "result", ")", "return", "ch_cnt", "elif", "not", "inEdges", "and", "not", "outEdges", ":", "# this port is not connected, just check if it expected state", "if", "port", ".", "direction", "==", "PortType", ".", "INPUT", ":", "if", "port", ".", "originObj", "is", "not", "None", ":", "assert", "not", "port", ".", "originObj", ".", "src", ".", "drivers", ",", "port", ".", "originObj", "else", ":", "print", "(", "\"Warning\"", ",", "port", ",", "\"not connected\"", ")", "return", "0", "else", ":", "connectedElemCnt", "=", "0", "for", "e", "in", "inEdges", ":", "connectedElemCnt", "+=", "len", "(", "e", ".", "srcs", ")", "if", "connectedElemCnt", ">", "1", ":", "return", "0", "for", "e", "in", "outEdges", ":", "connectedElemCnt", "+=", "len", "(", "e", ".", "dsts", ")", "if", "connectedElemCnt", ">", "1", ":", "return", "0", "if", "connectedElemCnt", "!=", "1", ":", "return", "0", "if", "inEdges", ":", "e", "=", "inEdges", "[", "0", "]", "else", ":", "e", "=", "outEdges", "[", "0", "]", "# if is connected to different port", "if", "e", ".", "srcs", "[", "0", "]", ".", "name", "!=", "e", ".", "dsts", "[", "0", "]", ".", "name", ":", "return", "0", "if", "e", ".", "srcs", "[", "0", "]", "is", "port", ":", "p", "=", "e", ".", "dsts", "[", "0", "]", ".", "parent", "else", ":", "# (can be hyperedge and then this does not have to be)", "# assert e.dsts[0] is port, (e, port)", "p", "=", "e", ".", "srcs", "[", "0", "]", ".", "parent", "# if is part of interface which can be reduced", "if", "not", "isinstance", "(", "p", ",", "LNode", ")", ":", "connections", "=", "result", ".", "get", "(", "p", ",", "[", "]", ")", "connections", ".", "append", "(", "(", "port", ",", "e", ")", ")", "result", "[", "p", "]", "=", "connections", "return", "1" ]
Count how many ports are directly connected to other nodes.

:return: cumulative sum of port counts
[ "Count", "how", "many", "ports", "are", "directly", "connected", "to", "other", "nodes" ]
6b7d4fdd759f263a0fdd2736f02f123e44e4354f
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/resolveSharedConnections.py#L110-L176
train
redhat-openstack/python-tripleo-helper
tripleohelper/ovb_baremetal.py
Baremetal.deploy
def deploy(self, image_name, ip, flavor='m1.small'):
    """Create the node.

    This method should only be called by the BaremetalFactory.
    """
    body_value = {
        "port": {
            "admin_state_up": True,
            "name": self.name + '_provision',
            "network_id": os_utils.get_network_id(self.nova_api, 'provision_bob'),
            'fixed_ips': [{'ip_address': ip}]}}
    response = self.neutron.create_port(body=body_value)
    self._provision_port_id = response['port']['id']
    self.mac = response['port']['mac_address']
    image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)
    flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)
    # TODO(Gonéri): We don't need keypair for the BM nodes
    keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)
    # Ensure we get a DHCP lease on the provision network first
    nics = [{'port-id': self._provision_port_id}]
    self._os_instance = os_provisioner.build_openstack_instance(
        self.nova_api,
        self.name,
        image_id_to_boot_from,
        flavor_id,
        keypair_id,
        nics)
    if not self._os_instance:
        LOG.error("deployment has failed")
        raise Exception()
    os_provisioner.add_provision_security_group(self.nova_api)
    os_utils.add_security_groups(self._os_instance, ['provision'])
    os_utils.add_security_groups(self._os_instance, self._security_groups)
    LOG.info("add security groups '%s'" % self._security_groups)
    LOG.info("instance '%s' ready to use" % self.name)
    # the instance should be off for Ironic
    self._os_instance.stop()
python
[ "def", "deploy", "(", "self", ",", "image_name", ",", "ip", ",", "flavor", "=", "'m1.small'", ")", ":", "body_value", "=", "{", "\"port\"", ":", "{", "\"admin_state_up\"", ":", "True", ",", "\"name\"", ":", "self", ".", "name", "+", "'_provision'", ",", "\"network_id\"", ":", "os_utils", ".", "get_network_id", "(", "self", ".", "nova_api", ",", "'provision_bob'", ")", ",", "'fixed_ips'", ":", "[", "{", "'ip_address'", ":", "ip", "}", "]", "}", "}", "response", "=", "self", ".", "neutron", ".", "create_port", "(", "body", "=", "body_value", ")", "self", ".", "_provision_port_id", "=", "response", "[", "'port'", "]", "[", "'id'", "]", "self", ".", "mac", "=", "response", "[", "'port'", "]", "[", "'mac_address'", "]", "image_id_to_boot_from", "=", "os_utils", ".", "get_image_id", "(", "self", ".", "nova_api", ",", "image_name", ")", "flavor_id", "=", "os_utils", ".", "get_flavor_id", "(", "self", ".", "nova_api", ",", "flavor", ")", "# TODO(Gonéri): We don't need keypair for the BM nodes", "keypair_id", "=", "os_utils", ".", "get_keypair_id", "(", "self", ".", "nova_api", ",", "self", ".", "_keypair", ")", "# Ensure with get DHCP lease on the provision network first", "nics", "=", "[", "{", "'port-id'", ":", "self", ".", "_provision_port_id", "}", "]", "self", ".", "_os_instance", "=", "os_provisioner", ".", "build_openstack_instance", "(", "self", ".", "nova_api", ",", "self", ".", "name", ",", "image_id_to_boot_from", ",", "flavor_id", ",", "keypair_id", ",", "nics", ")", "if", "not", "self", ".", "_os_instance", ":", "LOG", ".", "error", "(", "\"deployment has failed\"", ")", "raise", "Exception", "(", ")", "os_provisioner", ".", "add_provision_security_group", "(", "self", ".", "nova_api", ")", "os_utils", ".", "add_security_groups", "(", "self", ".", "_os_instance", ",", "[", "'provision'", "]", ")", "os_utils", ".", "add_security_groups", "(", "self", ".", "_os_instance", ",", "self", ".", "_security_groups", ")", "LOG", ".", "info", "(", "\"add security groups '%s'\"", "%", "self", ".", "_security_groups", ")", "LOG", ".", "info", "(", "\"instance '%s' ready to use\"", "%", "self", ".", "name", ")", "# the instance should be off for Ironic", "self", ".", "_os_instance", ".", "stop", "(", ")" ]
Create the node. This method should only be called by the BaremetalFactory.
[ "Create", "the", "node", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ovb_baremetal.py#L47-L88
train
redhat-openstack/python-tripleo-helper
tripleohelper/ovb_baremetal.py
Baremetal.pxe_netboot
def pxe_netboot(self, filename):
    """Specify which file ipxe should load during the netboot."""
    new_port = {
        'extra_dhcp_opts': [
            {'opt_name': 'bootfile-name',
             'opt_value': 'http://192.0.2.240:8088/' + filename,
             'ip_version': 4},
            {'opt_name': 'tftp-server',
             'opt_value': '192.0.2.240',
             'ip_version': '4'},
            {'opt_name': 'server-ip-address',
             'opt_value': '192.0.2.240',
             'ip_version': '4'}
        ]
    }
    self.neutron.update_port(self._provision_port_id, {'port': new_port})
python
[ "def", "pxe_netboot", "(", "self", ",", "filename", ")", ":", "new_port", "=", "{", "'extra_dhcp_opts'", ":", "[", "{", "'opt_name'", ":", "'bootfile-name'", ",", "'opt_value'", ":", "'http://192.0.2.240:8088/'", "+", "filename", ",", "'ip_version'", ":", "4", ",", "}", ",", "{", "'opt_name'", ":", "'tftp-server'", ",", "'opt_value'", ":", "'192.0.2.240'", ",", "'ip_version'", ":", "'4'", "}", ",", "{", "'opt_name'", ":", "'server-ip-address'", ",", "'opt_value'", ":", "'192.0.2.240'", ",", "'ip_version'", ":", "'4'", "}", "]", "}", "self", ".", "neutron", ".", "update_port", "(", "self", ".", "_provision_port_id", ",", "{", "'port'", ":", "new_port", "}", ")" ]
Specify which file ipxe should load during the netboot.
[ "Specify", "which", "file", "ipxe", "should", "load", "during", "the", "netboot", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ovb_baremetal.py#L94-L103
train
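Because pxe_netboot only rewrites DHCP options on the already-created provision port, it can be called between reboots to point the node at a different iPXE script, e.g. (the filename is illustrative):

    node.pxe_netboot('undercloud.ipxe')  # the next netboot chain-loads this file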
redhat-openstack/python-tripleo-helper
tripleohelper/ovb_baremetal.py
BaremetalFactory.initialize
def initialize(self, size=2):
    """Populate the node pool.

    :param size: the number of nodes to create.
    """
    # The IP should be in this range, this is the default DHCP range
    # used by the introspection.
    # inspection_iprange = 192.0.2.100,192.0.2.120
    for i in range(0, size):
        self.nodes.append(
            Baremetal(
                self.nova_api,
                self.neutron,
                self._keypair,
                self._key_filename,
                self._security_groups,
                name='baremetal_%d' % i))
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        for bm_node in self.nodes:
            future = executor.submit(
                bm_node.deploy,
                'ipxe.usb',
                '192.0.2.%d' % self._idx,
                flavor='m1.large')
            self._idx += 1
            bm_node._future = future

    for bm_node in self.nodes:
        bm_node._future.result()
        pm_addr = self.bmc.register_host(bm_node.name)
        self.instackenv.append({
            "pm_type": "pxe_ipmitool",
            "mac": [bm_node.mac],
            # TODO(Gonéri): We should get this information from the
            # baremetal node's flavor
            "cpu": "4",
            "memory": "8196",
            "disk": "80",
            "arch": "x86_64",
            "pm_user": "admin",
            "pm_password": "password",
            "pm_addr": pm_addr
        })
    self.bmc.ssh_pool.stop_all()
python
[ "def", "initialize", "(", "self", ",", "size", "=", "2", ")", ":", "# The IP should be in this range, this is the default DHCP range used by the introspection.", "# inspection_iprange = 192.0.2.100,192.0.2.120", "for", "i", "in", "range", "(", "0", ",", "size", ")", ":", "self", ".", "nodes", ".", "append", "(", "Baremetal", "(", "self", ".", "nova_api", ",", "self", ".", "neutron", ",", "self", ".", "_keypair", ",", "self", ".", "_key_filename", ",", "self", ".", "_security_groups", ",", "name", "=", "'baremetal_%d'", "%", "i", ")", ")", "with", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "5", ")", "as", "executor", ":", "for", "bm_node", "in", "self", ".", "nodes", ":", "future", "=", "executor", ".", "submit", "(", "bm_node", ".", "deploy", ",", "'ipxe.usb'", ",", "'192.0.2.%d'", "%", "self", ".", "_idx", ",", "flavor", "=", "'m1.large'", ")", "self", ".", "_idx", "+=", "1", "bm_node", ".", "_future", "=", "future", "for", "bm_node", "in", "self", ".", "nodes", ":", "bm_node", ".", "_future", ".", "result", "(", ")", "pm_addr", "=", "self", ".", "bmc", ".", "register_host", "(", "bm_node", ".", "name", ")", "self", ".", "instackenv", ".", "append", "(", "{", "\"pm_type\"", ":", "\"pxe_ipmitool\"", ",", "\"mac\"", ":", "[", "bm_node", ".", "mac", "]", ",", "# TODO(Gonéri): We should get these informations from the baremetal node's flavor", "\"cpu\"", ":", "\"4\"", ",", "\"memory\"", ":", "\"8196\"", ",", "\"disk\"", ":", "\"80\"", ",", "\"arch\"", ":", "\"x86_64\"", ",", "\"pm_user\"", ":", "\"admin\"", ",", "\"pm_password\"", ":", "\"password\"", ",", "\"pm_addr\"", ":", "pm_addr", "}", ")", "self", ".", "bmc", ".", "ssh_pool", ".", "stop_all", "(", ")" ]
Populate the node pool.

:param size: the number of nodes to create.
[ "Populate", "the", "node", "poll", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ovb_baremetal.py#L141-L181
train
redhat-openstack/python-tripleo-helper
tripleohelper/ovb_baremetal.py
BaremetalFactory.create_bmc
def create_bmc(self, os_username, os_password, os_project_id, os_auth_url):
    """Deploy the BMC machine.

    This machine hosts the ipmi servers; each ipmi server is associated
    with a baremetal node and has its own IP.
    """
    bmc = ovb_bmc.OvbBmc(
        nova_api=self.nova_api,
        neutron=self.neutron,
        keypair=self._keypair,
        key_filename=self._key_filename,
        security_groups=self._security_groups,
        image_name='Fedora 23 x86_64',
        ip='192.0.2.254',
        os_username=os_username,
        os_password=os_password,
        os_project_id=os_project_id,
        os_auth_url=os_auth_url)
    return bmc
python
[ "def", "create_bmc", "(", "self", ",", "os_username", ",", "os_password", ",", "os_project_id", ",", "os_auth_url", ")", ":", "bmc", "=", "ovb_bmc", ".", "OvbBmc", "(", "nova_api", "=", "self", ".", "nova_api", ",", "neutron", "=", "self", ".", "neutron", ",", "keypair", "=", "self", ".", "_keypair", ",", "key_filename", "=", "self", ".", "_key_filename", ",", "security_groups", "=", "self", ".", "_security_groups", ",", "image_name", "=", "'Fedora 23 x86_64'", ",", "ip", "=", "'192.0.2.254'", ",", "os_username", "=", "os_username", ",", "os_password", "=", "os_password", ",", "os_project_id", "=", "os_project_id", ",", "os_auth_url", "=", "os_auth_url", ")", "return", "bmc" ]
Deploy the BMC machine.

This machine hosts the ipmi servers; each ipmi server is associated with a baremetal node and has its own IP.
[ "Deploy", "the", "BMC", "machine", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ovb_baremetal.py#L213-L231
train
unt-libraries/pyuntl
pyuntl/untldoc.py
untlxml2py
def untlxml2py(untl_filename):
    """Parse a UNTL XML file object into a pyuntl element tree.

    You can also pass this a string as file input like so:
    import StringIO
    untlxml2py(StringIO.StringIO(untl_string))
    """
    # Create a stack to hold parents.
    parent_stack = []
    # Use iterparse to open the file and loop through elements.
    for event, element in iterparse(untl_filename, events=('start', 'end')):
        if NAMESPACE_REGEX.search(element.tag, 0):
            element_tag = NAMESPACE_REGEX.search(element.tag, 0).group(1)
        else:
            element_tag = element.tag
        # Process the element if it exists in UNTL.
        if element_tag in PYUNTL_DISPATCH:
            # If it is the element's opening tag,
            # add it to the parent stack.
            if event == 'start':
                parent_stack.append(PYUNTL_DISPATCH[element_tag]())
            # If it is the element's closing tag,
            # remove element from stack. Add qualifier and content.
            elif event == 'end':
                child = parent_stack.pop()
                if element.text is not None:
                    content = element.text.strip()
                    if content != '':
                        child.set_content(element.text)
                if element.get('qualifier', False):
                    child.set_qualifier(element.get('qualifier'))
                # Add the element to its parent.
                if len(parent_stack) > 0:
                    parent_stack[-1].add_child(child)
                # If it doesn't have a parent, it is the root element,
                # so return it.
                else:
                    return child
        else:
            raise PyuntlException(
                'Element "%s" not in UNTL dispatch.' % (element_tag)
            )
python
[ "def", "untlxml2py", "(", "untl_filename", ")", ":", "# Create a stack to hold parents.", "parent_stack", "=", "[", "]", "# Use iterparse to open the file and loop through elements.", "for", "event", ",", "element", "in", "iterparse", "(", "untl_filename", ",", "events", "=", "(", "'start'", ",", "'end'", ")", ")", ":", "if", "NAMESPACE_REGEX", ".", "search", "(", "element", ".", "tag", ",", "0", ")", ":", "element_tag", "=", "NAMESPACE_REGEX", ".", "search", "(", "element", ".", "tag", ",", "0", ")", ".", "group", "(", "1", ")", "else", ":", "element_tag", "=", "element", ".", "tag", "# Process the element if it exists in UNTL.", "if", "element_tag", "in", "PYUNTL_DISPATCH", ":", "# If it is the element's opening tag,", "# add it to the parent stack.", "if", "event", "==", "'start'", ":", "parent_stack", ".", "append", "(", "PYUNTL_DISPATCH", "[", "element_tag", "]", "(", ")", ")", "# If it is the element's closing tag,", "# remove element from stack. Add qualifier and content.", "elif", "event", "==", "'end'", ":", "child", "=", "parent_stack", ".", "pop", "(", ")", "if", "element", ".", "text", "is", "not", "None", ":", "content", "=", "element", ".", "text", ".", "strip", "(", ")", "if", "content", "!=", "''", ":", "child", ".", "set_content", "(", "element", ".", "text", ")", "if", "element", ".", "get", "(", "'qualifier'", ",", "False", ")", ":", "child", ".", "set_qualifier", "(", "element", ".", "get", "(", "'qualifier'", ")", ")", "# Add the element to its parent.", "if", "len", "(", "parent_stack", ")", ">", "0", ":", "parent_stack", "[", "-", "1", "]", ".", "add_child", "(", "child", ")", "# If it doesn't have a parent, it is the root element,", "# so return it.", "else", ":", "return", "child", "else", ":", "raise", "PyuntlException", "(", "'Element \"%s\" not in UNTL dispatch.'", "%", "(", "element_tag", ")", ")" ]
Parse a UNTL XML file object into a pyuntl element tree. You can also pass this a string as file input like so: import StringIO untlxml2py(StringIO.StringIO(untl_string))
[ "Parse", "a", "UNTL", "XML", "file", "object", "into", "a", "pyuntl", "element", "tree", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L45-L87
train
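A minimal usage sketch for untlxml2py, drawn from its docstring; the filename is hypothetical, and the StringIO variant (Python 2, matching the unicode() calls elsewhere in this module) shows how to parse an in-memory UNTL string:

# Parse a UNTL XML file from disk into a pyuntl element tree.
from pyuntl.untldoc import untlxml2py
root = untlxml2py('record.untl.xml')  # hypothetical filename

# Or wrap an in-memory UNTL string in a file-like object, as the docstring suggests.
import StringIO
untl_string = open('record.untl.xml').read()
root = untlxml2py(StringIO.StringIO(untl_string))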
unt-libraries/pyuntl
pyuntl/untldoc.py
untldict2py
def untldict2py(untl_dict): """Convert a UNTL dictionary into a Python object.""" # Create the root element. untl_root = PYUNTL_DISPATCH['metadata']() untl_py_list = [] for element_name, element_list in untl_dict.items(): # Loop through the element dictionaries in the element list. for element_dict in element_list: qualifier = element_dict.get('qualifier', None) content = element_dict.get('content', None) child_list = [] # Handle content that is children elements. if isinstance(content, dict): for key, value in content.items(): child_list.append( PYUNTL_DISPATCH[key](content=value), ) # Create the UNTL element that will have children elements # added to it. if qualifier is not None: untl_element = PYUNTL_DISPATCH[element_name]( qualifier=qualifier ) else: untl_element = PYUNTL_DISPATCH[element_name]() # Add the element's children to the element. for child in child_list: untl_element.add_child(child) # If not child element, create the element and # add qualifier and content as available. elif content is not None and qualifier is not None: untl_element = PYUNTL_DISPATCH[element_name]( qualifier=qualifier, content=content, ) elif qualifier is not None: untl_element = PYUNTL_DISPATCH[element_name]( qualifier=qualifier, ) elif content is not None: untl_element = PYUNTL_DISPATCH[element_name]( content=content, ) # Create element that only has children. elif len(child_list) > 0: untl_element = PYUNTL_DISPATCH[element_name]() # Add the UNTL element to the Python element list. untl_py_list.append(untl_element) # Add the UNTL elements to the root element. for untl_element in untl_py_list: untl_root.add_child(untl_element) return untl_root
python
def untldict2py(untl_dict): """Convert a UNTL dictionary into a Python object.""" # Create the root element. untl_root = PYUNTL_DISPATCH['metadata']() untl_py_list = [] for element_name, element_list in untl_dict.items(): # Loop through the element dictionaries in the element list. for element_dict in element_list: qualifier = element_dict.get('qualifier', None) content = element_dict.get('content', None) child_list = [] # Handle content that is children elements. if isinstance(content, dict): for key, value in content.items(): child_list.append( PYUNTL_DISPATCH[key](content=value), ) # Create the UNTL element that will have children elements # added to it. if qualifier is not None: untl_element = PYUNTL_DISPATCH[element_name]( qualifier=qualifier ) else: untl_element = PYUNTL_DISPATCH[element_name]() # Add the element's children to the element. for child in child_list: untl_element.add_child(child) # If not child element, create the element and # add qualifier and content as available. elif content is not None and qualifier is not None: untl_element = PYUNTL_DISPATCH[element_name]( qualifier=qualifier, content=content, ) elif qualifier is not None: untl_element = PYUNTL_DISPATCH[element_name]( qualifier=qualifier, ) elif content is not None: untl_element = PYUNTL_DISPATCH[element_name]( content=content, ) # Create element that only has children. elif len(child_list) > 0: untl_element = PYUNTL_DISPATCH[element_name]() # Add the UNTL element to the Python element list. untl_py_list.append(untl_element) # Add the UNTL elements to the root element. for untl_element in untl_py_list: untl_root.add_child(untl_element) return untl_root
[ "def", "untldict2py", "(", "untl_dict", ")", ":", "# Create the root element.", "untl_root", "=", "PYUNTL_DISPATCH", "[", "'metadata'", "]", "(", ")", "untl_py_list", "=", "[", "]", "for", "element_name", ",", "element_list", "in", "untl_dict", ".", "items", "(", ")", ":", "# Loop through the element dictionaries in the element list.", "for", "element_dict", "in", "element_list", ":", "qualifier", "=", "element_dict", ".", "get", "(", "'qualifier'", ",", "None", ")", "content", "=", "element_dict", ".", "get", "(", "'content'", ",", "None", ")", "child_list", "=", "[", "]", "# Handle content that is children elements.", "if", "isinstance", "(", "content", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "content", ".", "items", "(", ")", ":", "child_list", ".", "append", "(", "PYUNTL_DISPATCH", "[", "key", "]", "(", "content", "=", "value", ")", ",", ")", "# Create the UNTL element that will have children elements", "# added to it.", "if", "qualifier", "is", "not", "None", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_name", "]", "(", "qualifier", "=", "qualifier", ")", "else", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_name", "]", "(", ")", "# Add the element's children to the element.", "for", "child", "in", "child_list", ":", "untl_element", ".", "add_child", "(", "child", ")", "# If not child element, create the element and", "# add qualifier and content as available.", "elif", "content", "is", "not", "None", "and", "qualifier", "is", "not", "None", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_name", "]", "(", "qualifier", "=", "qualifier", ",", "content", "=", "content", ",", ")", "elif", "qualifier", "is", "not", "None", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_name", "]", "(", "qualifier", "=", "qualifier", ",", ")", "elif", "content", "is", "not", "None", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_name", "]", "(", "content", "=", "content", ",", ")", "# Create element that only has children.", "elif", "len", "(", "child_list", ")", ">", "0", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_name", "]", "(", ")", "# Add the UNTL element to the Python element list.", "untl_py_list", ".", "append", "(", "untl_element", ")", "# Add the UNTL elements to the root element.", "for", "untl_element", "in", "untl_py_list", ":", "untl_root", ".", "add_child", "(", "untl_element", ")", "return", "untl_root" ]
Convert a UNTL dictionary into a Python object.
[ "Convert", "a", "UNTL", "dictionary", "into", "a", "Python", "object", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L121-L172
train
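A sketch of the dictionary shape untldict2py expects, inferred from its element_dict.get('qualifier')/get('content') calls; the element names 'title' and 'publisher' (and the 'name' child) are assumptions about PYUNTL_DISPATCH, not guaranteed by this file:

from pyuntl.untldoc import untldict2py

untl_dict = {
    # Each key maps to a list of {'qualifier': ..., 'content': ...} dicts.
    'title': [{'qualifier': 'officialtitle', 'content': 'An Example Title'}],
    # A dict-valued 'content' is expanded into child elements.
    'publisher': [{'content': {'name': 'Example Press'}}],
}
untl_root = untldict2py(untl_dict)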
unt-libraries/pyuntl
pyuntl/untldoc.py
post2pydict
def post2pydict(post, ignore_list): """Convert the UNTL posted data to a Python dictionary.""" root_element = PYUNTL_DISPATCH['metadata']() untl_form_dict = {} # Turn the posted data into usable data # (otherwise the value lists get messed up). form_post = dict(post.copy()) # Loop through all the field lists. for key, value_list in form_post.items(): if key not in ignore_list: # Split the key into the element_tag (ex. title) # and element attribute (ex. qualifier, content). (element_tag, element_attribute) = key.split('-', 1) if element_tag not in untl_form_dict: untl_form_dict[element_tag] = () # Add the value list to the dictionary. untl_form_dict[element_tag] += (element_attribute, value_list), for element_tag, attribute_tuple in untl_form_dict.items(): # Get the count of attributes/content in the element tuple. attribute_count = len(attribute_tuple) # Get the count of the first attribute's values. value_count = len(attribute_tuple[0][1]) # Check to see that all attribute/content values align numerically. for i in range(0, attribute_count): if not len(attribute_tuple[i][1]) == value_count: raise PyuntlException('Field values did not match up ' 'numerically for %s' % (element_tag)) # Create a value loop to get all values from the tuple. for i in range(0, value_count): untl_element = None content = '' qualifier = '' child_list = [] # Loop through the attributes. # attribute_tuple[j][0] represents the attribute/content name. # attribute_tuple[j][1][i] represents the # current attribute/content value. for j in range(0, attribute_count): if attribute_tuple[j][0] == 'content': content = unicode(attribute_tuple[j][1][i]) elif attribute_tuple[j][0] == 'qualifier': qualifier = attribute_tuple[j][1][i] # Create a child UNTL element from the data. else: # If the child has content, append it to the child list. if attribute_tuple[j][1][i] != '': child_tag = attribute_tuple[j][0] # Check if the child is the attribute of the element. if child_tag in PARENT_FORM: qualifier = attribute_tuple[j][1][i] # Else, the child is a normal child of the element. else: child_list.append( PYUNTL_DISPATCH[attribute_tuple[j][0]]( content=attribute_tuple[j][1][i] ) ) # Create the UNTL element. if content != '' and qualifier != '': untl_element = PYUNTL_DISPATCH[element_tag](content=content, qualifier=qualifier) elif content != '': untl_element = PYUNTL_DISPATCH[element_tag](content=content) elif qualifier != '': untl_element = PYUNTL_DISPATCH[element_tag](qualifier=qualifier) # This element only has children elements. elif len(child_list) > 0: untl_element = PYUNTL_DISPATCH[element_tag]() # If the element has children, add them. if len(child_list) > 0 and untl_element is not None: for child in child_list: untl_element.add_child(child) # Add the UNTL element to the root element. if untl_element is not None: root_element.add_child(untl_element) return root_element.create_element_dict()
python
def post2pydict(post, ignore_list): """Convert the UNTL posted data to a Python dictionary.""" root_element = PYUNTL_DISPATCH['metadata']() untl_form_dict = {} # Turn the posted data into usable data # (otherwise the value lists get messed up). form_post = dict(post.copy()) # Loop through all the field lists. for key, value_list in form_post.items(): if key not in ignore_list: # Split the key into the element_tag (ex. title) # and element attribute (ex. qualifier, content). (element_tag, element_attribute) = key.split('-', 1) if element_tag not in untl_form_dict: untl_form_dict[element_tag] = () # Add the value list to the dictionary. untl_form_dict[element_tag] += (element_attribute, value_list), for element_tag, attribute_tuple in untl_form_dict.items(): # Get the count of attributes/content in the element tuple. attribute_count = len(attribute_tuple) # Get the count of the first attribute's values. value_count = len(attribute_tuple[0][1]) # Check to see that all attribute/content values align numerically. for i in range(0, attribute_count): if not len(attribute_tuple[i][1]) == value_count: raise PyuntlException('Field values did not match up ' 'numerically for %s' % (element_tag)) # Create a value loop to get all values from the tuple. for i in range(0, value_count): untl_element = None content = '' qualifier = '' child_list = [] # Loop through the attributes. # attribute_tuple[j][0] represents the attribute/content name. # attribute_tuple[j][1][i] represents the # current attribute/content value. for j in range(0, attribute_count): if attribute_tuple[j][0] == 'content': content = unicode(attribute_tuple[j][1][i]) elif attribute_tuple[j][0] == 'qualifier': qualifier = attribute_tuple[j][1][i] # Create a child UNTL element from the data. else: # If the child has content, append it to the child list. if attribute_tuple[j][1][i] != '': child_tag = attribute_tuple[j][0] # Check if the child is the attribute of the element. if child_tag in PARENT_FORM: qualifier = attribute_tuple[j][1][i] # Else, the child is a normal child of the element. else: child_list.append( PYUNTL_DISPATCH[attribute_tuple[j][0]]( content=attribute_tuple[j][1][i] ) ) # Create the UNTL element. if content != '' and qualifier != '': untl_element = PYUNTL_DISPATCH[element_tag](content=content, qualifier=qualifier) elif content != '': untl_element = PYUNTL_DISPATCH[element_tag](content=content) elif qualifier != '': untl_element = PYUNTL_DISPATCH[element_tag](qualifier=qualifier) # This element only has children elements. elif len(child_list) > 0: untl_element = PYUNTL_DISPATCH[element_tag]() # If the element has children, add them. if len(child_list) > 0 and untl_element is not None: for child in child_list: untl_element.add_child(child) # Add the UNTL element to the root element. if untl_element is not None: root_element.add_child(untl_element) return root_element.create_element_dict()
[ "def", "post2pydict", "(", "post", ",", "ignore_list", ")", ":", "root_element", "=", "PYUNTL_DISPATCH", "[", "'metadata'", "]", "(", ")", "untl_form_dict", "=", "{", "}", "# Turn the posted data into usable data", "# (otherwise the value lists get messed up).", "form_post", "=", "dict", "(", "post", ".", "copy", "(", ")", ")", "# Loop through all the field lists.", "for", "key", ",", "value_list", "in", "form_post", ".", "items", "(", ")", ":", "if", "key", "not", "in", "ignore_list", ":", "# Split the key into the element_tag (ex. title)", "# and element attribute (ex. qualifier, content).", "(", "element_tag", ",", "element_attribute", ")", "=", "key", ".", "split", "(", "'-'", ",", "1", ")", "if", "element_tag", "not", "in", "untl_form_dict", ":", "untl_form_dict", "[", "element_tag", "]", "=", "(", ")", "# Add the value list to the dictionary.", "untl_form_dict", "[", "element_tag", "]", "+=", "(", "element_attribute", ",", "value_list", ")", ",", "for", "element_tag", ",", "attribute_tuple", "in", "untl_form_dict", ".", "items", "(", ")", ":", "# Get the count of attributes/content in the element tuple.", "attribute_count", "=", "len", "(", "attribute_tuple", ")", "# Get the count of the first attribute's values.", "value_count", "=", "len", "(", "attribute_tuple", "[", "0", "]", "[", "1", "]", ")", "# Check to see that all attribute/content values align numerically.", "for", "i", "in", "range", "(", "0", ",", "attribute_count", ")", ":", "if", "not", "len", "(", "attribute_tuple", "[", "i", "]", "[", "1", "]", ")", "==", "value_count", ":", "raise", "PyuntlException", "(", "'Field values did not match up '", "'numerically for %s'", "%", "(", "element_tag", ")", ")", "# Create a value loop to get all values from the tuple.", "for", "i", "in", "range", "(", "0", ",", "value_count", ")", ":", "untl_element", "=", "None", "content", "=", "''", "qualifier", "=", "''", "child_list", "=", "[", "]", "# Loop through the attributes.", "# attribute_tuple[j][0] represents the attribute/content name.", "# attribute_tuple[j][1][i] represents the", "# current attribute/content value.", "for", "j", "in", "range", "(", "0", ",", "attribute_count", ")", ":", "if", "attribute_tuple", "[", "j", "]", "[", "0", "]", "==", "'content'", ":", "content", "=", "unicode", "(", "attribute_tuple", "[", "j", "]", "[", "1", "]", "[", "i", "]", ")", "elif", "attribute_tuple", "[", "j", "]", "[", "0", "]", "==", "'qualifier'", ":", "qualifier", "=", "attribute_tuple", "[", "j", "]", "[", "1", "]", "[", "i", "]", "# Create a child UNTL element from the data.", "else", ":", "# If the child has content, append it to the child list.", "if", "attribute_tuple", "[", "j", "]", "[", "1", "]", "[", "i", "]", "!=", "''", ":", "child_tag", "=", "attribute_tuple", "[", "j", "]", "[", "0", "]", "# Check if the child is the attribute of the element.", "if", "child_tag", "in", "PARENT_FORM", ":", "qualifier", "=", "attribute_tuple", "[", "j", "]", "[", "1", "]", "[", "i", "]", "# Else, the child is a normal child of the element.", "else", ":", "child_list", ".", "append", "(", "PYUNTL_DISPATCH", "[", "attribute_tuple", "[", "j", "]", "[", "0", "]", "]", "(", "content", "=", "attribute_tuple", "[", "j", "]", "[", "1", "]", "[", "i", "]", ")", ")", "# Create the UNTL element.", "if", "content", "!=", "''", "and", "qualifier", "!=", "''", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_tag", "]", "(", "content", "=", "content", ",", "qualifier", "=", "qualifier", ")", "elif", "content", "!=", "''", ":", "untl_element", 
"=", "PYUNTL_DISPATCH", "[", "element_tag", "]", "(", "content", "=", "content", ")", "elif", "qualifier", "!=", "''", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_tag", "]", "(", "qualifier", "=", "qualifier", ")", "# This element only has children elements.", "elif", "len", "(", "child_list", ")", ">", "0", ":", "untl_element", "=", "PYUNTL_DISPATCH", "[", "element_tag", "]", "(", ")", "# If the element has children, add them.", "if", "len", "(", "child_list", ")", ">", "0", "and", "untl_element", "is", "not", "None", ":", "for", "child", "in", "child_list", ":", "untl_element", ".", "add_child", "(", "child", ")", "# Add the UNTL element to the root element.", "if", "untl_element", "is", "not", "None", ":", "root_element", ".", "add_child", "(", "untl_element", ")", "return", "root_element", ".", "create_element_dict", "(", ")" ]
Convert the UNTL posted data to a Python dictionary.
[ "Convert", "the", "UNTL", "posted", "data", "to", "a", "Python", "dictionary", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L175-L251
train
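A sketch of the form data post2pydict expects: keys of the form '<element_tag>-<attribute>' with parallel value lists (mismatched lengths raise PyuntlException), per the key.split('-', 1) logic above; the field values and the csrf_token key are hypothetical:

from pyuntl.untldoc import post2pydict

form_post = {
    'title-qualifier': ['officialtitle'],
    'title-content': ['An Example Title'],
    'csrf_token': ['abc123'],  # non-UNTL fields must be listed in ignore_list
}
untl_dict = post2pydict(form_post, ignore_list=['csrf_token'])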
unt-libraries/pyuntl
pyuntl/untldoc.py
untlpy2dcpy
def untlpy2dcpy(untl_elements, **kwargs): """Convert the UNTL elements structure into a DC structure. kwargs can be passed to the function for certain effects: ark: Takes an ark string and creates an identifier element out of it. domain_name: Takes a domain string and creates an ark URL from it (ark and domain_name must be passed together to work properly). resolve_values: Converts abbreviated content into resolved vocabulary labels. resolve_urls: Converts abbreviated content into resolved vocabulary URLs. verbose_vocabularies: Uses the verbose vocabularies passed to the function instead of this function being required to retrieve them. # Create a DC Python object from a UNTL XML file. from pyuntl.untldoc import untlxml2py untl_elements = untlxml2py(untl_filename) # Or pass a file-like object. # OR Create a DC Python object from a UNTL dictionary. from pyuntl.untldoc import untldict2py untl_elements = untldict2py(untl_dict) # Convert to UNTL Python object to DC Python object. dc_elements = untlpy2dcpy(untl_elements) dc_dict = dcpy2dict(dc_elements) # Output DC in a specified string format. from pyuntl.untldoc import generate_dc_xml, generate_dc_json, generate_dc_txt # Create a DC XML string. generate_dc_xml(dc_dict) # Create a DC JSON string. generate_dc_json(dc_dict) # Create a DC text string. generate_dc_txt(dc_dict) """ sDate = None eDate = None ark = kwargs.get('ark', None) domain_name = kwargs.get('domain_name', None) scheme = kwargs.get('scheme', 'http') resolve_values = kwargs.get('resolve_values', None) resolve_urls = kwargs.get('resolve_urls', None) verbose_vocabularies = kwargs.get('verbose_vocabularies', None) # If either resolvers were requested, get the vocabulary data. if resolve_values or resolve_urls: if verbose_vocabularies: # If the vocabularies were passed to the function, use them. vocab_data = verbose_vocabularies else: # Otherwise, retrieve them using the pyuntl method. vocab_data = retrieve_vocab() else: vocab_data = None # Create the DC parent element. dc_root = DC_CONVERSION_DISPATCH['dc']() for element in untl_elements.children: # Check if the UNTL element should be converted to DC. if element.tag in DC_CONVERSION_DISPATCH: # Check if the element has its content stored in children nodes. if element.children: dc_element = DC_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, children=element.children, resolve_values=resolve_values, resolve_urls=resolve_urls, vocab_data=vocab_data, ) # It is a normal element. else: dc_element = DC_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, resolve_values=resolve_values, resolve_urls=resolve_urls, vocab_data=vocab_data, ) if element.tag == 'coverage': # Handle start and end dates. if element.qualifier == 'sDate': sDate = dc_element elif element.qualifier == 'eDate': eDate = dc_element # Otherwise, add the coverage element to the structure. else: dc_root.add_child(dc_element) # Add non coverage DC element to the structure. elif dc_element: dc_root.add_child(dc_element) # If the domain and ark were specified # try to turn them into indentifier elements. if ark and domain_name: # Create and add the permalink identifier. permalink_identifier = DC_CONVERSION_DISPATCH['identifier']( qualifier='permalink', domain_name=domain_name, ark=ark, scheme=scheme ) dc_root.add_child(permalink_identifier) # Create and add the ark identifier. 
ark_identifier = DC_CONVERSION_DISPATCH['identifier']( qualifier='ark', content=ark, ) dc_root.add_child(ark_identifier) if sDate and eDate: # If a start and end date exist, combine them into one element. dc_element = DC_CONVERSION_DISPATCH['coverage']( content='%s-%s' % (sDate.content, eDate.content), ) dc_root.add_child(dc_element) elif sDate: dc_root.add_child(sDate) elif eDate: dc_root.add_child(eDate) return dc_root
python
def untlpy2dcpy(untl_elements, **kwargs): """Convert the UNTL elements structure into a DC structure. kwargs can be passed to the function for certain effects: ark: Takes an ark string and creates an identifier element out of it. domain_name: Takes a domain string and creates an ark URL from it (ark and domain_name must be passed together to work properly). resolve_values: Converts abbreviated content into resolved vocabulary labels. resolve_urls: Converts abbreviated content into resolved vocabulary URLs. verbose_vocabularies: Uses the verbose vocabularies passed to the function instead of this function being required to retrieve them. # Create a DC Python object from a UNTL XML file. from pyuntl.untldoc import untlxml2py untl_elements = untlxml2py(untl_filename) # Or pass a file-like object. # OR Create a DC Python object from a UNTL dictionary. from pyuntl.untldoc import untldict2py untl_elements = untldict2py(untl_dict) # Convert to UNTL Python object to DC Python object. dc_elements = untlpy2dcpy(untl_elements) dc_dict = dcpy2dict(dc_elements) # Output DC in a specified string format. from pyuntl.untldoc import generate_dc_xml, generate_dc_json, generate_dc_txt # Create a DC XML string. generate_dc_xml(dc_dict) # Create a DC JSON string. generate_dc_json(dc_dict) # Create a DC text string. generate_dc_txt(dc_dict) """ sDate = None eDate = None ark = kwargs.get('ark', None) domain_name = kwargs.get('domain_name', None) scheme = kwargs.get('scheme', 'http') resolve_values = kwargs.get('resolve_values', None) resolve_urls = kwargs.get('resolve_urls', None) verbose_vocabularies = kwargs.get('verbose_vocabularies', None) # If either resolvers were requested, get the vocabulary data. if resolve_values or resolve_urls: if verbose_vocabularies: # If the vocabularies were passed to the function, use them. vocab_data = verbose_vocabularies else: # Otherwise, retrieve them using the pyuntl method. vocab_data = retrieve_vocab() else: vocab_data = None # Create the DC parent element. dc_root = DC_CONVERSION_DISPATCH['dc']() for element in untl_elements.children: # Check if the UNTL element should be converted to DC. if element.tag in DC_CONVERSION_DISPATCH: # Check if the element has its content stored in children nodes. if element.children: dc_element = DC_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, children=element.children, resolve_values=resolve_values, resolve_urls=resolve_urls, vocab_data=vocab_data, ) # It is a normal element. else: dc_element = DC_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, resolve_values=resolve_values, resolve_urls=resolve_urls, vocab_data=vocab_data, ) if element.tag == 'coverage': # Handle start and end dates. if element.qualifier == 'sDate': sDate = dc_element elif element.qualifier == 'eDate': eDate = dc_element # Otherwise, add the coverage element to the structure. else: dc_root.add_child(dc_element) # Add non coverage DC element to the structure. elif dc_element: dc_root.add_child(dc_element) # If the domain and ark were specified # try to turn them into indentifier elements. if ark and domain_name: # Create and add the permalink identifier. permalink_identifier = DC_CONVERSION_DISPATCH['identifier']( qualifier='permalink', domain_name=domain_name, ark=ark, scheme=scheme ) dc_root.add_child(permalink_identifier) # Create and add the ark identifier. 
ark_identifier = DC_CONVERSION_DISPATCH['identifier']( qualifier='ark', content=ark, ) dc_root.add_child(ark_identifier) if sDate and eDate: # If a start and end date exist, combine them into one element. dc_element = DC_CONVERSION_DISPATCH['coverage']( content='%s-%s' % (sDate.content, eDate.content), ) dc_root.add_child(dc_element) elif sDate: dc_root.add_child(sDate) elif eDate: dc_root.add_child(eDate) return dc_root
[ "def", "untlpy2dcpy", "(", "untl_elements", ",", "*", "*", "kwargs", ")", ":", "sDate", "=", "None", "eDate", "=", "None", "ark", "=", "kwargs", ".", "get", "(", "'ark'", ",", "None", ")", "domain_name", "=", "kwargs", ".", "get", "(", "'domain_name'", ",", "None", ")", "scheme", "=", "kwargs", ".", "get", "(", "'scheme'", ",", "'http'", ")", "resolve_values", "=", "kwargs", ".", "get", "(", "'resolve_values'", ",", "None", ")", "resolve_urls", "=", "kwargs", ".", "get", "(", "'resolve_urls'", ",", "None", ")", "verbose_vocabularies", "=", "kwargs", ".", "get", "(", "'verbose_vocabularies'", ",", "None", ")", "# If either resolvers were requested, get the vocabulary data.", "if", "resolve_values", "or", "resolve_urls", ":", "if", "verbose_vocabularies", ":", "# If the vocabularies were passed to the function, use them.", "vocab_data", "=", "verbose_vocabularies", "else", ":", "# Otherwise, retrieve them using the pyuntl method.", "vocab_data", "=", "retrieve_vocab", "(", ")", "else", ":", "vocab_data", "=", "None", "# Create the DC parent element.", "dc_root", "=", "DC_CONVERSION_DISPATCH", "[", "'dc'", "]", "(", ")", "for", "element", "in", "untl_elements", ".", "children", ":", "# Check if the UNTL element should be converted to DC.", "if", "element", ".", "tag", "in", "DC_CONVERSION_DISPATCH", ":", "# Check if the element has its content stored in children nodes.", "if", "element", ".", "children", ":", "dc_element", "=", "DC_CONVERSION_DISPATCH", "[", "element", ".", "tag", "]", "(", "qualifier", "=", "element", ".", "qualifier", ",", "children", "=", "element", ".", "children", ",", "resolve_values", "=", "resolve_values", ",", "resolve_urls", "=", "resolve_urls", ",", "vocab_data", "=", "vocab_data", ",", ")", "# It is a normal element.", "else", ":", "dc_element", "=", "DC_CONVERSION_DISPATCH", "[", "element", ".", "tag", "]", "(", "qualifier", "=", "element", ".", "qualifier", ",", "content", "=", "element", ".", "content", ",", "resolve_values", "=", "resolve_values", ",", "resolve_urls", "=", "resolve_urls", ",", "vocab_data", "=", "vocab_data", ",", ")", "if", "element", ".", "tag", "==", "'coverage'", ":", "# Handle start and end dates.", "if", "element", ".", "qualifier", "==", "'sDate'", ":", "sDate", "=", "dc_element", "elif", "element", ".", "qualifier", "==", "'eDate'", ":", "eDate", "=", "dc_element", "# Otherwise, add the coverage element to the structure.", "else", ":", "dc_root", ".", "add_child", "(", "dc_element", ")", "# Add non coverage DC element to the structure.", "elif", "dc_element", ":", "dc_root", ".", "add_child", "(", "dc_element", ")", "# If the domain and ark were specified", "# try to turn them into indentifier elements.", "if", "ark", "and", "domain_name", ":", "# Create and add the permalink identifier.", "permalink_identifier", "=", "DC_CONVERSION_DISPATCH", "[", "'identifier'", "]", "(", "qualifier", "=", "'permalink'", ",", "domain_name", "=", "domain_name", ",", "ark", "=", "ark", ",", "scheme", "=", "scheme", ")", "dc_root", ".", "add_child", "(", "permalink_identifier", ")", "# Create and add the ark identifier.", "ark_identifier", "=", "DC_CONVERSION_DISPATCH", "[", "'identifier'", "]", "(", "qualifier", "=", "'ark'", ",", "content", "=", "ark", ",", ")", "dc_root", ".", "add_child", "(", "ark_identifier", ")", "if", "sDate", "and", "eDate", ":", "# If a start and end date exist, combine them into one element.", "dc_element", "=", "DC_CONVERSION_DISPATCH", "[", "'coverage'", "]", "(", "content", "=", "'%s-%s'", "%", "(", "sDate", ".", 
"content", ",", "eDate", ".", "content", ")", ",", ")", "dc_root", ".", "add_child", "(", "dc_element", ")", "elif", "sDate", ":", "dc_root", ".", "add_child", "(", "sDate", ")", "elif", "eDate", ":", "dc_root", ".", "add_child", "(", "eDate", ")", "return", "dc_root" ]
Convert the UNTL elements structure into a DC structure. kwargs can be passed to the function for certain effects: ark: Takes an ark string and creates an identifier element out of it. domain_name: Takes a domain string and creates an ark URL from it (ark and domain_name must be passed together to work properly). resolve_values: Converts abbreviated content into resolved vocabulary labels. resolve_urls: Converts abbreviated content into resolved vocabulary URLs. verbose_vocabularies: Uses the verbose vocabularies passed to the function instead of this function being required to retrieve them. # Create a DC Python object from a UNTL XML file. from pyuntl.untldoc import untlxml2py untl_elements = untlxml2py(untl_filename) # Or pass a file-like object. # OR Create a DC Python object from a UNTL dictionary. from pyuntl.untldoc import untldict2py untl_elements = untldict2py(untl_dict) # Convert the UNTL Python object to a DC Python object. dc_elements = untlpy2dcpy(untl_elements) dc_dict = dcpy2dict(dc_elements) # Output DC in a specified string format. from pyuntl.untldoc import generate_dc_xml, generate_dc_json, generate_dc_txt # Create a DC XML string. generate_dc_xml(dc_dict) # Create a DC JSON string. generate_dc_json(dc_dict) # Create a DC text string. generate_dc_txt(dc_dict)
[ "Convert", "the", "UNTL", "elements", "structure", "into", "a", "DC", "structure", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L254-L372
train
unt-libraries/pyuntl
pyuntl/untldoc.py
untlpy2highwirepy
def untlpy2highwirepy(untl_elements, **kwargs): """Convert a UNTL Python object to a highwire Python object.""" highwire_list = [] title = None publisher = None creation = None escape = kwargs.get('escape', False) for element in untl_elements.children: # If the UNTL element should be converted to highwire, # create highwire element. if element.tag in HIGHWIRE_CONVERSION_DISPATCH: highwire_element = HIGHWIRE_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, children=element.children, escape=escape, ) if highwire_element: if element.tag == 'title': if element.qualifier != 'officialtitle' and not title: title = highwire_element elif element.qualifier == 'officialtitle': title = highwire_element elif element.tag == 'publisher': if not publisher: # This is the first publisher element. publisher = highwire_element highwire_list.append(publisher) elif element.tag == 'date': # If a creation date hasn't been found yet, # verify this date is acceptable. if not creation and element.qualifier == 'creation': if highwire_element.content: creation = highwire_element if creation: highwire_list.append(creation) # Otherwise, add the element to the list if it has content. elif highwire_element.content: highwire_list.append(highwire_element) # If the title was found, add it to the list. if title: highwire_list.append(title) return highwire_list
python
def untlpy2highwirepy(untl_elements, **kwargs): """Convert a UNTL Python object to a highwire Python object.""" highwire_list = [] title = None publisher = None creation = None escape = kwargs.get('escape', False) for element in untl_elements.children: # If the UNTL element should be converted to highwire, # create highwire element. if element.tag in HIGHWIRE_CONVERSION_DISPATCH: highwire_element = HIGHWIRE_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, children=element.children, escape=escape, ) if highwire_element: if element.tag == 'title': if element.qualifier != 'officialtitle' and not title: title = highwire_element elif element.qualifier == 'officialtitle': title = highwire_element elif element.tag == 'publisher': if not publisher: # This is the first publisher element. publisher = highwire_element highwire_list.append(publisher) elif element.tag == 'date': # If a creation date hasn't been found yet, # verify this date is acceptable. if not creation and element.qualifier == 'creation': if highwire_element.content: creation = highwire_element if creation: highwire_list.append(creation) # Otherwise, add the element to the list if it has content. elif highwire_element.content: highwire_list.append(highwire_element) # If the title was found, add it to the list. if title: highwire_list.append(title) return highwire_list
[ "def", "untlpy2highwirepy", "(", "untl_elements", ",", "*", "*", "kwargs", ")", ":", "highwire_list", "=", "[", "]", "title", "=", "None", "publisher", "=", "None", "creation", "=", "None", "escape", "=", "kwargs", ".", "get", "(", "'escape'", ",", "False", ")", "for", "element", "in", "untl_elements", ".", "children", ":", "# If the UNTL element should be converted to highwire,", "# create highwire element.", "if", "element", ".", "tag", "in", "HIGHWIRE_CONVERSION_DISPATCH", ":", "highwire_element", "=", "HIGHWIRE_CONVERSION_DISPATCH", "[", "element", ".", "tag", "]", "(", "qualifier", "=", "element", ".", "qualifier", ",", "content", "=", "element", ".", "content", ",", "children", "=", "element", ".", "children", ",", "escape", "=", "escape", ",", ")", "if", "highwire_element", ":", "if", "element", ".", "tag", "==", "'title'", ":", "if", "element", ".", "qualifier", "!=", "'officialtitle'", "and", "not", "title", ":", "title", "=", "highwire_element", "elif", "element", ".", "qualifier", "==", "'officialtitle'", ":", "title", "=", "highwire_element", "elif", "element", ".", "tag", "==", "'publisher'", ":", "if", "not", "publisher", ":", "# This is the first publisher element.", "publisher", "=", "highwire_element", "highwire_list", ".", "append", "(", "publisher", ")", "elif", "element", ".", "tag", "==", "'date'", ":", "# If a creation date hasn't been found yet,", "# verify this date is acceptable.", "if", "not", "creation", "and", "element", ".", "qualifier", "==", "'creation'", ":", "if", "highwire_element", ".", "content", ":", "creation", "=", "highwire_element", "if", "creation", ":", "highwire_list", ".", "append", "(", "creation", ")", "# Otherwise, add the element to the list if it has content.", "elif", "highwire_element", ".", "content", ":", "highwire_list", ".", "append", "(", "highwire_element", ")", "# If the title was found, add it to the list.", "if", "title", ":", "highwire_list", ".", "append", "(", "title", ")", "return", "highwire_list" ]
Convert a UNTL Python object to a highwire Python object.
[ "Convert", "a", "UNTL", "Python", "object", "to", "a", "highwire", "Python", "object", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L375-L417
train
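A brief sketch chaining untldict2py into untlpy2highwirepy; untl_dict is as in the earlier sketch, and escape=True is simply passed through to each highwire element constructor:

from pyuntl.untldoc import untldict2py, untlpy2highwirepy

untl_root = untldict2py(untl_dict)
highwire_elements = untlpy2highwirepy(untl_root, escape=True)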
unt-libraries/pyuntl
pyuntl/untldoc.py
untlpydict2dcformatteddict
def untlpydict2dcformatteddict(untl_dict, **kwargs): """Convert a UNTL data dictionary to a formatted DC data dictionary.""" ark = kwargs.get('ark', None) domain_name = kwargs.get('domain_name', None) scheme = kwargs.get('scheme', 'http') resolve_values = kwargs.get('resolve_values', None) resolve_urls = kwargs.get('resolve_urls', None) verbose_vocabularies = kwargs.get('verbose_vocabularies', None) # Get the UNTL object. untl_py = untldict2py(untl_dict) # Convert it to a DC object. dc_py = untlpy2dcpy( untl_py, ark=ark, domain_name=domain_name, resolve_values=resolve_values, resolve_urls=resolve_urls, verbose_vocabularies=verbose_vocabularies, scheme=scheme ) # Return a formatted DC dictionary. return dcpy2formatteddcdict(dc_py)
python
def untlpydict2dcformatteddict(untl_dict, **kwargs): """Convert a UNTL data dictionary to a formatted DC data dictionary.""" ark = kwargs.get('ark', None) domain_name = kwargs.get('domain_name', None) scheme = kwargs.get('scheme', 'http') resolve_values = kwargs.get('resolve_values', None) resolve_urls = kwargs.get('resolve_urls', None) verbose_vocabularies = kwargs.get('verbose_vocabularies', None) # Get the UNTL object. untl_py = untldict2py(untl_dict) # Convert it to a DC object. dc_py = untlpy2dcpy( untl_py, ark=ark, domain_name=domain_name, resolve_values=resolve_values, resolve_urls=resolve_urls, verbose_vocabularies=verbose_vocabularies, scheme=scheme ) # Return a formatted DC dictionary. return dcpy2formatteddcdict(dc_py)
[ "def", "untlpydict2dcformatteddict", "(", "untl_dict", ",", "*", "*", "kwargs", ")", ":", "ark", "=", "kwargs", ".", "get", "(", "'ark'", ",", "None", ")", "domain_name", "=", "kwargs", ".", "get", "(", "'domain_name'", ",", "None", ")", "scheme", "=", "kwargs", ".", "get", "(", "'scheme'", ",", "'http'", ")", "resolve_values", "=", "kwargs", ".", "get", "(", "'resolve_values'", ",", "None", ")", "resolve_urls", "=", "kwargs", ".", "get", "(", "'resolve_urls'", ",", "None", ")", "verbose_vocabularies", "=", "kwargs", ".", "get", "(", "'verbose_vocabularies'", ",", "None", ")", "# Get the UNTL object.", "untl_py", "=", "untldict2py", "(", "untl_dict", ")", "# Convert it to a DC object.", "dc_py", "=", "untlpy2dcpy", "(", "untl_py", ",", "ark", "=", "ark", ",", "domain_name", "=", "domain_name", ",", "resolve_values", "=", "resolve_values", ",", "resolve_urls", "=", "resolve_urls", ",", "verbose_vocabularies", "=", "verbose_vocabularies", ",", "scheme", "=", "scheme", ")", "# Return a formatted DC dictionary.", "return", "dcpy2formatteddcdict", "(", "dc_py", ")" ]
Convert a UNTL data dictionary to a formatted DC data dictionary.
[ "Convert", "a", "UNTL", "data", "dictionary", "to", "a", "formatted", "DC", "data", "dictionary", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L420-L441
train
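A sketch of the one-call wrapper; the ARK and domain are hypothetical, and per untlpy2dcpy, ark and domain_name must be passed together for the permalink identifier to be created:

from pyuntl.untldoc import untlpydict2dcformatteddict

dc_dict = untlpydict2dcformatteddict(
    untl_dict,                    # as in the untldict2py sketch
    ark='ark:/67531/example',     # hypothetical ARK
    domain_name='example.org',    # hypothetical domain
    scheme='https',
)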
unt-libraries/pyuntl
pyuntl/untldoc.py
formatted_dc_dict
def formatted_dc_dict(dc_dict): """Change the formatting of the DC data dictionary. Change the passed in DC data dictionary into a dictionary with a list of values for each element. i.e. {'publisher': ['someone', 'someone else'], 'title': ['a title'],} """ for key, element_list in dc_dict.items(): new_element_list = [] # Add the content for each element to the new element list. for element in element_list: new_element_list.append(element['content']) dc_dict[key] = new_element_list return dc_dict
python
def formatted_dc_dict(dc_dict): """Change the formatting of the DC data dictionary. Change the passed in DC data dictionary into a dictionary with a list of values for each element. i.e. {'publisher': ['someone', 'someone else'], 'title': ['a title'],} """ for key, element_list in dc_dict.items(): new_element_list = [] # Add the content for each element to the new element list. for element in element_list: new_element_list.append(element['content']) dc_dict[key] = new_element_list return dc_dict
[ "def", "formatted_dc_dict", "(", "dc_dict", ")", ":", "for", "key", ",", "element_list", "in", "dc_dict", ".", "items", "(", ")", ":", "new_element_list", "=", "[", "]", "# Add the content for each element to the new element list.", "for", "element", "in", "element_list", ":", "new_element_list", ".", "append", "(", "element", "[", "'content'", "]", ")", "dc_dict", "[", "key", "]", "=", "new_element_list", "return", "dc_dict" ]
Change the formatting of the DC data dictionary. Change the passed-in DC data dictionary into a dictionary with a list of values for each element, e.g. {'publisher': ['someone', 'someone else'], 'title': ['a title'],}
[ "Change", "the", "formatting", "of", "the", "DC", "data", "dictionary", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L457-L470
train
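The before/after shapes, restating the docstring's own example:

from pyuntl.untldoc import formatted_dc_dict

dc_dict = {'publisher': [{'content': 'someone'}, {'content': 'someone else'}],
           'title': [{'content': 'a title'}]}
flat = formatted_dc_dict(dc_dict)
# flat == {'publisher': ['someone', 'someone else'], 'title': ['a title']}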
unt-libraries/pyuntl
pyuntl/untldoc.py
generate_dc_xml
def generate_dc_xml(dc_dict): """Generate a DC XML string.""" # Define the root namespace. root_namespace = '{%s}' % DC_NAMESPACES['oai_dc'] # Set the elements namespace URL. elements_namespace = '{%s}' % DC_NAMESPACES['dc'] schema_location = ('http://www.openarchives.org/OAI/2.0/oai_dc/ ' 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd') root_attributes = { '{%s}schemaLocation' % XSI: schema_location, } # Return the DC XML string. return pydict2xmlstring( dc_dict, ordering=DC_ORDER, root_label='dc', root_namespace=root_namespace, elements_namespace=elements_namespace, namespace_map=DC_NAMESPACES, root_attributes=root_attributes, )
python
def generate_dc_xml(dc_dict): """Generate a DC XML string.""" # Define the root namespace. root_namespace = '{%s}' % DC_NAMESPACES['oai_dc'] # Set the elements namespace URL. elements_namespace = '{%s}' % DC_NAMESPACES['dc'] schema_location = ('http://www.openarchives.org/OAI/2.0/oai_dc/ ' 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd') root_attributes = { '{%s}schemaLocation' % XSI: schema_location, } # Return the DC XML string. return pydict2xmlstring( dc_dict, ordering=DC_ORDER, root_label='dc', root_namespace=root_namespace, elements_namespace=elements_namespace, namespace_map=DC_NAMESPACES, root_attributes=root_attributes, )
[ "def", "generate_dc_xml", "(", "dc_dict", ")", ":", "# Define the root namespace.", "root_namespace", "=", "'{%s}'", "%", "DC_NAMESPACES", "[", "'oai_dc'", "]", "# Set the elements namespace URL.", "elements_namespace", "=", "'{%s}'", "%", "DC_NAMESPACES", "[", "'dc'", "]", "schema_location", "=", "(", "'http://www.openarchives.org/OAI/2.0/oai_dc/ '", "'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'", ")", "root_attributes", "=", "{", "'{%s}schemaLocation'", "%", "XSI", ":", "schema_location", ",", "}", "# Return the DC XML string.", "return", "pydict2xmlstring", "(", "dc_dict", ",", "ordering", "=", "DC_ORDER", ",", "root_label", "=", "'dc'", ",", "root_namespace", "=", "root_namespace", ",", "elements_namespace", "=", "elements_namespace", ",", "namespace_map", "=", "DC_NAMESPACES", ",", "root_attributes", "=", "root_attributes", ",", ")" ]
Generate a DC XML string.
[ "Generate", "a", "DC", "XML", "string", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L473-L493
train
unt-libraries/pyuntl
pyuntl/untldoc.py
generate_dc_json
def generate_dc_json(dc_dict): """Generate DC JSON data. Returns data as a JSON formatted string. """ formatted_dict = formatted_dc_dict(dc_dict) return json.dumps(formatted_dict, sort_keys=True, indent=4)
python
def generate_dc_json(dc_dict): """Generate DC JSON data. Returns data as a JSON formatted string. """ formatted_dict = formatted_dc_dict(dc_dict) return json.dumps(formatted_dict, sort_keys=True, indent=4)
[ "def", "generate_dc_json", "(", "dc_dict", ")", ":", "formatted_dict", "=", "formatted_dc_dict", "(", "dc_dict", ")", "return", "json", ".", "dumps", "(", "formatted_dict", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")" ]
Generate DC JSON data. Returns data as a JSON formatted string.
[ "Generate", "DC", "JSON", "data", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L496-L502
train
unt-libraries/pyuntl
pyuntl/untldoc.py
highwirepy2dict
def highwirepy2dict(highwire_elements): """Convert a list of highwire elements into a dictionary. The dictionary returned contains the elements in the UNTL dict format. """ highwire_dict = {} # Make a list of content dictionaries for each element name. for element in highwire_elements: if element.name not in highwire_dict: highwire_dict[element.name] = [] highwire_dict[element.name].append({'content': element.content}) return highwire_dict
python
def highwirepy2dict(highwire_elements): """Convert a list of highwire elements into a dictionary. The dictionary returned contains the elements in the UNTL dict format. """ highwire_dict = {} # Make a list of content dictionaries for each element name. for element in highwire_elements: if element.name not in highwire_dict: highwire_dict[element.name] = [] highwire_dict[element.name].append({'content': element.content}) return highwire_dict
[ "def", "highwirepy2dict", "(", "highwire_elements", ")", ":", "highwire_dict", "=", "{", "}", "# Make a list of content dictionaries for each element name.", "for", "element", "in", "highwire_elements", ":", "if", "element", ".", "name", "not", "in", "highwire_dict", ":", "highwire_dict", "[", "element", ".", "name", "]", "=", "[", "]", "highwire_dict", "[", "element", ".", "name", "]", ".", "append", "(", "{", "'content'", ":", "element", ".", "content", "}", ")", "return", "highwire_dict" ]
Convert a list of highwire elements into a dictionary. The dictionary returned contains the elements in the UNTL dict format.
[ "Convert", "a", "list", "of", "highwire", "elements", "into", "a", "dictionary", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L510-L522
train
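A sketch feeding untlpy2highwirepy output into highwirepy2dict; untl_root is as in the earlier sketches:

from pyuntl.untldoc import untlpy2highwirepy, highwirepy2dict

highwire_elements = untlpy2highwirepy(untl_root)
# Groups each element's content under element.name, in the UNTL dict format.
highwire_dict = highwirepy2dict(highwire_elements)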
unt-libraries/pyuntl
pyuntl/untldoc.py
generate_highwire_json
def generate_highwire_json(highwire_elements): """Convert highwire elements into a JSON structure. Returns data as a JSON formatted string. """ highwire_dict = highwirepy2dict(highwire_elements) return json.dumps(highwire_dict, sort_keys=True, indent=4)
python
def generate_highwire_json(highwire_elements): """Convert highwire elements into a JSON structure. Returns data as a JSON formatted string. """ highwire_dict = highwirepy2dict(highwire_elements) return json.dumps(highwire_dict, sort_keys=True, indent=4)
[ "def", "generate_highwire_json", "(", "highwire_elements", ")", ":", "highwire_dict", "=", "highwirepy2dict", "(", "highwire_elements", ")", "return", "json", ".", "dumps", "(", "highwire_dict", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")" ]
Convert highwire elements into a JSON structure. Returns data as a JSON formatted string.
[ "Convert", "highwire", "elements", "into", "a", "JSON", "structure", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L530-L536
train
unt-libraries/pyuntl
pyuntl/untldoc.py
dcdict2rdfpy
def dcdict2rdfpy(dc_dict): """Convert a DC dictionary into an RDF Python object.""" ark_prefix = 'ark: ark:' uri = URIRef('') # Create the RDF Python object. rdf_py = ConjunctiveGraph() # Set DC namespace definition. DC = Namespace('http://purl.org/dc/elements/1.1/') # Get the ark for the subject URI from the ark identifier. for element_value in dc_dict['identifier']: if element_value['content'].startswith(ark_prefix): uri = URIRef( element_value['content'].replace( ark_prefix, 'info:ark' ) ) # Bind the prefix/namespace pair. rdf_py.bind('dc', DC) # Get the values for each element in the ordered DC elements. for element_name in DC_ORDER: element_value_list = dc_dict.get(element_name, []) # Add the values to the RDF object. for element_value in element_value_list: # Handle URL values differently. if ('http' in element_value['content'] and ' ' not in element_value['content']): rdf_py.add(( uri, DC[element_name], URIRef(element_value['content']) )) else: rdf_py.add(( uri, DC[element_name], Literal(element_value['content']) )) return rdf_py
python
def dcdict2rdfpy(dc_dict): """Convert a DC dictionary into an RDF Python object.""" ark_prefix = 'ark: ark:' uri = URIRef('') # Create the RDF Python object. rdf_py = ConjunctiveGraph() # Set DC namespace definition. DC = Namespace('http://purl.org/dc/elements/1.1/') # Get the ark for the subject URI from the ark identifier. for element_value in dc_dict['identifier']: if element_value['content'].startswith(ark_prefix): uri = URIRef( element_value['content'].replace( ark_prefix, 'info:ark' ) ) # Bind the prefix/namespace pair. rdf_py.bind('dc', DC) # Get the values for each element in the ordered DC elements. for element_name in DC_ORDER: element_value_list = dc_dict.get(element_name, []) # Add the values to the RDF object. for element_value in element_value_list: # Handle URL values differently. if ('http' in element_value['content'] and ' ' not in element_value['content']): rdf_py.add(( uri, DC[element_name], URIRef(element_value['content']) )) else: rdf_py.add(( uri, DC[element_name], Literal(element_value['content']) )) return rdf_py
[ "def", "dcdict2rdfpy", "(", "dc_dict", ")", ":", "ark_prefix", "=", "'ark: ark:'", "uri", "=", "URIRef", "(", "''", ")", "# Create the RDF Python object.", "rdf_py", "=", "ConjunctiveGraph", "(", ")", "# Set DC namespace definition.", "DC", "=", "Namespace", "(", "'http://purl.org/dc/elements/1.1/'", ")", "# Get the ark for the subject URI from the ark identifier.", "for", "element_value", "in", "dc_dict", "[", "'identifier'", "]", ":", "if", "element_value", "[", "'content'", "]", ".", "startswith", "(", "ark_prefix", ")", ":", "uri", "=", "URIRef", "(", "element_value", "[", "'content'", "]", ".", "replace", "(", "ark_prefix", ",", "'info:ark'", ")", ")", "# Bind the prefix/namespace pair.", "rdf_py", ".", "bind", "(", "'dc'", ",", "DC", ")", "# Get the values for each element in the ordered DC elements.", "for", "element_name", "in", "DC_ORDER", ":", "element_value_list", "=", "dc_dict", ".", "get", "(", "element_name", ",", "[", "]", ")", "# Add the values to the RDF object.", "for", "element_value", "in", "element_value_list", ":", "# Handle URL values differently.", "if", "(", "'http'", "in", "element_value", "[", "'content'", "]", "and", "' '", "not", "in", "element_value", "[", "'content'", "]", ")", ":", "rdf_py", ".", "add", "(", "(", "uri", ",", "DC", "[", "element_name", "]", ",", "URIRef", "(", "element_value", "[", "'content'", "]", ")", ")", ")", "else", ":", "rdf_py", ".", "add", "(", "(", "uri", ",", "DC", "[", "element_name", "]", ",", "Literal", "(", "element_value", "[", "'content'", "]", ")", ")", ")", "return", "rdf_py" ]
Convert a DC dictionary into an RDF Python object.
[ "Convert", "a", "DC", "dictionary", "into", "an", "RDF", "Python", "object", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L545-L581
train
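A sketch for dcdict2rdfpy; note the function reads dc_dict['identifier'] directly, and only an identifier whose content starts with 'ark: ark:' yields a non-empty subject URI. The ARK value is hypothetical, and serialize() is standard rdflib graph behavior:

from pyuntl.untldoc import dcdict2rdfpy

dc_dict = {
    'identifier': [{'content': 'ark: ark:/67531/example'}],  # prefix must match 'ark: ark:'
    'title': [{'content': 'An Example Title'}],
}
rdf_graph = dcdict2rdfpy(dc_dict)
print rdf_graph.serialize(format='xml')  # Python 2, matching this module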
unt-libraries/pyuntl
pyuntl/untldoc.py
add_empty_fields
def add_empty_fields(untl_dict): """Add empty values if UNTL fields don't have values.""" # Iterate the ordered UNTL XML element list to determine # which elements are missing from the untl_dict. for element in UNTL_XML_ORDER: if element not in untl_dict: # Try to create an element with content and qualifier. try: py_object = PYUNTL_DISPATCH[element]( content='', qualifier='', ) except: # Try to create an element with content. try: py_object = PYUNTL_DISPATCH[element](content='') except: # Try to create an element without content. try: py_object = PYUNTL_DISPATCH[element]() except: raise PyuntlException( 'Could not add empty element field.' ) else: untl_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: untl_dict[element] = [{'content': ''}] else: untl_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: untl_dict[element] = [{'content': '', 'qualifier': ''}] else: untl_dict[element] = [{'content': {}, 'qualifier': ''}] # Add empty contained children. for child in py_object.contained_children: untl_dict[element][0].setdefault('content', {}) untl_dict[element][0]['content'][child] = '' return untl_dict
python
def add_empty_fields(untl_dict): """Add empty values if UNTL fields don't have values.""" # Iterate the ordered UNTL XML element list to determine # which elements are missing from the untl_dict. for element in UNTL_XML_ORDER: if element not in untl_dict: # Try to create an element with content and qualifier. try: py_object = PYUNTL_DISPATCH[element]( content='', qualifier='', ) except: # Try to create an element with content. try: py_object = PYUNTL_DISPATCH[element](content='') except: # Try to create an element without content. try: py_object = PYUNTL_DISPATCH[element]() except: raise PyuntlException( 'Could not add empty element field.' ) else: untl_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: untl_dict[element] = [{'content': ''}] else: untl_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: untl_dict[element] = [{'content': '', 'qualifier': ''}] else: untl_dict[element] = [{'content': {}, 'qualifier': ''}] # Add empty contained children. for child in py_object.contained_children: untl_dict[element][0].setdefault('content', {}) untl_dict[element][0]['content'][child] = '' return untl_dict
[ "def", "add_empty_fields", "(", "untl_dict", ")", ":", "# Iterate the ordered UNTL XML element list to determine", "# which elements are missing from the untl_dict.", "for", "element", "in", "UNTL_XML_ORDER", ":", "if", "element", "not", "in", "untl_dict", ":", "# Try to create an element with content and qualifier.", "try", ":", "py_object", "=", "PYUNTL_DISPATCH", "[", "element", "]", "(", "content", "=", "''", ",", "qualifier", "=", "''", ",", ")", "except", ":", "# Try to create an element with content.", "try", ":", "py_object", "=", "PYUNTL_DISPATCH", "[", "element", "]", "(", "content", "=", "''", ")", "except", ":", "# Try to create an element without content.", "try", ":", "py_object", "=", "PYUNTL_DISPATCH", "[", "element", "]", "(", ")", "except", ":", "raise", "PyuntlException", "(", "'Could not add empty element field.'", ")", "else", ":", "untl_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "{", "}", "}", "]", "else", ":", "# Handle element without children.", "if", "not", "py_object", ".", "contained_children", ":", "untl_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "''", "}", "]", "else", ":", "untl_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "{", "}", "}", "]", "else", ":", "# Handle element without children.", "if", "not", "py_object", ".", "contained_children", ":", "untl_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "''", ",", "'qualifier'", ":", "''", "}", "]", "else", ":", "untl_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "{", "}", ",", "'qualifier'", ":", "''", "}", "]", "# Add empty contained children.", "for", "child", "in", "py_object", ".", "contained_children", ":", "untl_dict", "[", "element", "]", "[", "0", "]", ".", "setdefault", "(", "'content'", ",", "{", "}", ")", "untl_dict", "[", "element", "]", "[", "0", "]", "[", "'content'", "]", "[", "child", "]", "=", "''", "return", "untl_dict" ]
Add empty values if UNTL fields don't have values.
[ "Add", "empty", "values", "if", "UNTL", "fields", "don", "t", "have", "values", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L606-L648
train
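A minimal usage sketch for add_empty_fields, assuming a partial UNTL dictionary shaped like the ones this module passes around; the 'title' entry and its qualifier are illustrative only (the ETD MS variant in the next record behaves analogously):

from pyuntl.untldoc import add_empty_fields

partial = {'title': [{'qualifier': 'officialtitle', 'content': 'An Example Title'}]}
complete = add_empty_fields(partial)
# Every element named in UNTL_XML_ORDER now has at least one entry,
# with '' (or {} plus empty child keys) as placeholder content.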
unt-libraries/pyuntl
pyuntl/untldoc.py
add_empty_etd_ms_fields
def add_empty_etd_ms_fields(etd_ms_dict): """Add empty values for ETD_MS fields that don't have values.""" # Determine which ETD MS elements are missing from the etd_ms_dict. for element in ETD_MS_ORDER: if element not in etd_ms_dict: # Try to create an element with content and qualifier. try: py_object = ETD_MS_CONVERSION_DISPATCH[element]( content='', qualifier='', ) except: # Try to create an element with content. try: py_object = ETD_MS_CONVERSION_DISPATCH[element](content='') except: # Try to create an element without content. try: py_object = ETD_MS_CONVERSION_DISPATCH[element]() except: raise PyuntlException( 'Could not add empty element field.' ) else: etd_ms_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: etd_ms_dict[element] = [{'content': ''}] else: etd_ms_dict[element] = [{'content': {}}] else: # Handle element without children. if py_object: if not py_object.contained_children: etd_ms_dict[element] = [{'content': '', 'qualifier': ''}] else: etd_ms_dict[element] = [{'content': {}, 'qualifier': ''}] # Add empty contained children. if py_object: for child in py_object.contained_children: etd_ms_dict[element][0].setdefault('content', {}) etd_ms_dict[element][0]['content'][child] = '' return etd_ms_dict
python
def add_empty_etd_ms_fields(etd_ms_dict): """Add empty values for ETD_MS fields that don't have values.""" # Determine which ETD MS elements are missing from the etd_ms_dict. for element in ETD_MS_ORDER: if element not in etd_ms_dict: # Try to create an element with content and qualifier. try: py_object = ETD_MS_CONVERSION_DISPATCH[element]( content='', qualifier='', ) except: # Try to create an element with content. try: py_object = ETD_MS_CONVERSION_DISPATCH[element](content='') except: # Try to create an element without content. try: py_object = ETD_MS_CONVERSION_DISPATCH[element]() except: raise PyuntlException( 'Could not add empty element field.' ) else: etd_ms_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: etd_ms_dict[element] = [{'content': ''}] else: etd_ms_dict[element] = [{'content': {}}] else: # Handle element without children. if py_object: if not py_object.contained_children: etd_ms_dict[element] = [{'content': '', 'qualifier': ''}] else: etd_ms_dict[element] = [{'content': {}, 'qualifier': ''}] # Add empty contained children. if py_object: for child in py_object.contained_children: etd_ms_dict[element][0].setdefault('content', {}) etd_ms_dict[element][0]['content'][child] = '' return etd_ms_dict
[ "def", "add_empty_etd_ms_fields", "(", "etd_ms_dict", ")", ":", "# Determine which ETD MS elements are missing from the etd_ms_dict.", "for", "element", "in", "ETD_MS_ORDER", ":", "if", "element", "not", "in", "etd_ms_dict", ":", "# Try to create an element with content and qualifier.", "try", ":", "py_object", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "element", "]", "(", "content", "=", "''", ",", "qualifier", "=", "''", ",", ")", "except", ":", "# Try to create an element with content.", "try", ":", "py_object", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "element", "]", "(", "content", "=", "''", ")", "except", ":", "# Try to create an element without content.", "try", ":", "py_object", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "element", "]", "(", ")", "except", ":", "raise", "PyuntlException", "(", "'Could not add empty element field.'", ")", "else", ":", "etd_ms_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "{", "}", "}", "]", "else", ":", "# Handle element without children.", "if", "not", "py_object", ".", "contained_children", ":", "etd_ms_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "''", "}", "]", "else", ":", "etd_ms_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "{", "}", "}", "]", "else", ":", "# Handle element without children.", "if", "py_object", ":", "if", "not", "py_object", ".", "contained_children", ":", "etd_ms_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "''", ",", "'qualifier'", ":", "''", "}", "]", "else", ":", "etd_ms_dict", "[", "element", "]", "=", "[", "{", "'content'", ":", "{", "}", ",", "'qualifier'", ":", "''", "}", "]", "# Add empty contained children.", "if", "py_object", ":", "for", "child", "in", "py_object", ".", "contained_children", ":", "etd_ms_dict", "[", "element", "]", "[", "0", "]", ".", "setdefault", "(", "'content'", ",", "{", "}", ")", "etd_ms_dict", "[", "element", "]", "[", "0", "]", "[", "'content'", "]", "[", "child", "]", "=", "''", "return", "etd_ms_dict" ]
Add empty values for ETD_MS fields that don't have values.
[ "Add", "empty", "values", "for", "ETD_MS", "fields", "that", "don", "t", "have", "values", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L651-L696
train
unt-libraries/pyuntl
pyuntl/untldoc.py
find_untl_errors
def find_untl_errors(untl_dict, **kwargs): """Add empty required qualifiers to create valid UNTL.""" fix_errors = kwargs.get('fix_errors', False) error_dict = {} # Loop through all elements that require qualifiers. for element_name in REQUIRES_QUALIFIER: # Loop through the existing elements that require qualifiers. for element in untl_dict.get(element_name, []): error_dict[element_name] = 'no_qualifier' # If it should be fixed, set an empty qualifier # if it doesn't have one. if fix_errors: element.setdefault('qualifier', '') # Combine the error dict and UNTL dict into a dict. found_data = { 'untl_dict': untl_dict, 'error_dict': error_dict, } return found_data
python
def find_untl_errors(untl_dict, **kwargs): """Add empty required qualifiers to create valid UNTL.""" fix_errors = kwargs.get('fix_errors', False) error_dict = {} # Loop through all elements that require qualifiers. for element_name in REQUIRES_QUALIFIER: # Loop through the existing elements that require qualifiers. for element in untl_dict.get(element_name, []): error_dict[element_name] = 'no_qualifier' # If it should be fixed, set an empty qualifier # if it doesn't have one. if fix_errors: element.setdefault('qualifier', '') # Combine the error dict and UNTL dict into a dict. found_data = { 'untl_dict': untl_dict, 'error_dict': error_dict, } return found_data
[ "def", "find_untl_errors", "(", "untl_dict", ",", "*", "*", "kwargs", ")", ":", "fix_errors", "=", "kwargs", ".", "get", "(", "'fix_errors'", ",", "False", ")", "error_dict", "=", "{", "}", "# Loop through all elements that require qualifiers.", "for", "element_name", "in", "REQUIRES_QUALIFIER", ":", "# Loop through the existing elements that require qualifers.", "for", "element", "in", "untl_dict", ".", "get", "(", "element_name", ",", "[", "]", ")", ":", "error_dict", "[", "element_name", "]", "=", "'no_qualifier'", "# If it should be fixed, set an empty qualifier", "# if it doesn't have one.", "if", "fix_errors", ":", "element", ".", "setdefault", "(", "'qualifier'", ",", "''", ")", "# Combine the error dict and UNTL dict into a dict.", "found_data", "=", "{", "'untl_dict'", ":", "untl_dict", ",", "'error_dict'", ":", "error_dict", ",", "}", "return", "found_data" ]
Add empty required qualifiers to create valid UNTL.
[ "Add", "empty", "required", "qualifiers", "to", "create", "valid", "UNTL", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L699-L717
train
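A short sketch of find_untl_errors with fix_errors enabled; untl_dict is assumed to be a UNTL metadata dictionary such as add_empty_fields returns, and the element name in the comment is hypothetical:

from pyuntl.untldoc import find_untl_errors

found = find_untl_errors(untl_dict, fix_errors=True)
untl_dict = found['untl_dict']  # flagged elements now carry qualifier=''
errors = found['error_dict']    # maps element names to 'no_qualifier', e.g. {'title': 'no_qualifier'}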
unt-libraries/pyuntl
pyuntl/untldoc.py
untlpy2etd_ms
def untlpy2etd_ms(untl_elements, **kwargs): """Convert the UNTL elements structure into an ETD_MS structure. kwargs can be passed to the function for certain effects. """ degree_children = {} date_exists = False seen_creation = False # Make the root element. etd_ms_root = ETD_MS_CONVERSION_DISPATCH['thesis']() for element in untl_elements.children: etd_ms_element = None # Convert the UNTL element to etd_ms where applicable. if element.tag in ETD_MS_CONVERSION_DISPATCH: # Create the etd_ms_element if the element's content # is stored in children nodes. if element.children: etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, children=element.children, ) # If we hit a degree element, make just that one. elif element.tag == 'degree': # Make a dict of the degree children information. if element.qualifier in ['name', 'level', 'discipline', 'grantor']: degree_children[element.qualifier] = element.content # For date elements, limit to first instance of creation date. elif element.tag == 'date': if element.qualifier == 'creation': # If the root already has a date, delete the child. for child in etd_ms_root.children: if child.tag == 'date': del child if not seen_creation: date_exists = False seen_creation = True if not date_exists: # Create the etd_ms element. etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, ) date_exists = True # It is a normal element. elif element.tag not in ['date', 'degree']: # Create the etd_ms_element. etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, ) # Add the element to the structure if the element exists. if etd_ms_element: etd_ms_root.add_child(etd_ms_element) if element.tag == 'meta': # Initialize ark to False because it may not exist yet. ark = False # Iterate through children and look for ark. for i in etd_ms_root.children: if i.tag == 'identifier' and i.content.startswith( 'http://digital.library.unt.edu/' ): ark = True # If the ark doesn't yet exist, try and create it. if not ark: # Reset for future tests. ark = False if element.qualifier == 'ark': ark = element.content if ark is not None: # Create the ark identifier element and add it. ark_identifier = ETD_MS_CONVERSION_DISPATCH['identifier']( ark=ark, ) etd_ms_root.add_child(ark_identifier) # If children exist for the degree, make a degree element. if degree_children: degree_element = ETD_MS_CONVERSION_DISPATCH['degree']() # When we have all the elements stored, add the children to the # degree node. degree_child_element = None for k, v in degree_children.iteritems(): # Create the individual classes for degrees. degree_child_element = ETD_MS_DEGREE_DISPATCH[k]( content=v, ) # If the keys in degree_children are valid, # add it to the child. if degree_child_element: degree_element.add_child(degree_child_element) etd_ms_root.add_child(degree_element) return etd_ms_root
python
def untlpy2etd_ms(untl_elements, **kwargs): """Convert the UNTL elements structure into an ETD_MS structure. kwargs can be passed to the function for certain effects. """ degree_children = {} date_exists = False seen_creation = False # Make the root element. etd_ms_root = ETD_MS_CONVERSION_DISPATCH['thesis']() for element in untl_elements.children: etd_ms_element = None # Convert the UNTL element to etd_ms where applicable. if element.tag in ETD_MS_CONVERSION_DISPATCH: # Create the etd_ms_element if the element's content # is stored in children nodes. if element.children: etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, children=element.children, ) # If we hit a degree element, make just that one. elif element.tag == 'degree': # Make a dict of the degree children information. if element.qualifier in ['name', 'level', 'discipline', 'grantor']: degree_children[element.qualifier] = element.content # For date elements, limit to first instance of creation date. elif element.tag == 'date': if element.qualifier == 'creation': # If the root already has a date, delete the child. for child in etd_ms_root.children: if child.tag == 'date': del child if not seen_creation: date_exists = False seen_creation = True if not date_exists: # Create the etd_ms element. etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, ) date_exists = True # It is a normal element. elif element.tag not in ['date', 'degree']: # Create the etd_ms_element. etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, ) # Add the element to the structure if the element exists. if etd_ms_element: etd_ms_root.add_child(etd_ms_element) if element.tag == 'meta': # Initialize ark to False because it may not exist yet. ark = False # Iterate through children and look for ark. for i in etd_ms_root.children: if i.tag == 'identifier' and i.content.startswith( 'http://digital.library.unt.edu/' ): ark = True # If the ark doesn't yet exist, try and create it. if not ark: # Reset for future tests. ark = False if element.qualifier == 'ark': ark = element.content if ark is not None: # Create the ark identifier element and add it. ark_identifier = ETD_MS_CONVERSION_DISPATCH['identifier']( ark=ark, ) etd_ms_root.add_child(ark_identifier) # If children exist for the degree, make a degree element. if degree_children: degree_element = ETD_MS_CONVERSION_DISPATCH['degree']() # When we have all the elements stored, add the children to the # degree node. degree_child_element = None for k, v in degree_children.iteritems(): # Create the individual classes for degrees. degree_child_element = ETD_MS_DEGREE_DISPATCH[k]( content=v, ) # If the keys in degree_children are valid, # add it to the child. if degree_child_element: degree_element.add_child(degree_child_element) etd_ms_root.add_child(degree_element) return etd_ms_root
[ "def", "untlpy2etd_ms", "(", "untl_elements", ",", "*", "*", "kwargs", ")", ":", "degree_children", "=", "{", "}", "date_exists", "=", "False", "seen_creation", "=", "False", "# Make the root element.", "etd_ms_root", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "'thesis'", "]", "(", ")", "for", "element", "in", "untl_elements", ".", "children", ":", "etd_ms_element", "=", "None", "# Convert the UNTL element to etd_ms where applicable.", "if", "element", ".", "tag", "in", "ETD_MS_CONVERSION_DISPATCH", ":", "# Create the etd_ms_element if the element's content", "# is stored in children nodes.", "if", "element", ".", "children", ":", "etd_ms_element", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "element", ".", "tag", "]", "(", "qualifier", "=", "element", ".", "qualifier", ",", "children", "=", "element", ".", "children", ",", ")", "# If we hit a degree element, make just that one.", "elif", "element", ".", "tag", "==", "'degree'", ":", "# Make a dict of the degree children information.", "if", "element", ".", "qualifier", "in", "[", "'name'", ",", "'level'", ",", "'discipline'", ",", "'grantor'", "]", ":", "degree_children", "[", "element", ".", "qualifier", "]", "=", "element", ".", "content", "# For date elements, limit to first instance of creation date.", "elif", "element", ".", "tag", "==", "'date'", ":", "if", "element", ".", "qualifier", "==", "'creation'", ":", "# If the root already has a date, delete the child.", "for", "child", "in", "etd_ms_root", ".", "children", ":", "if", "child", ".", "tag", "==", "'date'", ":", "del", "child", "if", "not", "seen_creation", ":", "date_exists", "=", "False", "seen_creation", "=", "True", "if", "not", "date_exists", ":", "# Create the etd_ms element.", "etd_ms_element", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "element", ".", "tag", "]", "(", "qualifier", "=", "element", ".", "qualifier", ",", "content", "=", "element", ".", "content", ",", ")", "date_exists", "=", "True", "# It is a normal element.", "elif", "element", ".", "tag", "not", "in", "[", "'date'", ",", "'degree'", "]", ":", "# Create the etd_ms_element.", "etd_ms_element", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "element", ".", "tag", "]", "(", "qualifier", "=", "element", ".", "qualifier", ",", "content", "=", "element", ".", "content", ",", ")", "# Add the element to the structure if the element exists.", "if", "etd_ms_element", ":", "etd_ms_root", ".", "add_child", "(", "etd_ms_element", ")", "if", "element", ".", "tag", "==", "'meta'", ":", "# Initialize ark to False because it may not exist yet.", "ark", "=", "False", "# Iterate through children and look for ark.", "for", "i", "in", "etd_ms_root", ".", "children", ":", "if", "i", ".", "tag", "==", "'identifier'", "and", "i", ".", "content", ".", "startswith", "(", "'http://digital.library.unt.edu/'", ")", ":", "ark", "=", "True", "# If the ark doesn't yet exist, try and create it.", "if", "not", "ark", ":", "# Reset for future tests.", "ark", "=", "False", "if", "element", ".", "qualifier", "==", "'ark'", ":", "ark", "=", "element", ".", "content", "if", "ark", "is", "not", "None", ":", "# Create the ark identifier element and add it.", "ark_identifier", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "'identifier'", "]", "(", "ark", "=", "ark", ",", ")", "etd_ms_root", ".", "add_child", "(", "ark_identifier", ")", "# If children exist for the degree, make a degree element.", "if", "degree_children", ":", "degree_element", "=", "ETD_MS_CONVERSION_DISPATCH", "[", "'degree'", "]", "(", ")", "# When we have all the elements stored, add the 
children to the", "# degree node.", "degree_child_element", "=", "None", "for", "k", ",", "v", "in", "degree_children", ".", "iteritems", "(", ")", ":", "# Create the individual classes for degrees.", "degree_child_element", "=", "ETD_MS_DEGREE_DISPATCH", "[", "k", "]", "(", "content", "=", "v", ",", ")", "# If the keys in degree_children are valid,", "# add it to the child.", "if", "degree_child_element", ":", "degree_element", ".", "add_child", "(", "degree_child_element", ")", "etd_ms_root", ".", "add_child", "(", "degree_element", ")", "return", "etd_ms_root" ]
Convert the UNTL elements structure into an ETD_MS structure. kwargs can be passed to the function for certain effects.
[ "Convert", "the", "UNTL", "elements", "structure", "into", "an", "ETD_MS", "structure", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L720-L813
train
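A hedged conversion sketch; it assumes untlxml2py (elsewhere in this module) parses a UNTL XML file into the element tree that untlpy2etd_ms expects, and the input filename is a placeholder:

from pyuntl.untldoc import untlxml2py, untlpy2etd_ms

untl_root = untlxml2py('record.untl.xml')  # placeholder input file
thesis_root = untlpy2etd_ms(untl_root)
# thesis_root is the ETD MS 'thesis' element with the converted children attached.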
unt-libraries/pyuntl
pyuntl/untldoc.py
etd_ms_dict2xmlfile
def etd_ms_dict2xmlfile(filename, metadata_dict): """Create an ETD MS XML file.""" try: f = open(filename, 'w') f.write(generate_etd_ms_xml(metadata_dict).encode("utf-8")) f.close() except: raise MetadataGeneratorException( 'Failed to create an XML file. Filename: %s' % (filename) )
python
def etd_ms_dict2xmlfile(filename, metadata_dict): """Create an ETD MS XML file.""" try: f = open(filename, 'w') f.write(generate_etd_ms_xml(metadata_dict).encode("utf-8")) f.close() except: raise MetadataGeneratorException( 'Failed to create an XML file. Filename: %s' % (filename) )
[ "def", "etd_ms_dict2xmlfile", "(", "filename", ",", "metadata_dict", ")", ":", "try", ":", "f", "=", "open", "(", "filename", ",", "'w'", ")", "f", ".", "write", "(", "generate_etd_ms_xml", "(", "metadata_dict", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", "f", ".", "close", "(", ")", "except", ":", "raise", "MetadataGeneratorException", "(", "'Failed to create an XML file. Filename: %s'", "%", "(", "filename", ")", ")" ]
Create an ETD MS XML file.
[ "Create", "an", "ETD", "MS", "XML", "file", "." ]
f92413302897dab948aac18ee9e482ace0187bd4
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untldoc.py#L826-L835
train
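A minimal sketch, assuming etd_ms_dict is an ETD MS dictionary such as add_empty_etd_ms_fields returns; the output path is a placeholder:

from pyuntl.untldoc import etd_ms_dict2xmlfile

etd_ms_dict2xmlfile('/tmp/record.etd_ms.xml', etd_ms_dict)
# Raises MetadataGeneratorException if the file cannot be written.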
rhayes777/PyAutoFit
autofit/tools/fit.py
DataFit.signal_to_noise_map
def signal_to_noise_map(self): """The signal-to-noise map of the data and noise map being fitted.""" signal_to_noise_map = np.divide(self.data, self.noise_map) signal_to_noise_map[signal_to_noise_map < 0] = 0 return signal_to_noise_map
python
def signal_to_noise_map(self): """The signal-to-noise map of the data and noise map being fitted.""" signal_to_noise_map = np.divide(self.data, self.noise_map) signal_to_noise_map[signal_to_noise_map < 0] = 0 return signal_to_noise_map
[ "def", "signal_to_noise_map", "(", "self", ")", ":", "signal_to_noise_map", "=", "np", ".", "divide", "(", "self", ".", "data", ",", "self", ".", "noise_map", ")", "signal_to_noise_map", "[", "signal_to_noise_map", "<", "0", "]", "=", "0", "return", "signal_to_noise_map" ]
The signal-to-noise map of the data and noise map being fitted.
[ "The", "signal", "-", "to", "-", "noise_map", "of", "the", "data", "and", "noise", "-", "map", "which", "are", "fitted", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/fit.py#L59-L63
train
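The computation itself is plain NumPy, so it can be checked standalone; the arrays below are made up:

import numpy as np

data = np.array([2.0, -1.0, 6.0])
noise_map = np.array([1.0, 2.0, 3.0])
snr = np.divide(data, noise_map)
snr[snr < 0] = 0  # negative ratios are clipped to zero, exactly as in the method above
# snr -> array([2., 0., 2.])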
althonos/moclo
moclo/moclo/core/parts.py
AbstractPart.structure
def structure(cls): # type: () -> Text """Get the part structure, as a DNA regex pattern. The structure of most parts can be obtained automatically from the part signature and the restriction enzyme used in the Golden Gate assembly. Warning: If overloading this method, the returned pattern must include 3 capture groups to capture the following features: 1. The upstream (5') overhang sequence 2. The vector placeholder sequence 3. The downstream (3') overhang sequence """ if cls.signature is NotImplemented: raise NotImplementedError("no signature defined") up = cls.cutter.elucidate() down = str(Seq(up).reverse_complement()) ovhg = cls.cutter.ovhgseq upsig, downsig = cls.signature if cls.cutter.is_5overhang(): upsite = "^{}_".format(ovhg) downsite = "_{}^".format(Seq(ovhg).reverse_complement()) else: upsite = "_{}^".format(ovhg) downsite = "^{}_".format(Seq(ovhg).reverse_complement()) if issubclass(cls, AbstractModule): return "".join( [ up.replace(upsite, "({})(".format(upsig)), "N*", down.replace(downsite, ")({})".format(downsig)), ] ) elif issubclass(cls, AbstractVector): return "".join( [ down.replace(downsite, "({})(".format(downsig)), "N*", up.replace(upsite, ")({})".format(upsig)), ] ) else: raise RuntimeError("Part must be either a module or a vector!")
python
def structure(cls): # type: () -> Text """Get the part structure, as a DNA regex pattern. The structure of most parts can be obtained automatically from the part signature and the restriction enzyme used in the Golden Gate assembly. Warning: If overloading this method, the returned pattern must include 3 capture groups to capture the following features: 1. The upstream (5') overhang sequence 2. The vector placeholder sequence 3. The downstream (3') overhang sequence """ if cls.signature is NotImplemented: raise NotImplementedError("no signature defined") up = cls.cutter.elucidate() down = str(Seq(up).reverse_complement()) ovhg = cls.cutter.ovhgseq upsig, downsig = cls.signature if cls.cutter.is_5overhang(): upsite = "^{}_".format(ovhg) downsite = "_{}^".format(Seq(ovhg).reverse_complement()) else: upsite = "_{}^".format(ovhg) downsite = "^{}_".format(Seq(ovhg).reverse_complement()) if issubclass(cls, AbstractModule): return "".join( [ up.replace(upsite, "({})(".format(upsig)), "N*", down.replace(downsite, ")({})".format(downsig)), ] ) elif issubclass(cls, AbstractVector): return "".join( [ down.replace(downsite, "({})(".format(downsig)), "N*", up.replace(upsite, ")({})".format(upsig)), ] ) else: raise RuntimeError("Part must be either a module or a vector!")
[ "def", "structure", "(", "cls", ")", ":", "# type: () -> Text", "if", "cls", ".", "signature", "is", "NotImplemented", ":", "raise", "NotImplementedError", "(", "\"no signature defined\"", ")", "up", "=", "cls", ".", "cutter", ".", "elucidate", "(", ")", "down", "=", "str", "(", "Seq", "(", "up", ")", ".", "reverse_complement", "(", ")", ")", "ovhg", "=", "cls", ".", "cutter", ".", "ovhgseq", "upsig", ",", "downsig", "=", "cls", ".", "signature", "if", "cls", ".", "cutter", ".", "is_5overhang", "(", ")", ":", "upsite", "=", "\"^{}_\"", ".", "format", "(", "ovhg", ")", "downsite", "=", "\"_{}^\"", ".", "format", "(", "Seq", "(", "ovhg", ")", ".", "reverse_complement", "(", ")", ")", "else", ":", "upsite", "=", "\"_{}^\"", ".", "format", "(", "ovhg", ")", "downsite", "=", "\"^{}_\"", ".", "format", "(", "Seq", "(", "ovhg", ")", ".", "reverse_complement", "(", ")", ")", "if", "issubclass", "(", "cls", ",", "AbstractModule", ")", ":", "return", "\"\"", ".", "join", "(", "[", "up", ".", "replace", "(", "upsite", ",", "\"({})(\"", ".", "format", "(", "upsig", ")", ")", ",", "\"N*\"", ",", "down", ".", "replace", "(", "downsite", ",", "\")({})\"", ".", "format", "(", "downsig", ")", ")", ",", "]", ")", "elif", "issubclass", "(", "cls", ",", "AbstractVector", ")", ":", "return", "\"\"", ".", "join", "(", "[", "down", ".", "replace", "(", "downsite", ",", "\"({})(\"", ".", "format", "(", "downsig", ")", ")", ",", "\"N*\"", ",", "up", ".", "replace", "(", "upsite", ",", "\")({})\"", ".", "format", "(", "upsig", ")", ")", ",", "]", ")", "else", ":", "raise", "RuntimeError", "(", "\"Part must be either a module or a vector!\"", ")" ]
Get the part structure, as a DNA regex pattern. The structure of most parts can be obtained automatically from the part signature and the restriction enzyme used in the Golden Gate assembly. Warning: If overloading this method, the returned pattern must include 3 capture groups to capture the following features: 1. The upstream (5') overhang sequence 2. The vector placeholder sequence 3. The downstream (3') overhang sequence
[ "Get", "the", "part", "structure", "as", "a", "DNA", "regex", "pattern", "." ]
28a03748df8a2fa43f0c0c8098ca64d11559434e
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/parts.py#L49-L99
train
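A hedged sketch of retrieving the pattern; MyModule stands in for a hypothetical concrete part class, i.e. one that defines both `cutter` and `signature`:

pattern = MyModule.structure()  # MyModule is a placeholder subclass name
# The result is a DNA-alphabet pattern built from the enzyme site and the part
# signature, with three capture groups in order:
#   (1) upstream overhang  (2) vector placeholder  (3) downstream overhang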
althonos/moclo
moclo/moclo/core/parts.py
AbstractPart.characterize
def characterize(cls, record): """Load the record in a concrete subclass of this type. """ classes = list(cls.__subclasses__()) if not isabstract(cls): classes.append(cls) for subclass in classes: entity = subclass(record) if entity.is_valid(): return entity raise RuntimeError("could not find the type for '{}'".format(record.id))
python
def characterize(cls, record): """Load the record in a concrete subclass of this type. """ classes = list(cls.__subclasses__()) if not isabstract(cls): classes.append(cls) for subclass in classes: entity = subclass(record) if entity.is_valid(): return entity raise RuntimeError("could not find the type for '{}'".format(record.id))
[ "def", "characterize", "(", "cls", ",", "record", ")", ":", "classes", "=", "list", "(", "cls", ".", "__subclasses__", "(", ")", ")", "if", "not", "isabstract", "(", "cls", ")", ":", "classes", ".", "append", "(", "cls", ")", "for", "subclass", "in", "classes", ":", "entity", "=", "subclass", "(", "record", ")", "if", "entity", ".", "is_valid", "(", ")", ":", "return", "entity", "raise", "RuntimeError", "(", "\"could not find the type for '{}'\"", ".", "format", "(", "record", ".", "id", ")", ")" ]
Load the record in a concrete subclass of this type.
[ "Load", "the", "record", "in", "a", "concrete", "subclass", "of", "this", "type", "." ]
28a03748df8a2fa43f0c0c8098ca64d11559434e
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/parts.py#L102-L112
train
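A sketch of the dispatch behaviour with hypothetical names; characterize tries each concrete subclass (plus the class itself when concrete) and returns the first whose is_valid() check passes:

entity = MyPartType.characterize(record)  # MyPartType and record are placeholders
print(type(entity).__name__)              # the concrete subclass that matched
# A RuntimeError is raised when no subclass accepts the record.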
NikolayDachev/jadm
lib/paramiko-1.14.1/paramiko/transport.py
Transport.global_request
def global_request(self, kind, data=None, wait=True): """ Make a global request to the remote host. These are normally extensions to the SSH2 protocol. :param str kind: name of the request. :param tuple data: an optional tuple containing additional data to attach to the request. :param bool wait: ``True`` if this method should not return until a response is received; ``False`` otherwise. :return: a `.Message` containing possible additional data if the request was successful (or an empty `.Message` if ``wait`` was ``False``); ``None`` if the request was denied. """ if wait: self.completion_event = threading.Event() m = Message() m.add_byte(cMSG_GLOBAL_REQUEST) m.add_string(kind) m.add_boolean(wait) if data is not None: m.add(*data) self._log(DEBUG, 'Sending global request "%s"' % kind) self._send_user_message(m) if not wait: return None while True: self.completion_event.wait(0.1) if not self.active: return None if self.completion_event.isSet(): break return self.global_response
python
def global_request(self, kind, data=None, wait=True): """ Make a global request to the remote host. These are normally extensions to the SSH2 protocol. :param str kind: name of the request. :param tuple data: an optional tuple containing additional data to attach to the request. :param bool wait: ``True`` if this method should not return until a response is received; ``False`` otherwise. :return: a `.Message` containing possible additional data if the request was successful (or an empty `.Message` if ``wait`` was ``False``); ``None`` if the request was denied. """ if wait: self.completion_event = threading.Event() m = Message() m.add_byte(cMSG_GLOBAL_REQUEST) m.add_string(kind) m.add_boolean(wait) if data is not None: m.add(*data) self._log(DEBUG, 'Sending global request "%s"' % kind) self._send_user_message(m) if not wait: return None while True: self.completion_event.wait(0.1) if not self.active: return None if self.completion_event.isSet(): break return self.global_response
[ "def", "global_request", "(", "self", ",", "kind", ",", "data", "=", "None", ",", "wait", "=", "True", ")", ":", "if", "wait", ":", "self", ".", "completion_event", "=", "threading", ".", "Event", "(", ")", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "cMSG_GLOBAL_REQUEST", ")", "m", ".", "add_string", "(", "kind", ")", "m", ".", "add_boolean", "(", "wait", ")", "if", "data", "is", "not", "None", ":", "m", ".", "add", "(", "*", "data", ")", "self", ".", "_log", "(", "DEBUG", ",", "'Sending global request \"%s\"'", "%", "kind", ")", "self", ".", "_send_user_message", "(", "m", ")", "if", "not", "wait", ":", "return", "None", "while", "True", ":", "self", ".", "completion_event", ".", "wait", "(", "0.1", ")", "if", "not", "self", ".", "active", ":", "return", "None", "if", "self", ".", "completion_event", ".", "isSet", "(", ")", ":", "break", "return", "self", ".", "global_response" ]
Make a global request to the remote host. These are normally extensions to the SSH2 protocol. :param str kind: name of the request. :param tuple data: an optional tuple containing additional data to attach to the request. :param bool wait: ``True`` if this method should not return until a response is received; ``False`` otherwise. :return: a `.Message` containing possible additional data if the request was successful (or an empty `.Message` if ``wait`` was ``False``); ``None`` if the request was denied.
[ "Make", "a", "global", "request", "to", "the", "remote", "host", ".", "These", "are", "normally", "extensions", "to", "the", "SSH2", "protocol", "." ]
12bb550445edfcd87506f7cba7a6a35d413c5511
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/transport.py#L777-L812
train
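A usage sketch against a live connection; the host and credentials are placeholders, and '[email protected]' is one of the standard OpenSSH global request names:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('ssh.example.com', username='user', password='secret')  # placeholders
transport = client.get_transport()
reply = transport.global_request('[email protected]', wait=True)
if reply is None:
    print('request was denied (or the session closed)')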
NikolayDachev/jadm
lib/paramiko-1.14.1/paramiko/transport.py
Transport._activate_inbound
def _activate_inbound(self): """switch on newly negotiated encryption parameters for inbound traffic""" block_size = self._cipher_info[self.remote_cipher]['block-size'] if self.server_mode: IV_in = self._compute_key('A', block_size) key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size']) else: IV_in = self._compute_key('B', block_size) key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size']) engine = self._get_cipher(self.remote_cipher, key_in, IV_in) mac_size = self._mac_info[self.remote_mac]['size'] mac_engine = self._mac_info[self.remote_mac]['class'] # initial mac keys are done in the hash's natural size (not the potentially truncated # transmission size) if self.server_mode: mac_key = self._compute_key('E', mac_engine().digest_size) else: mac_key = self._compute_key('F', mac_engine().digest_size) self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key) compress_in = self._compression_info[self.remote_compression][1] if (compress_in is not None) and ((self.remote_compression != '[email protected]') or self.authenticated): self._log(DEBUG, 'Switching on inbound compression ...') self.packetizer.set_inbound_compressor(compress_in())
python
def _activate_inbound(self): """switch on newly negotiated encryption parameters for inbound traffic""" block_size = self._cipher_info[self.remote_cipher]['block-size'] if self.server_mode: IV_in = self._compute_key('A', block_size) key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size']) else: IV_in = self._compute_key('B', block_size) key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size']) engine = self._get_cipher(self.remote_cipher, key_in, IV_in) mac_size = self._mac_info[self.remote_mac]['size'] mac_engine = self._mac_info[self.remote_mac]['class'] # initial mac keys are done in the hash's natural size (not the potentially truncated # transmission size) if self.server_mode: mac_key = self._compute_key('E', mac_engine().digest_size) else: mac_key = self._compute_key('F', mac_engine().digest_size) self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key) compress_in = self._compression_info[self.remote_compression][1] if (compress_in is not None) and ((self.remote_compression != '[email protected]') or self.authenticated): self._log(DEBUG, 'Switching on inbound compression ...') self.packetizer.set_inbound_compressor(compress_in())
[ "def", "_activate_inbound", "(", "self", ")", ":", "block_size", "=", "self", ".", "_cipher_info", "[", "self", ".", "remote_cipher", "]", "[", "'block-size'", "]", "if", "self", ".", "server_mode", ":", "IV_in", "=", "self", ".", "_compute_key", "(", "'A'", ",", "block_size", ")", "key_in", "=", "self", ".", "_compute_key", "(", "'C'", ",", "self", ".", "_cipher_info", "[", "self", ".", "remote_cipher", "]", "[", "'key-size'", "]", ")", "else", ":", "IV_in", "=", "self", ".", "_compute_key", "(", "'B'", ",", "block_size", ")", "key_in", "=", "self", ".", "_compute_key", "(", "'D'", ",", "self", ".", "_cipher_info", "[", "self", ".", "remote_cipher", "]", "[", "'key-size'", "]", ")", "engine", "=", "self", ".", "_get_cipher", "(", "self", ".", "remote_cipher", ",", "key_in", ",", "IV_in", ")", "mac_size", "=", "self", ".", "_mac_info", "[", "self", ".", "remote_mac", "]", "[", "'size'", "]", "mac_engine", "=", "self", ".", "_mac_info", "[", "self", ".", "remote_mac", "]", "[", "'class'", "]", "# initial mac keys are done in the hash's natural size (not the potentially truncated", "# transmission size)", "if", "self", ".", "server_mode", ":", "mac_key", "=", "self", ".", "_compute_key", "(", "'E'", ",", "mac_engine", "(", ")", ".", "digest_size", ")", "else", ":", "mac_key", "=", "self", ".", "_compute_key", "(", "'F'", ",", "mac_engine", "(", ")", ".", "digest_size", ")", "self", ".", "packetizer", ".", "set_inbound_cipher", "(", "engine", ",", "block_size", ",", "mac_engine", ",", "mac_size", ",", "mac_key", ")", "compress_in", "=", "self", ".", "_compression_info", "[", "self", ".", "remote_compression", "]", "[", "1", "]", "if", "(", "compress_in", "is", "not", "None", ")", "and", "(", "(", "self", ".", "remote_compression", "!=", "'[email protected]'", ")", "or", "self", ".", "authenticated", ")", ":", "self", ".", "_log", "(", "DEBUG", ",", "'Switching on inbound compression ...'", ")", "self", ".", "packetizer", ".", "set_inbound_compressor", "(", "compress_in", "(", ")", ")" ]
switch on newly negotiated encryption parameters for inbound traffic
[ "switch", "on", "newly", "negotiated", "encryption", "parameters", "for", "inbound", "traffic" ]
12bb550445edfcd87506f7cba7a6a35d413c5511
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/transport.py#L1702-L1724
train
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.enable_user
def enable_user(self, user): """Enable the root account on the remote host. Since the host may have been deployed using a Cloud image, it may not be possible to use the 'root' account. This method ensures the root account is enabled; if it is not, it will try to get the name of the admin user and use it to re-enable the root account. """ if user in self.ssh_pool._ssh_clients: return if user == 'root': _root_ssh_client = ssh.SshClient( hostname=self.hostname, user='root', key_filename=self._key_filename, via_ip=self.via_ip) # connect as a root user _root_ssh_client.start() result, _ = _root_ssh_client.run('uname -a') image_user = None # check if root is not allowed if 'Please login as the user "cloud-user"' in result: image_user = 'cloud-user' _root_ssh_client.stop() elif 'Please login as the user "fedora" rather than the user "root"' in result: image_user = 'fedora' _root_ssh_client.stop() elif 'Please login as the user "centos" rather than the user "root"' in result: image_user = 'centos' _root_ssh_client.stop() if image_user: self.enable_user(image_user) LOG.info('enabling the root user') _cmd = "sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys" self.ssh_pool.run(image_user, _cmd) _root_ssh_client.start() self.ssh_pool.add_ssh_client('root', _root_ssh_client) return # add the cloud user to the ssh pool self.ssh_pool.build_ssh_client( hostname=self.hostname, user=user, key_filename=self._key_filename, via_ip=self.via_ip)
python
def enable_user(self, user): """Enable the root account on the remote host. Since the host may have been deployed using a Cloud image, it may not be possible to use the 'root' account. This method ensures the root account is enabled; if it is not, it will try to get the name of the admin user and use it to re-enable the root account. """ if user in self.ssh_pool._ssh_clients: return if user == 'root': _root_ssh_client = ssh.SshClient( hostname=self.hostname, user='root', key_filename=self._key_filename, via_ip=self.via_ip) # connect as a root user _root_ssh_client.start() result, _ = _root_ssh_client.run('uname -a') image_user = None # check if root is not allowed if 'Please login as the user "cloud-user"' in result: image_user = 'cloud-user' _root_ssh_client.stop() elif 'Please login as the user "fedora" rather than the user "root"' in result: image_user = 'fedora' _root_ssh_client.stop() elif 'Please login as the user "centos" rather than the user "root"' in result: image_user = 'centos' _root_ssh_client.stop() if image_user: self.enable_user(image_user) LOG.info('enabling the root user') _cmd = "sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys" self.ssh_pool.run(image_user, _cmd) _root_ssh_client.start() self.ssh_pool.add_ssh_client('root', _root_ssh_client) return # add the cloud user to the ssh pool self.ssh_pool.build_ssh_client( hostname=self.hostname, user=user, key_filename=self._key_filename, via_ip=self.via_ip)
[ "def", "enable_user", "(", "self", ",", "user", ")", ":", "if", "user", "in", "self", ".", "ssh_pool", ".", "_ssh_clients", ":", "return", "if", "user", "==", "'root'", ":", "_root_ssh_client", "=", "ssh", ".", "SshClient", "(", "hostname", "=", "self", ".", "hostname", ",", "user", "=", "'root'", ",", "key_filename", "=", "self", ".", "_key_filename", ",", "via_ip", "=", "self", ".", "via_ip", ")", "# connect as a root user", "_root_ssh_client", ".", "start", "(", ")", "result", ",", "_", "=", "_root_ssh_client", ".", "run", "(", "'uname -a'", ")", "image_user", "=", "None", "# check if root is not allowed", "if", "'Please login as the user \"cloud-user\"'", "in", "result", ":", "image_user", "=", "'cloud-user'", "_root_ssh_client", ".", "stop", "(", ")", "elif", "'Please login as the user \"fedora\" rather than the user \"root\"'", "in", "result", ":", "image_user", "=", "'fedora'", "_root_ssh_client", ".", "stop", "(", ")", "elif", "'Please login as the user \"centos\" rather than the user \"root\"'", "in", "result", ":", "image_user", "=", "'centos'", "_root_ssh_client", ".", "stop", "(", ")", "if", "image_user", ":", "self", ".", "enable_user", "(", "image_user", ")", "LOG", ".", "info", "(", "'enabling the root user'", ")", "_cmd", "=", "\"sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys\"", "self", ".", "ssh_pool", ".", "run", "(", "image_user", ",", "_cmd", ")", "_root_ssh_client", ".", "start", "(", ")", "self", ".", "ssh_pool", ".", "add_ssh_client", "(", "'root'", ",", "_root_ssh_client", ")", "return", "# add the cloud user to the ssh pool", "self", ".", "ssh_pool", ".", "build_ssh_client", "(", "hostname", "=", "self", ".", "hostname", ",", "user", "=", "user", ",", "key_filename", "=", "self", ".", "_key_filename", ",", "via_ip", "=", "self", ".", "via_ip", ")" ]
Enable the root account on the remote host. Since the host may have been deployed using a Cloud image, it may not be possible to use the 'root' account. This method ensures the root account is enabled; if it is not, it will try to get the name of the admin user and use it to re-enable the root account.
[ "Enable", "the", "root", "account", "on", "the", "remote", "host", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L54-L103
train
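A hedged sketch; the Server constructor arguments shown are assumptions based on the attributes this method reads (hostname, key file, via_ip), not a confirmed signature:

from tripleohelper.server import Server

server = Server(hostname='192.0.2.10', key_filename='~/.ssh/id_rsa')  # assumed signature
server.enable_user('root')  # re-enables root via cloud-user/fedora/centos when needed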
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.send_file
def send_file(self, local_path, remote_path, user='root', unix_mode=None): """Upload a local file on the remote host. """ self.enable_user(user) return self.ssh_pool.send_file(user, local_path, remote_path, unix_mode=unix_mode)
python
def send_file(self, local_path, remote_path, user='root', unix_mode=None): """Upload a local file on the remote host. """ self.enable_user(user) return self.ssh_pool.send_file(user, local_path, remote_path, unix_mode=unix_mode)
[ "def", "send_file", "(", "self", ",", "local_path", ",", "remote_path", ",", "user", "=", "'root'", ",", "unix_mode", "=", "None", ")", ":", "self", ".", "enable_user", "(", "user", ")", "return", "self", ".", "ssh_pool", ".", "send_file", "(", "user", ",", "local_path", ",", "remote_path", ",", "unix_mode", "=", "unix_mode", ")" ]
Upload a local file on the remote host.
[ "Upload", "a", "local", "file", "on", "the", "remote", "host", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L105-L109
train
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.send_dir
def send_dir(self, local_path, remote_path, user='root'): """Upload a directory on the remote host. """ self.enable_user(user) return self.ssh_pool.send_dir(user, local_path, remote_path)
python
def send_dir(self, local_path, remote_path, user='root'): """Upload a directory on the remote host. """ self.enable_user(user) return self.ssh_pool.send_dir(user, local_path, remote_path)
[ "def", "send_dir", "(", "self", ",", "local_path", ",", "remote_path", ",", "user", "=", "'root'", ")", ":", "self", ".", "enable_user", "(", "user", ")", "return", "self", ".", "ssh_pool", ".", "send_dir", "(", "user", ",", "local_path", ",", "remote_path", ")" ]
Upload a directory on the remote host.
[ "Upload", "a", "directory", "on", "the", "remote", "host", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L111-L115
train
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.create_file
def create_file(self, path, content, mode='w', user='root'): """Create a file on the remote host. """ self.enable_user(user) return self.ssh_pool.create_file(user, path, content, mode)
python
def create_file(self, path, content, mode='w', user='root'): """Create a file on the remote host. """ self.enable_user(user) return self.ssh_pool.create_file(user, path, content, mode)
[ "def", "create_file", "(", "self", ",", "path", ",", "content", ",", "mode", "=", "'w'", ",", "user", "=", "'root'", ")", ":", "self", ".", "enable_user", "(", "user", ")", "return", "self", ".", "ssh_pool", ".", "create_file", "(", "user", ",", "path", ",", "content", ",", "mode", ")" ]
Create a file on the remote host.
[ "Create", "a", "file", "on", "the", "remote", "host", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L121-L125
train
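A short sketch reusing the server object from the enable_user example above; the mode argument is forwarded as-is, and the 'w' (truncate) versus 'a' (append) semantics shown are an assumption:

server.create_file('/etc/sysctl.d/99-example.conf', 'net.ipv4.ip_forward = 1\n')
server.create_file('/root/notes.txt', 'appended line\n', mode='a', user='root')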
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.yum_install
def yum_install(self, packages, ignore_error=False): """Install some packages on the remote host. :param packages: list of packages to install. """ return self.run('yum install -y --quiet ' + ' '.join(packages), ignore_error=ignore_error, retry=5)
python
def yum_install(self, packages, ignore_error=False): """Install some packages on the remote host. :param packages: list of packages to install. """ return self.run('yum install -y --quiet ' + ' '.join(packages), ignore_error=ignore_error, retry=5)
[ "def", "yum_install", "(", "self", ",", "packages", ",", "ignore_error", "=", "False", ")", ":", "return", "self", ".", "run", "(", "'yum install -y --quiet '", "+", "' '", ".", "join", "(", "packages", ")", ",", "ignore_error", "=", "ignore_error", ",", "retry", "=", "5", ")" ]
Install some packages on the remote host. :param packages: list of packages to install.
[ "Install", "some", "packages", "on", "the", "remote", "host", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L141-L146
train
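Continuing with the same hypothetical server object; the package names are placeholders:

server.yum_install(['tmux', 'vim-enhanced'])             # retried up to 5 times on failure
server.yum_install(['optional-pkg'], ignore_error=True)  # do not fail the run if unavailable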
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.rhsm_register
def rhsm_register(self, rhsm): """Register the host on the RHSM. :param rhsm: a dict of parameters (login, password, pool_id) """ # Get rhsm credentials login = rhsm.get('login') password = rhsm.get('password', os.environ.get('RHN_PW')) pool_id = rhsm.get('pool_id') # Ensure the RHEL beta channels are disabled self.run('rm /etc/pki/product/69.pem', ignore_error=True) custom_log = 'subscription-manager register --username %s --password *******' % login self.run( 'subscription-manager register --username %s --password "%s"' % ( login, password), success_status=(0, 64), custom_log=custom_log, retry=3) if pool_id: self.run('subscription-manager attach --pool %s' % pool_id) else: self.run('subscription-manager attach --auto') self.rhsm_active = True
python
def rhsm_register(self, rhsm): """Register the host on the RHSM. :param rhsm: a dict of parameters (login, password, pool_id) """ # Get rhsm credentials login = rhsm.get('login') password = rhsm.get('password', os.environ.get('RHN_PW')) pool_id = rhsm.get('pool_id') # Ensure the RHEL beta channels are disabled self.run('rm /etc/pki/product/69.pem', ignore_error=True) custom_log = 'subscription-manager register --username %s --password *******' % login self.run( 'subscription-manager register --username %s --password "%s"' % ( login, password), success_status=(0, 64), custom_log=custom_log, retry=3) if pool_id: self.run('subscription-manager attach --pool %s' % pool_id) else: self.run('subscription-manager attach --auto') self.rhsm_active = True
[ "def", "rhsm_register", "(", "self", ",", "rhsm", ")", ":", "# Get rhsm credentials", "login", "=", "rhsm", ".", "get", "(", "'login'", ")", "password", "=", "rhsm", ".", "get", "(", "'password'", ",", "os", ".", "environ", ".", "get", "(", "'RHN_PW'", ")", ")", "pool_id", "=", "rhsm", ".", "get", "(", "'pool_id'", ")", "# Ensure the RHEL beta channel are disabled", "self", ".", "run", "(", "'rm /etc/pki/product/69.pem'", ",", "ignore_error", "=", "True", ")", "custom_log", "=", "'subscription-manager register --username %s --password *******'", "%", "login", "self", ".", "run", "(", "'subscription-manager register --username %s --password \"%s\"'", "%", "(", "login", ",", "password", ")", ",", "success_status", "=", "(", "0", ",", "64", ")", ",", "custom_log", "=", "custom_log", ",", "retry", "=", "3", ")", "if", "pool_id", ":", "self", ".", "run", "(", "'subscription-manager attach --pool %s'", "%", "pool_id", ")", "else", ":", "self", ".", "run", "(", "'subscription-manager attach --auto'", ")", "self", ".", "rhsm_active", "=", "True" ]
Register the host on the RHSM. :param rhsm: a dict of parameters (login, password, pool_id)
[ "Register", "the", "host", "on", "the", "RHSM", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L155-L177
train
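A sketch with placeholder credentials; when 'password' is omitted the method falls back to the RHN_PW environment variable, and without 'pool_id' it auto-attaches:

server.rhsm_register({
    'login': 'rhsm-user',            # placeholder
    'password': 'secret',            # optional: defaults to os.environ['RHN_PW']
    'pool_id': 'abcdef0123456789',   # optional placeholder: auto-attach when omitted
})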
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.enable_repositories
def enable_repositories(self, repositories): """Enable a list of RHSM repositories. :param repositories: a list of dicts in this format: [{'type': 'rhsm_channel', 'name': 'rhel-7-server-rpms'}] """ for r in repositories: if r['type'] != 'rhsm_channel': continue if r['name'] not in self.rhsm_channels: self.rhsm_channels.append(r['name']) if self.rhsm_active: subscription_cmd = "subscription-manager repos '--disable=*' --enable=" + ' --enable='.join( self.rhsm_channels) self.run(subscription_cmd) repo_files = [r for r in repositories if r['type'] == 'yum_repo'] for repo_file in repo_files: self.create_file(repo_file['dest'], repo_file['content']) packages = [r['name'] for r in repositories if r['type'] == 'package'] if packages: self.yum_install(packages)
python
def enable_repositories(self, repositories): """Enable a list of RHSM repositories. :param repositories: a list of dicts in this format: [{'type': 'rhsm_channel', 'name': 'rhel-7-server-rpms'}] """ for r in repositories: if r['type'] != 'rhsm_channel': continue if r['name'] not in self.rhsm_channels: self.rhsm_channels.append(r['name']) if self.rhsm_active: subscription_cmd = "subscription-manager repos '--disable=*' --enable=" + ' --enable='.join( self.rhsm_channels) self.run(subscription_cmd) repo_files = [r for r in repositories if r['type'] == 'yum_repo'] for repo_file in repo_files: self.create_file(repo_file['dest'], repo_file['content']) packages = [r['name'] for r in repositories if r['type'] == 'package'] if packages: self.yum_install(packages)
[ "def", "enable_repositories", "(", "self", ",", "repositories", ")", ":", "for", "r", "in", "repositories", ":", "if", "r", "[", "'type'", "]", "!=", "'rhsm_channel'", ":", "continue", "if", "r", "[", "'name'", "]", "not", "in", "self", ".", "rhsm_channels", ":", "self", ".", "rhsm_channels", ".", "append", "(", "r", "[", "'name'", "]", ")", "if", "self", ".", "rhsm_active", ":", "subscription_cmd", "=", "\"subscription-manager repos '--disable=*' --enable=\"", "+", "' --enable='", ".", "join", "(", "self", ".", "rhsm_channels", ")", "self", ".", "run", "(", "subscription_cmd", ")", "repo_files", "=", "[", "r", "for", "r", "in", "repositories", "if", "r", "[", "'type'", "]", "==", "'yum_repo'", "]", "for", "repo_file", "in", "repo_files", ":", "self", ".", "create_file", "(", "repo_file", "[", "'dest'", "]", ",", "repo_file", "[", "'content'", "]", ")", "packages", "=", "[", "r", "[", "'name'", "]", "for", "r", "in", "repositories", "if", "r", "[", "'type'", "]", "==", "'package'", "]", "if", "packages", ":", "self", ".", "yum_install", "(", "packages", ")" ]
Enable a list of RHSM repositories. :param repositories: a list of dicts in this format: [{'type': 'rhsm_channel', 'name': 'rhel-7-server-rpms'}]
[ "Enable", "a", "list", "of", "RHSM", "repositories", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L179-L202
train
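The three accepted entry types, taken straight from the branches above; the channel name comes from the docstring, while the repo file and package names are placeholders:

server.enable_repositories([
    {'type': 'rhsm_channel', 'name': 'rhel-7-server-rpms'},
    {'type': 'yum_repo',
     'dest': '/etc/yum.repos.d/extra.repo',
     'content': '[extra]\nname=extra\nbaseurl=http://mirror.example.com/extra\ngpgcheck=0\n'},
    {'type': 'package', 'name': 'epel-release'},
])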
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.create_stack_user
def create_stack_user(self): """Create the stack user on the machine. """ self.run('adduser -m stack', success_status=(0, 9)) self.create_file('/etc/sudoers.d/stack', 'stack ALL=(root) NOPASSWD:ALL\n') self.run('mkdir -p /home/stack/.ssh') self.run('cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys') self.run('chown -R stack:stack /home/stack/.ssh') self.run('chmod 700 /home/stack/.ssh') self.run('chmod 600 /home/stack/.ssh/authorized_keys') self.ssh_pool.build_ssh_client(self.hostname, 'stack', self._key_filename, self.via_ip)
python
def create_stack_user(self): """Create the stack user on the machine. """ self.run('adduser -m stack', success_status=(0, 9)) self.create_file('/etc/sudoers.d/stack', 'stack ALL=(root) NOPASSWD:ALL\n') self.run('mkdir -p /home/stack/.ssh') self.run('cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys') self.run('chown -R stack:stack /home/stack/.ssh') self.run('chmod 700 /home/stack/.ssh') self.run('chmod 600 /home/stack/.ssh/authorized_keys') self.ssh_pool.build_ssh_client(self.hostname, 'stack', self._key_filename, self.via_ip)
[ "def", "create_stack_user", "(", "self", ")", ":", "self", ".", "run", "(", "'adduser -m stack'", ",", "success_status", "=", "(", "0", ",", "9", ")", ")", "self", ".", "create_file", "(", "'/etc/sudoers.d/stack'", ",", "'stack ALL=(root) NOPASSWD:ALL\\n'", ")", "self", ".", "run", "(", "'mkdir -p /home/stack/.ssh'", ")", "self", ".", "run", "(", "'cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys'", ")", "self", ".", "run", "(", "'chown -R stack:stack /home/stack/.ssh'", ")", "self", ".", "run", "(", "'chmod 700 /home/stack/.ssh'", ")", "self", ".", "run", "(", "'chmod 600 /home/stack/.ssh/authorized_keys'", ")", "self", ".", "ssh_pool", ".", "build_ssh_client", "(", "self", ".", "hostname", ",", "'stack'", ",", "self", ".", "_key_filename", ",", "self", ".", "via_ip", ")" ]
Create the stack user on the machine.
[ "Create", "the", "stack", "user", "on", "the", "machine", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L204-L216
train
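After creating the user, commands can run as 'stack' through the same SSH pool:

server.create_stack_user()
server.run('whoami', user='stack')  # the stack user now has passwordless sudo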
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.fetch_image
def fetch_image(self, path, dest, user='root'): """Store an image from a remote location in the user home directory. """ self.run('test -f %s || curl -L -s -o %s %s' % (dest, dest, path), user=user, ignore_error=True)
python
def fetch_image(self, path, dest, user='root'): """Store an image from a remote location in the user home directory. """ self.run('test -f %s || curl -L -s -o %s %s' % (dest, dest, path), user=user, ignore_error=True)
[ "def", "fetch_image", "(", "self", ",", "path", ",", "dest", ",", "user", "=", "'root'", ")", ":", "self", ".", "run", "(", "'test -f %s || curl -L -s -o %s %s'", "%", "(", "dest", ",", "dest", ",", "path", ")", ",", "user", "=", "user", ",", "ignore_error", "=", "True", ")" ]
Store an image from a remote location in the user home directory.
[ "Store", "in", "the", "user", "home", "directory", "an", "image", "from", "a", "remote", "location", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L218-L222
train
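A sketch with a placeholder image URL; the download is skipped when dest already exists:

server.fetch_image(
    path='https://images.example.com/overcloud-full.qcow2',  # placeholder URL
    dest='/home/stack/overcloud-full.qcow2',
    user='stack')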
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.clean_system
def clean_system(self): """Clean up unnecessary packages from the system. """ self.run('systemctl disable NetworkManager', success_status=(0, 1)) self.run('systemctl stop NetworkManager', success_status=(0, 5)) self.run('pkill -9 dhclient', success_status=(0, 1)) self.yum_remove(['cloud-init', 'NetworkManager']) self.run('systemctl enable network') self.run('systemctl restart network')
python
def clean_system(self): """Clean up unnecessary packages from the system. """ self.run('systemctl disable NetworkManager', success_status=(0, 1)) self.run('systemctl stop NetworkManager', success_status=(0, 5)) self.run('pkill -9 dhclient', success_status=(0, 1)) self.yum_remove(['cloud-init', 'NetworkManager']) self.run('systemctl enable network') self.run('systemctl restart network')
[ "def", "clean_system", "(", "self", ")", ":", "self", ".", "run", "(", "'systemctl disable NetworkManager'", ",", "success_status", "=", "(", "0", ",", "1", ")", ")", "self", ".", "run", "(", "'systemctl stop NetworkManager'", ",", "success_status", "=", "(", "0", ",", "5", ")", ")", "self", ".", "run", "(", "'pkill -9 dhclient'", ",", "success_status", "=", "(", "0", ",", "1", ")", ")", "self", ".", "yum_remove", "(", "[", "'cloud-init'", ",", "'NetworkManager'", "]", ")", "self", ".", "run", "(", "'systemctl enable network'", ")", "self", ".", "run", "(", "'systemctl restart network'", ")" ]
Clean up unnecessary packages from the system.
[ "Clean", "up", "unnecessary", "packages", "from", "the", "system", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L232-L240
train
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
Server.yum_update
def yum_update(self, allow_reboot=False): """Do a yum update on the system. :param allow_reboot: If True and if a new kernel has been installed, the system will be rebooted """ self.run('yum clean all') self.run('test -f /usr/bin/subscription-manager && subscription-manager repos --list-enabled', ignore_error=True) self.run('yum repolist') self.run('yum update -y --quiet', retry=3) # reboot if a new initrd has been generated since the boot if allow_reboot: self.run('grubby --set-default $(ls /boot/vmlinuz-*.x86_64|tail -1)') default_kernel = self.run('grubby --default-kernel')[0].rstrip() cur_kernel = self.run('uname -r')[0].rstrip() if cur_kernel not in default_kernel: self.run('reboot', ignore_error=True) self.ssh_pool.stop_all()
python
def yum_update(self, allow_reboot=False): """Do a yum update on the system. :param allow_reboot: If True and if a new kernel has been installed, the system will be rebooted """ self.run('yum clean all') self.run('test -f /usr/bin/subscription-manager && subscription-manager repos --list-enabled', ignore_error=True) self.run('yum repolist') self.run('yum update -y --quiet', retry=3) # reboot if a new initrd has been generated since the boot if allow_reboot: self.run('grubby --set-default $(ls /boot/vmlinuz-*.x86_64|tail -1)') default_kernel = self.run('grubby --default-kernel')[0].rstrip() cur_kernel = self.run('uname -r')[0].rstrip() if cur_kernel not in default_kernel: self.run('reboot', ignore_error=True) self.ssh_pool.stop_all()
[ "def", "yum_update", "(", "self", ",", "allow_reboot", "=", "False", ")", ":", "self", ".", "run", "(", "'yum clean all'", ")", "self", ".", "run", "(", "'test -f /usr/bin/subscription-manager && subscription-manager repos --list-enabled'", ",", "ignore_error", "=", "True", ")", "self", ".", "run", "(", "'yum repolist'", ")", "self", ".", "run", "(", "'yum update -y --quiet'", ",", "retry", "=", "3", ")", "# reboot if a new initrd has been generated since the boot", "if", "allow_reboot", ":", "self", ".", "run", "(", "'grubby --set-default $(ls /boot/vmlinuz-*.x86_64|tail -1)'", ")", "default_kernel", "=", "self", ".", "run", "(", "'grubby --default-kernel'", ")", "[", "0", "]", ".", "rstrip", "(", ")", "cur_kernel", "=", "self", ".", "run", "(", "'uname -r'", ")", "[", "0", "]", ".", "rstrip", "(", ")", "if", "cur_kernel", "not", "in", "default_kernel", ":", "self", ".", "run", "(", "'reboot'", ",", "ignore_error", "=", "True", ")", "self", ".", "ssh_pool", ".", "stop_all", "(", ")" ]
Do a yum update on the system. :param allow_reboot: If True and if a new kernel has been installed, the system will be rebooted
[ "Do", "a", "yum", "update", "on", "the", "system", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L242-L260
train
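The reboot decision in yum_update hinges on comparing the running kernel against grubby's default; a standalone, runnable sketch of that check with hypothetical command outputs filled in:

    # Hypothetical outputs of 'grubby --default-kernel' and 'uname -r'.
    default_kernel = '/boot/vmlinuz-3.10.0-1160.el7.x86_64'
    cur_kernel = '3.10.0-957.el7.x86_64'
    # Same substring test yum_update applies before triggering the reboot.
    print(cur_kernel not in default_kernel)  # True -> a reboot is needed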
JawboneHealth/jhhalchemy
jhhalchemy/model/time_order.py
get_by_range
def get_by_range(model_cls, *args, **kwargs): """ Get ordered list of models for the specified time range. The timestamp on the earliest model will likely occur before start_timestamp. This is to ensure that we return the models for the entire range. :param model_cls: the class of the model to return :param args: arguments specific to the model class :param kwargs: start_timestamp and end_timestamp (see below) as well as keyword args specific to the model class :keyword start_timestamp: the most recent models set before this and all after, defaults to 0 :keyword end_timestamp: only models set before (and including) this timestamp, defaults to now :return: model generator """ start_timestamp = kwargs.get('start_timestamp') end_timestamp = kwargs.get('end_timestamp') if (start_timestamp is not None) and (end_timestamp is not None) and (start_timestamp > end_timestamp): raise InvalidTimestampRange models = model_cls.read_time_range(*args, end_timestamp=end_timestamp).order_by(model_cls.time_order) # # start time -> Loop through until you find one set before or on start # if start_timestamp is not None: index = 0 for index, model in enumerate(models, start=1): if model.timestamp <= start_timestamp: break models = models[:index] return models
python
def get_by_range(model_cls, *args, **kwargs): """ Get ordered list of models for the specified time range. The timestamp on the earliest model will likely occur before start_timestamp. This is to ensure that we return the models for the entire range. :param model_cls: the class of the model to return :param args: arguments specific to the model class :param kwargs: start_timestamp and end_timestamp (see below) as well as keyword args specific to the model class :keyword start_timestamp: the most recent models set before this and all after, defaults to 0 :keyword end_timestamp: only models set before (and including) this timestamp, defaults to now :return: model generator """ start_timestamp = kwargs.get('start_timestamp') end_timestamp = kwargs.get('end_timestamp') if (start_timestamp is not None) and (end_timestamp is not None) and (start_timestamp > end_timestamp): raise InvalidTimestampRange models = model_cls.read_time_range(*args, end_timestamp=end_timestamp).order_by(model_cls.time_order) # # start time -> Loop through until you find one set before or on start # if start_timestamp is not None: index = 0 for index, model in enumerate(models, start=1): if model.timestamp <= start_timestamp: break models = models[:index] return models
[ "def", "get_by_range", "(", "model_cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start_timestamp", "=", "kwargs", ".", "get", "(", "'start_timestamp'", ")", "end_timestamp", "=", "kwargs", ".", "get", "(", "'end_timestamp'", ")", "if", "(", "start_timestamp", "is", "not", "None", ")", "and", "(", "end_timestamp", "is", "not", "None", ")", "and", "(", "start_timestamp", ">", "end_timestamp", ")", ":", "raise", "InvalidTimestampRange", "models", "=", "model_cls", ".", "read_time_range", "(", "*", "args", ",", "end_timestamp", "=", "end_timestamp", ")", ".", "order_by", "(", "model_cls", ".", "time_order", ")", "#", "# start time -> Loop through until you find one set before or on start", "#", "if", "start_timestamp", "is", "not", "None", ":", "index", "=", "0", "for", "index", ",", "model", "in", "enumerate", "(", "models", ",", "start", "=", "1", ")", ":", "if", "model", ".", "timestamp", "<=", "start_timestamp", ":", "break", "models", "=", "models", "[", ":", "index", "]", "return", "models" ]
Get ordered list of models for the specified time range. The timestamp on the earliest model will likely occur before start_timestamp. This is to ensure that we return the models for the entire range. :param model_cls: the class of the model to return :param args: arguments specific to the model class :param kwargs: start_timestamp and end_timestamp (see below) as well as keyword args specific to the model class :keyword start_timestamp: the most recent models set before this and all after, defaults to 0 :keyword end_timestamp: only models set before (and including) this timestamp, defaults to now :return: model generator
[ "Get", "ordered", "list", "of", "models", "for", "the", "specified", "time", "range", ".", "The", "timestamp", "on", "the", "earliest", "model", "will", "likely", "occur", "before", "start_timestamp", ".", "This", "is", "to", "ensure", "that", "we", "return", "the", "models", "for", "the", "entire", "range", "." ]
ca0011d644e404561a142c9d7f0a8a569f1f4f27
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/model/time_order.py#L69-L99
train
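A usage sketch for get_by_range, assuming a hypothetical `Timezone` model built on the mixin and epoch-second timestamps; note the result may include one row set before start_timestamp so the whole range is covered:

    # Hypothetical model class and filter criterion.
    rows = get_by_range(
        Timezone, Timezone.uid == 42,
        start_timestamp=1500000000,
        end_timestamp=1500086400)
    for row in rows:
        print(row.timestamp)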
JawboneHealth/jhhalchemy
jhhalchemy/model/time_order.py
TimeOrderMixin.read_time_range
def read_time_range(cls, *args, **kwargs): """ Get all rows set within a given time range. Uses time_dsc_index SELECT * FROM <table> WHERE time_order <= -<start_timestamp> AND time_order >= -<end_timestamp> :param args: SQLAlchemy filter criteria, (e.g., uid == uid, type == 1) :param kwargs: start_timestamp and end_timestamp are the only kwargs, they specify the range (inclusive) :return: model generator """ criteria = list(args) start = kwargs.get('start_timestamp') end = kwargs.get('end_timestamp') if start is not None: criteria.append(cls.time_order <= -start) if end is not None: criteria.append(cls.time_order >= -end) return cls.read(*criteria)
python
def read_time_range(cls, *args, **kwargs): """ Get all rows set within a given time range. Uses time_dsc_index SELECT * FROM <table> WHERE time_order <= -<start_timestamp> AND time_order >= -<end_timestamp> :param args: SQLAlchemy filter criteria, (e.g., uid == uid, type == 1) :param kwargs: start_timestamp and end_timestamp are the only kwargs, they specify the range (inclusive) :return: model generator """ criteria = list(args) start = kwargs.get('start_timestamp') end = kwargs.get('end_timestamp') if start is not None: criteria.append(cls.time_order <= -start) if end is not None: criteria.append(cls.time_order >= -end) return cls.read(*criteria)
[ "def", "read_time_range", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "criteria", "=", "list", "(", "args", ")", "start", "=", "kwargs", ".", "get", "(", "'start_timestamp'", ")", "end", "=", "kwargs", ".", "get", "(", "'end_timestamp'", ")", "if", "start", "is", "not", "None", ":", "criteria", ".", "append", "(", "cls", ".", "time_order", "<=", "-", "start", ")", "if", "end", "is", "not", "None", ":", "criteria", ".", "append", "(", "cls", ".", "time_order", ">=", "-", "end", ")", "return", "cls", ".", "read", "(", "*", "criteria", ")" ]
Get all rows set within a given time range. Uses time_dsc_index SELECT * FROM <table> WHERE time_order <= -<start_timestamp> AND time_order >= -<end_timestamp> :param args: SQLAlchemy filter criteria, (e.g., uid == uid, type == 1) :param kwargs: start_timestamp and end_timestamp are the only kwargs, they specify the range (inclusive) :return: model generator
[ "Get", "all", "timezones", "set", "within", "a", "given", "time", ".", "Uses", "time_dsc_index" ]
ca0011d644e404561a142c9d7f0a8a569f1f4f27
https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/model/time_order.py#L34-L54
train
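The negated time_order column is what lets a single ascending index serve these inclusive descending-time filters; a pure-Python check of the boundary arithmetic (no database needed):

    start, end = 100, 200
    for timestamp in (99, 100, 150, 200, 201):
        time_order = -timestamp  # the column stores the negated timestamp
        in_range = (time_order <= -start) and (time_order >= -end)
        print(timestamp, in_range)  # True only for 100, 150 and 200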
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.add_data
def add_data(self, data, metadata=None): """Add data to the parameter set Parameters ---------- data: numpy.ndarray one or more parameter sets. It must either be 1D or 2D, with the first dimension the number of parameter sets (K), and the second the number of elements (N): K x N metadata: object, optional the provided object will be stored in the metadata dict and can be received with the ID that is returned. If multiple (K) datasets are added at once, provide a list of objects with len K. Returns ------- int, ID ID which can be used to access the parameter set Examples -------- >>> # suppose that grid is a fully initialized grid object with 100 # elements parman = ParMan(grid) # one_data_set = np.ones(100) cid = parman.add_data(one_data_set) print(parman.parsets[cid]) two_data_sets = np.ones((2, 100)) cids = parman.add_data(two_data_sets) print(cids) [0, ] [1, 2] """ subdata = np.atleast_2d(data) # we try to accommodate transposed input if subdata.shape[1] != self.grid.nr_of_elements: if subdata.shape[0] == self.grid.nr_of_elements: subdata = subdata.T else: raise Exception( 'Number of values does not match the number of ' + 'elements in the grid' ) # now make sure that metadata can be zipped with the subdata K = subdata.shape[0] if metadata is not None: if K > 1: if(not isinstance(metadata, (list, tuple)) or len(metadata) != K): raise Exception('metadata does not fit the provided data') else: # K == 1 metadata = [metadata, ] if metadata is None: metadata = [None for i in range(0, K)] return_ids = [] for dataset, meta in zip(subdata, metadata): cid = self._get_next_index() self.parsets[cid] = dataset self.metadata[cid] = meta return_ids.append(cid) if len(return_ids) == 1: return return_ids[0] else: return return_ids
python
def add_data(self, data, metadata=None): """Add data to the parameter set Parameters ---------- data: numpy.ndarray one or more parameter sets. It must either be 1D or 2D, with the first dimension the number of parameter sets (K), and the second the number of elements (N): K x N metadata: object, optional the provided object will be stored in the metadata dict and can be received with the ID that is returned. If multiple (K) datasets are added at once, provide a list of objects with len K. Returns ------- int, ID ID which can be used to access the parameter set Examples -------- >>> # suppose that grid is a fully initialized grid object with 100 # elements parman = ParMan(grid) # one_data_set = np.ones(100) cid = parman.add_data(one_data_set) print(parman.parsets[cid]) two_data_sets = np.ones((2, 100)) cids = parman.add_data(two_data_sets) print(cids) [0, ] [1, 2] """ subdata = np.atleast_2d(data) # we try to accommodate transposed input if subdata.shape[1] != self.grid.nr_of_elements: if subdata.shape[0] == self.grid.nr_of_elements: subdata = subdata.T else: raise Exception( 'Number of values does not match the number of ' + 'elements in the grid' ) # now make sure that metadata can be zipped with the subdata K = subdata.shape[0] if metadata is not None: if K > 1: if(not isinstance(metadata, (list, tuple)) or len(metadata) != K): raise Exception('metadata does not fit the provided data') else: # K == 1 metadata = [metadata, ] if metadata is None: metadata = [None for i in range(0, K)] return_ids = [] for dataset, meta in zip(subdata, metadata): cid = self._get_next_index() self.parsets[cid] = dataset self.metadata[cid] = meta return_ids.append(cid) if len(return_ids) == 1: return return_ids[0] else: return return_ids
[ "def", "add_data", "(", "self", ",", "data", ",", "metadata", "=", "None", ")", ":", "subdata", "=", "np", ".", "atleast_2d", "(", "data", ")", "# we try to accommodate transposed input", "if", "subdata", ".", "shape", "[", "1", "]", "!=", "self", ".", "grid", ".", "nr_of_elements", ":", "if", "subdata", ".", "shape", "[", "0", "]", "==", "self", ".", "grid", ".", "nr_of_elements", ":", "subdata", "=", "subdata", ".", "T", "else", ":", "raise", "Exception", "(", "'Number of values does not match the number of '", "+", "'elements in the grid'", ")", "# now make sure that metadata can be zipped with the subdata", "K", "=", "subdata", ".", "shape", "[", "0", "]", "if", "metadata", "is", "not", "None", ":", "if", "K", ">", "1", ":", "if", "(", "not", "isinstance", "(", "metadata", ",", "(", "list", ",", "tuple", ")", ")", "or", "len", "(", "metadata", ")", "!=", "K", ")", ":", "raise", "Exception", "(", "'metadata does not fit the provided data'", ")", "else", ":", "# K == 1", "metadata", "=", "[", "metadata", ",", "]", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "None", "for", "i", "in", "range", "(", "0", ",", "K", ")", "]", "return_ids", "=", "[", "]", "for", "dataset", ",", "meta", "in", "zip", "(", "subdata", ",", "metadata", ")", ":", "cid", "=", "self", ".", "_get_next_index", "(", ")", "self", ".", "parsets", "[", "cid", "]", "=", "dataset", "self", ".", "metadata", "[", "cid", "]", "=", "meta", "return_ids", ".", "append", "(", "cid", ")", "if", "len", "(", "return_ids", ")", "==", "1", ":", "return", "return_ids", "[", "0", "]", "else", ":", "return", "return_ids" ]
Add data to the parameter set Parameters ---------- data: numpy.ndarray one or more parameter sets. It must either be 1D or 2D, with the first dimension the number of parameter sets (K), and the second the number of elements (N): K x N metadata: object, optional the provided object will be stored in the metadata dict and can be received with the ID that is returned. If multiple (K) datasets are added at once, provide a list of objects with len K. Returns ------- int, ID ID which can be used to access the parameter set Examples -------- >>> # suppose that grid is a fully initialized grid object with 100 # elements parman = ParMan(grid) # one_data_set = np.ones(100) cid = parman.add_data(one_data_set) print(parman.parsets[cid]) two_data_sets = np.ones((2, 100)) cids = parman.add_data(two_data_sets) print(cids) [0, ] [1, 2]
[ "Add", "data", "to", "the", "parameter", "set" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L40-L112
train
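Beyond the docstring example, add_data also accepts transposed input; a sketch assuming `parman` wraps a hypothetical grid with 100 elements:

    import numpy as np
    transposed = np.ones((100, 2))      # N x K instead of the expected K x N
    cids = parman.add_data(transposed)  # silently transposed internally
    print(len(cids))                    # 2 parameter sets were registered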
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.load_model_from_file
def load_model_from_file(self, filename): """Load one parameter set from a file which contains one value per line No row is skipped. Parameters ---------- filename : string, file path Filename to load data from Returns ------- pid : int ID of parameter set """ assert os.path.isfile(filename) data = np.loadtxt(filename).squeeze() assert len(data.shape) == 1 pid = self.add_data(data) return pid
python
def load_model_from_file(self, filename): """Load one parameter set from a file which contains one value per line No row is skipped. Parameters ---------- filename : string, file path Filename to load data from Returns ------- pid : int ID of parameter set """ assert os.path.isfile(filename) data = np.loadtxt(filename).squeeze() assert len(data.shape) == 1 pid = self.add_data(data) return pid
[ "def", "load_model_from_file", "(", "self", ",", "filename", ")", ":", "assert", "os", ".", "path", ".", "isfile", "(", "filename", ")", "data", "=", "np", ".", "loadtxt", "(", "filename", ")", ".", "squeeze", "(", ")", "assert", "len", "(", "data", ".", "shape", ")", "==", "1", "pid", "=", "self", ".", "add_data", "(", "data", ")", "return", "pid" ]
Load one parameter set from a file which contains one value per line No row is skipped. Parameters ---------- filename : string, file path Filename to load data from Returns ------- pid : int ID of parameter set
[ "Load", "one", "parameter", "set", "from", "a", "file", "which", "contains", "one", "value", "per", "line" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L171-L190
train
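A short usage sketch, assuming a plain text file with exactly one value per grid cell and no header rows:

    # 'rho_start.dat' is a hypothetical one-column file.
    pid = parman.load_model_from_file('rho_start.dat')
    print(parman.parsets[pid].shape)  # (nr_of_elements,)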
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.load_from_sens_file
def load_from_sens_file(self, filename): """Load real and imaginary parts from a sens.dat file generated by CRMod Parameters ---------- filename: string filename of sensitivity file Returns ------- nid_re: int ID of real part of sensitivities nid_im: int ID of imaginary part of sensitivities """ sens_data = np.loadtxt(filename, skiprows=1) nid_re = self.add_data(sens_data[:, 2]) nid_im = self.add_data(sens_data[:, 3]) return nid_re, nid_im
python
def load_from_sens_file(self, filename): """Load real and imaginary parts from a sens.dat file generated by CRMod Parameters ---------- filename: string filename of sensitivity file Returns ------- nid_re: int ID of real part of sensitivities nid_im: int ID of imaginary part of sensitivities """ sens_data = np.loadtxt(filename, skiprows=1) nid_re = self.add_data(sens_data[:, 2]) nid_im = self.add_data(sens_data[:, 3]) return nid_re, nid_im
[ "def", "load_from_sens_file", "(", "self", ",", "filename", ")", ":", "sens_data", "=", "np", ".", "loadtxt", "(", "filename", ",", "skiprows", "=", "1", ")", "nid_re", "=", "self", ".", "add_data", "(", "sens_data", "[", ":", ",", "2", "]", ")", "nid_im", "=", "self", ".", "add_data", "(", "sens_data", "[", ":", ",", "3", "]", ")", "return", "nid_re", ",", "nid_im" ]
Load real and imaginary parts from a sens.dat file generated by CRMod Parameters ---------- filename: string filename of sensitivity file Returns ------- nid_re: int ID of real part of sensitivities nid_im: int ID of imaginary part of sensitivities
[ "Load", "real", "and", "imaginary", "parts", "from", "a", "sens", ".", "dat", "file", "generated", "by", "CRMod" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L192-L211
train
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.save_to_rho_file
def save_to_rho_file(self, filename, cid_mag, cid_pha=None): """Save one or two parameter sets in the rho.dat forward model format Parameters ---------- filename: string (file path) output filename cid_mag: int ID of magnitude parameter set cid_pha: int, optional ID of phase parameter set. If not set, will be set to zeros. """ mag_data = self.parsets[cid_mag] if cid_pha is None: pha_data = np.zeros(mag_data.shape) else: pha_data = self.parsets[cid_pha] with open(filename, 'wb') as fid: fid.write( bytes( '{0}\n'.format(self.grid.nr_of_elements), 'utf-8', ) ) np.savetxt( fid, np.vstack(( mag_data, pha_data, )).T, fmt='%f %f' )
python
def save_to_rho_file(self, filename, cid_mag, cid_pha=None): """Save one or two parameter sets in the rho.dat forward model format Parameters ---------- filename: string (file path) output filename cid_mag: int ID of magnitude parameter set cid_pha: int, optional ID of phase parameter set. If not set, will be set to zeros. """ mag_data = self.parsets[cid_mag] if cid_pha is None: pha_data = np.zeros(mag_data.shape) else: pha_data = self.parsets[cid_pha] with open(filename, 'wb') as fid: fid.write( bytes( '{0}\n'.format(self.grid.nr_of_elements), 'utf-8', ) ) np.savetxt( fid, np.vstack(( mag_data, pha_data, )).T, fmt='%f %f' )
[ "def", "save_to_rho_file", "(", "self", ",", "filename", ",", "cid_mag", ",", "cid_pha", "=", "None", ")", ":", "mag_data", "=", "self", ".", "parsets", "[", "cid_mag", "]", "if", "cid_pha", "is", "None", ":", "pha_data", "=", "np", ".", "zeros", "(", "mag_data", ".", "shape", ")", "else", ":", "pha_data", "=", "self", ".", "parsets", "[", "cid_pha", "]", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "fid", ":", "fid", ".", "write", "(", "bytes", "(", "'{0}\\n'", ".", "format", "(", "self", ".", "grid", ".", "nr_of_elements", ")", ",", "'utf-8'", ",", ")", ")", "np", ".", "savetxt", "(", "fid", ",", "np", ".", "vstack", "(", "(", "mag_data", ",", "pha_data", ",", ")", ")", ".", "T", ",", "fmt", "=", "'%f %f'", ")" ]
Save one or two parameter sets in the rho.dat forward model format Parameters ---------- filename: string (file path) output filename cid_mag: int ID of magnitude parameter set cid_pha: int, optional ID of phase parameter set. If not set, will be set to zeros.
[ "Save", "one", "or", "two", "parameter", "sets", "in", "the", "rho", ".", "dat", "forward", "model", "format" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L213-L246
train
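The produced rho.dat starts with the element count and then one "magnitude phase" pair per cell; a sketch assuming cid_mag and cid_pha were previously returned by add_data:

    parman.save_to_rho_file('rho.dat', cid_mag)                 # phase column all zeros
    parman.save_to_rho_file('rho_complex.dat', cid_mag, cid_pha)  # explicit phases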
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan._clean_pid
def _clean_pid(self, pid): """if pid is a number, don't do anything. If pid is a list with one entry, strip the list and return the number. If pid contains more than one entry, do nothing. """ if isinstance(pid, (list, tuple)): if len(pid) == 1: return pid[0] else: return pid return pid
python
def _clean_pid(self, pid): """if pid is a number, don't do anything. If pid is a list with one entry, strip the list and return the number. If pid contains more than one entry, do nothing. """ if isinstance(pid, (list, tuple)): if len(pid) == 1: return pid[0] else: return pid return pid
[ "def", "_clean_pid", "(", "self", ",", "pid", ")", ":", "if", "isinstance", "(", "pid", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "pid", ")", "==", "1", ":", "return", "pid", "[", "0", "]", "else", ":", "return", "pid", "return", "pid" ]
if pid is a number, don't do anything. If pid is a list with one entry, strip the list and return the number. If pid contains more than one entry, do nothing.
[ "if", "pid", "is", "a", "number", "don", "t", "do", "anything", ".", "If", "pid", "is", "a", "list", "with", "one", "entry", "strip", "the", "list", "and", "return", "the", "number", ".", "If", "pid", "contains", "more", "than", "one", "entries", "do", "nothing", "." ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L261-L271
train
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.modify_area
def modify_area(self, pid, xmin, xmax, zmin, zmax, value): """Modify the given dataset in the rectangular area given by the parameters and assign all parameters inside this area the given value. Partially contained elements are treated as INSIDE the area, i.e., they are assigned new values. Parameters ---------- pid: int id of the parameter set to modify xmin: float smallest x value of the area to modify xmax: float largest x value of the area to modify zmin: float smallest z value of the area to modify zmax: float largest z value of the area to modify value: float this value is assigned to all parameters of the area Examples -------- >>> import crtomo.tdManager as CRtdm tdman = CRtdm.tdMan( elem_file='GRID/elem.dat', elec_file='GRID/elec.dat', ) pid = tdman.parman.add_empty_dataset(value=1) tdman.parman.modify_area( pid, xmin=0, xmax=2, zmin=-2, zmax=-0.5, value=2, ) fig, ax = tdman.plot.plot_elements_to_ax(pid) fig.savefig('out.png') """ area_polygon = shapgeo.Polygon( ((xmin, zmax), (xmax, zmax), (xmax, zmin), (xmin, zmin)) ) self.modify_polygon(pid, area_polygon, value)
python
def modify_area(self, pid, xmin, xmax, zmin, zmax, value): """Modify the given dataset in the rectangular area given by the parameters and assign all parameters inside this area the given value. Partially contained elements are treated as INSIDE the area, i.e., they are assigned new values. Parameters ---------- pid: int id of the parameter set to modify xmin: float smallest x value of the area to modify xmax: float largest x value of the area to modify zmin: float smallest z value of the area to modify zmax: float largest z value of the area to modify value: float this value is assigned to all parameters of the area Examples -------- >>> import crtomo.tdManager as CRtdm tdman = CRtdm.tdMan( elem_file='GRID/elem.dat', elec_file='GRID/elec.dat', ) pid = tdman.parman.add_empty_dataset(value=1) tdman.parman.modify_area( pid, xmin=0, xmax=2, zmin=-2, zmax=-0.5, value=2, ) fig, ax = tdman.plot.plot_elements_to_ax(pid) fig.savefig('out.png') """ area_polygon = shapgeo.Polygon( ((xmin, zmax), (xmax, zmax), (xmax, zmin), (xmin, zmin)) ) self.modify_polygon(pid, area_polygon, value)
[ "def", "modify_area", "(", "self", ",", "pid", ",", "xmin", ",", "xmax", ",", "zmin", ",", "zmax", ",", "value", ")", ":", "area_polygon", "=", "shapgeo", ".", "Polygon", "(", "(", "(", "xmin", ",", "zmax", ")", ",", "(", "xmax", ",", "zmax", ")", ",", "(", "xmax", ",", "zmin", ")", ",", "(", "xmin", ",", "zmin", ")", ")", ")", "self", ".", "modify_polygon", "(", "pid", ",", "area_polygon", ",", "value", ")" ]
Modify the given dataset in the rectangular area given by the parameters and assign all parameters inside this area the given value. Partially contained elements are treated as INSIDE the area, i.e., they are assigned new values. Parameters ---------- pid: int id of the parameter set to modify xmin: float smallest x value of the area to modify xmax: float largest x value of the area to modify zmin: float smallest z value of the area to modify zmax: float largest z value of the area to modify value: float this value is assigned to all parameters of the area Examples -------- >>> import crtomo.tdManager as CRtdm tdman = CRtdm.tdMan( elem_file='GRID/elem.dat', elec_file='GRID/elec.dat', ) pid = tdman.parman.add_empty_dataset(value=1) tdman.parman.modify_area( pid, xmin=0, xmax=2, zmin=-2, zmax=-0.5, value=2, ) fig, ax = tdman.plot.plot_elements_to_ax(pid) fig.savefig('out.png')
[ "Modify", "the", "given", "dataset", "in", "the", "rectangular", "area", "given", "by", "the", "parameters", "and", "assign", "all", "parameters", "inside", "this", "area", "the", "given", "value", "." ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L273-L319
train
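Since modify_area only builds a rectangle and delegates to modify_polygon, the equivalent explicit call looks like this (a sketch; shapely is imported as shapgeo in the module, and pid references an existing parameter set):

    import shapely.geometry as shapgeo
    # The same corner ordering modify_area constructs internally.
    rect = shapgeo.Polygon(((0, -0.5), (2, -0.5), (2, -2), (0, -2)))
    parman.modify_polygon(pid, rect, 2)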
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.extract_points
def extract_points(self, pid, points): """Extract values at certain points in the grid from a given parameter set. Cells are selected by interpolating the centroids of the cells towards the given points using a "nearest" scheme. Note that data is only returned for the points provided. If you want to extract multiple data points along a line, defined by start and end point, use the **extract_along_line** function. Parameters ---------- pid: int The parameter id to extract values from points: Nx2 numpy.ndarray (x, y) pairs Returns ------- values: numpy.ndarray (n x 1) data values for extracted data points """ xy = self.grid.get_element_centroids() data = self.parsets[pid] iobj = spi.NearestNDInterpolator(xy, data) values = iobj(points) return values
python
def extract_points(self, pid, points): """Extract values at certain points in the grid from a given parameter set. Cells are selected by interpolating the centroids of the cells towards the given points using a "nearest" scheme. Note that data is only returned for the points provided. If you want to extract multiple data points along a line, defined by start and end point, use the **extract_along_line** function. Parameters ---------- pid: int The parameter id to extract values from points: Nx2 numpy.ndarray (x, y) pairs Returns ------- values: numpy.ndarray (n x 1) data values for extracted data points """ xy = self.grid.get_element_centroids() data = self.parsets[pid] iobj = spi.NearestNDInterpolator(xy, data) values = iobj(points) return values
[ "def", "extract_points", "(", "self", ",", "pid", ",", "points", ")", ":", "xy", "=", "self", ".", "grid", ".", "get_element_centroids", "(", ")", "data", "=", "self", ".", "parsets", "[", "pid", "]", "iobj", "=", "spi", ".", "NearestNDInterpolator", "(", "xy", ",", "data", ")", "values", "=", "iobj", "(", "points", ")", "return", "values" ]
Extract values at certain points in the grid from a given parameter set. Cells are selected by interpolating the centroids of the cells towards the given points using a "nearest" scheme. Note that data is only returned for the points provided. If you want to extract multiple data points along a line, defined by start and end point, use the **extract_along_line** function. Parameters ---------- pid: int The parameter id to extract values from points: Nx2 numpy.ndarray (x, y) pairs Returns ------- values: numpy.ndarray (n x 1) data values for extracted data points
[ "Extract", "values", "at", "certain", "points", "in", "the", "grid", "from", "a", "given", "parameter", "set", ".", "Cells", "are", "selected", "by", "interpolating", "the", "centroids", "of", "the", "cells", "towards", "the", "line", "using", "a", "nearest", "scheme", "." ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L373-L399
train
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.extract_along_line
def extract_along_line(self, pid, xy0, xy1, N=10): """Extract parameter values along a given line. Parameters ---------- pid: int The parameter id to extract values from xy0: tuple A tuple with (x,y) start point coordinates xy1: tuple A tuple with (x,y) end point coordinates N: integer, optional The number of values to extract along the line (including start and end point) Returns ------- values: numpy.ndarray (n x 1) data values for extracted data points """ assert N >= 2 xy0 = np.array(xy0).squeeze() xy1 = np.array(xy1).squeeze() assert xy0.size == 2 assert xy1.size == 2 # compute points points = [(x, y) for x, y in zip( np.linspace(xy0[0], xy1[0], N), np.linspace(xy0[1], xy1[1], N) )] result = self.extract_points(pid, points) results_xyv = np.hstack(( points, result[:, np.newaxis] )) return results_xyv
python
def extract_along_line(self, pid, xy0, xy1, N=10): """Extract parameter values along a given line. Parameters ---------- pid: int The parameter id to extract values from xy0: tuple A tuple with (x,y) start point coordinates xy1: tuple A tuple with (x,y) end point coordinates N: integer, optional The number of values to extract along the line (including start and end point) Returns ------- values: numpy.ndarray (n x 1) data values for extracted data points """ assert N >= 2 xy0 = np.array(xy0).squeeze() xy1 = np.array(xy1).squeeze() assert xy0.size == 2 assert xy1.size == 2 # compute points points = [(x, y) for x, y in zip( np.linspace(xy0[0], xy1[0], N), np.linspace(xy0[1], xy1[1], N) )] result = self.extract_points(pid, points) results_xyv = np.hstack(( points, result[:, np.newaxis] )) return results_xyv
[ "def", "extract_along_line", "(", "self", ",", "pid", ",", "xy0", ",", "xy1", ",", "N", "=", "10", ")", ":", "assert", "N", ">=", "2", "xy0", "=", "np", ".", "array", "(", "xy0", ")", ".", "squeeze", "(", ")", "xy1", "=", "np", ".", "array", "(", "xy1", ")", ".", "squeeze", "(", ")", "assert", "xy0", ".", "size", "==", "2", "assert", "xy1", ".", "size", "==", "2", "# compute points", "points", "=", "[", "(", "x", ",", "y", ")", "for", "x", ",", "y", "in", "zip", "(", "np", ".", "linspace", "(", "xy0", "[", "0", "]", ",", "xy1", "[", "0", "]", ",", "N", ")", ",", "np", ".", "linspace", "(", "xy0", "[", "1", "]", ",", "xy1", "[", "1", "]", ",", "N", ")", ")", "]", "result", "=", "self", ".", "extract_points", "(", "pid", ",", "points", ")", "results_xyv", "=", "np", ".", "hstack", "(", "(", "points", ",", "result", "[", ":", ",", "np", ".", "newaxis", "]", ")", ")", "return", "results_xyv" ]
Extract parameter values along a given line. Parameters ---------- pid: int The parameter id to extract values from xy0: tuple A tuple with (x,y) start point coordinates xy1: tuple A tuple with (x,y) end point coordinates N: integer, optional The number of values to extract along the line (including start and end point) Returns ------- values: numpy.ndarray (n x 1) data values for extracted data points
[ "Extract", "parameter", "values", "along", "a", "given", "line", "." ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L401-L437
train
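A usage sketch returning N evenly spaced (x, z, value) rows between two points, assuming `pid` references an existing parameter set:

    xzv = parman.extract_along_line(pid, (0.0, 0.0), (10.0, -2.0), N=5)
    print(xzv.shape)  # (5, 3): x, z and the nearest-centroid value
    print(xzv[:, 2])  # the extracted parameter values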
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
ParMan.extract_polygon_area
def extract_polygon_area(self, pid, polygon_points): """Extract all data points whose element centroid lies within the given polygon. Parameters ---------- pid: int The parameter id to extract values from polygon_points: list of (x, y) tuples Vertices of the polygon Returns ------- indices: numpy.ndarray indices of the elements whose centroid lies within the polygon values: numpy.ndarray parameter values of those elements """ polygon = shapgeo.Polygon(polygon_points) xy = self.grid.get_element_centroids() in_poly = [] for nr, point in enumerate(xy): if shapgeo.Point(point).within(polygon): in_poly.append(nr) values = self.parsets[pid][in_poly] return np.array(in_poly), values
python
def extract_polygon_area(self, pid, polygon_points): """Extract all data points whose element centroid lies within the given polygon. Parameters ---------- pid: int The parameter id to extract values from polygon_points: list of (x, y) tuples Vertices of the polygon Returns ------- indices: numpy.ndarray indices of the elements whose centroid lies within the polygon values: numpy.ndarray parameter values of those elements """ polygon = shapgeo.Polygon(polygon_points) xy = self.grid.get_element_centroids() in_poly = [] for nr, point in enumerate(xy): if shapgeo.Point(point).within(polygon): in_poly.append(nr) values = self.parsets[pid][in_poly] return np.array(in_poly), values
[ "def", "extract_polygon_area", "(", "self", ",", "pid", ",", "polygon_points", ")", ":", "polygon", "=", "shapgeo", ".", "Polygon", "(", "polygon_points", ")", "xy", "=", "self", ".", "grid", ".", "get_element_centroids", "(", ")", "in_poly", "=", "[", "]", "for", "nr", ",", "point", "in", "enumerate", "(", "xy", ")", ":", "if", "shapgeo", ".", "Point", "(", "point", ")", ".", "within", "(", "polygon", ")", ":", "in_poly", ".", "append", "(", "nr", ")", "values", "=", "self", ".", "parsets", "[", "pid", "]", "[", "in_poly", "]", "return", "np", ".", "array", "(", "in_poly", ")", ",", "values" ]
Extract all data points whose element centroid lies within the given polygon. Parameters ---------- pid: int The parameter id to extract values from polygon_points: list of (x, y) tuples Vertices of the polygon Returns ------- indices: numpy.ndarray indices of the elements whose centroid lies within the polygon values: numpy.ndarray parameter values of those elements
[ "Extract", "all", "data", "points", "whose", "element", "centroid", "lies", "within", "the", "given", "polygon", "." ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L439-L457
train
geophysics-ubonn/crtomo_tools
src/grid_homogenize.py
rotate_point
def rotate_point(xorigin, yorigin, x, y, angle): """Rotate the given point by angle """ rotx = (x - xorigin) * np.cos(angle) - (y - yorigin) * np.sin(angle) roty = (x - xorigin) * np.sin(angle) + (y - yorigin) * np.cos(angle) return rotx, roty
python
def rotate_point(xorigin, yorigin, x, y, angle): """Rotate the given point by angle """ rotx = (x - xorigin) * np.cos(angle) - (y - yorigin) * np.sin(angle) roty = (x - xorigin) * np.sin(angle) + (y - yorigin) * np.cos(angle) return rotx, roty
[ "def", "rotate_point", "(", "xorigin", ",", "yorigin", ",", "x", ",", "y", ",", "angle", ")", ":", "rotx", "=", "(", "x", "-", "xorigin", ")", "*", "np", ".", "cos", "(", "angle", ")", "-", "(", "y", "-", "yorigin", ")", "*", "np", ".", "sin", "(", "angle", ")", "roty", "=", "(", "x", "-", "yorigin", ")", "*", "np", ".", "sin", "(", "angle", ")", "+", "(", "y", "-", "yorigin", ")", "*", "np", ".", "cos", "(", "angle", ")", "return", "rotx", ",", "roty" ]
Rotate the given point by angle
[ "Rotate", "the", "given", "point", "by", "angle" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/grid_homogenize.py#L96-L101
train
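A self-contained sanity check of the rotation formula: rotating (1, 0) by 90 degrees about the origin should land on (0, 1):

    import numpy as np

    def rotate_point(xorigin, yorigin, x, y, angle):
        # Standard 2D rotation of (x, y) about (xorigin, yorigin).
        rotx = (x - xorigin) * np.cos(angle) - (y - yorigin) * np.sin(angle)
        roty = (x - xorigin) * np.sin(angle) + (y - yorigin) * np.cos(angle)
        return rotx, roty

    print(np.round(rotate_point(0, 0, 1, 0, np.pi / 2), 6))  # [0. 1.]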
geophysics-ubonn/crtomo_tools
src/cr_get_modelling_errors.py
get_R_mod
def get_R_mod(options, rho0): """Compute synthetic measurements over a homogeneous half-space """ tomodir = tdManager.tdMan( elem_file=options.elem_file, elec_file=options.elec_file, config_file=options.config_file, ) # set model tomodir.add_homogeneous_model(magnitude=rho0) # only interested in magnitudes Z = tomodir.measurements()[:, 0] return Z
python
def get_R_mod(options, rho0): """Compute synthetic measurements over a homogeneous half-space """ tomodir = tdManager.tdMan( elem_file=options.elem_file, elec_file=options.elec_file, config_file=options.config_file, ) # set model tomodir.add_homogeneous_model(magnitude=rho0) # only interested in magnitudes Z = tomodir.measurements()[:, 0] return Z
[ "def", "get_R_mod", "(", "options", ",", "rho0", ")", ":", "tomodir", "=", "tdManager", ".", "tdMan", "(", "elem_file", "=", "options", ".", "elem_file", ",", "elec_file", "=", "options", ".", "elec_file", ",", "config_file", "=", "options", ".", "config_file", ",", ")", "# set model", "tomodir", ".", "add_homogeneous_model", "(", "magnitude", "=", "rho0", ")", "# only interested in magnitudes", "Z", "=", "tomodir", ".", "measurements", "(", ")", "[", ":", ",", "0", "]", "return", "Z" ]
Compute synthetic measurements over a homogeneous half-space
[ "Compute", "synthetic", "measurements", "over", "a", "homogeneous", "half", "-", "space" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/cr_get_modelling_errors.py#L110-L125
train
rhayes777/PyAutoFit
autofit/tools/path_util.py
make_and_return_path_from_path_and_folder_names
def make_and_return_path_from_path_and_folder_names(path, folder_names): """ For a given path, create a directory structure composed of a set of folders and return the path to the \ inner-most folder. For example, if path='/path/to/folders', and folder_names=['folder1', 'folder2'], the directory created will be '/path/to/folders/folder1/folder2/' and the returned path will be '/path/to/folders/folder1/folder2/'. If the folders already exist, the routine continues as normal. Parameters ---------- path : str The path where the directories are created. folder_names : [str] The names of the folders which are created in the path directory. Returns ------- path A string specifying the path to the inner-most folder created. Examples -------- path = '/path/to/folders' path = make_and_return_path_from_path_and_folder_names(path=path, folder_names=['folder1', 'folder2']) """ for folder_name in folder_names: path += folder_name + '/' try: os.makedirs(path) except FileExistsError: pass return path
python
def make_and_return_path_from_path_and_folder_names(path, folder_names): """ For a given path, create a directory structure composed of a set of folders and return the path to the \ inner-most folder. For example, if path='/path/to/folders', and folder_names=['folder1', 'folder2'], the directory created will be '/path/to/folders/folder1/folder2/' and the returned path will be '/path/to/folders/folder1/folder2/'. If the folders already exist, the routine continues as normal. Parameters ---------- path : str The path where the directories are created. folder_names : [str] The names of the folders which are created in the path directory. Returns ------- path A string specifying the path to the inner-most folder created. Examples -------- path = '/path/to/folders' path = make_and_return_path_from_path_and_folder_names(path=path, folder_names=['folder1', 'folder2']) """ for folder_name in folder_names: path += folder_name + '/' try: os.makedirs(path) except FileExistsError: pass return path
[ "def", "make_and_return_path_from_path_and_folder_names", "(", "path", ",", "folder_names", ")", ":", "for", "folder_name", "in", "folder_names", ":", "path", "+=", "folder_name", "+", "'/'", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "FileExistsError", ":", "pass", "return", "path" ]
For a given path, create a directory structure composed of a set of folders and return the path to the \ inner-most folder. For example, if path='/path/to/folders', and folder_names=['folder1', 'folder2'], the directory created will be '/path/to/folders/folder1/folder2/' and the returned path will be '/path/to/folders/folder1/folder2/'. If the folders already exist, the routine continues as normal. Parameters ---------- path : str The path where the directories are created. folder_names : [str] The names of the folders which are created in the path directory. Returns ------- path A string specifying the path to the inner-most folder created. Examples -------- path = '/path/to/folders' path = make_and_return_path_from_path_and_folder_names(path=path, folder_names=['folder1', 'folder2'])
[ "For", "a", "given", "path", "create", "a", "directory", "structure", "composed", "of", "a", "set", "of", "folders", "and", "return", "the", "path", "to", "the", "\\", "inner", "-", "most", "folder", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/path_util.py#L42-L77
train
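A runnable sketch using a temporary directory; note the incoming path must already end with a slash because the folder names are appended with plain string concatenation:

    import os, tempfile
    from autofit.tools.path_util import make_and_return_path_from_path_and_folder_names

    base = tempfile.mkdtemp() + '/'
    inner = make_and_return_path_from_path_and_folder_names(
        path=base, folder_names=['folder1', 'folder2'])
    print(os.path.isdir(inner), inner.endswith('folder1/folder2/'))  # True True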
redhat-openstack/python-tripleo-helper
tripleohelper/ovb_bmc.py
OvbBmc.register_host
def register_host(self, bm_instance): """Register an existing nova VM. A new interface will be attached to the BMC host with a new IP. An openstackbmc service will be bound to this IP. Once the VM has been registered, it is possible to use IPMI on this IP to start or stop the virtual machine. """ bmc_ip = '10.130.%d.100' % (self._bmc_range_start + self._nic_cpt) bmc_net = '10.130.%d.0' % (self._bmc_range_start + self._nic_cpt) bmc_gw = '10.130.%d.1' % (self._bmc_range_start + self._nic_cpt) device = 'eth%d' % (2 + self._nic_cpt) body_create_subnet = { 'subnets': [{ 'name': 'bmc_' + device, 'cidr': bmc_net + '/24', 'ip_version': 4, 'network_id': self._bmc_net['id']}]} subnet_id = self.neutron.create_subnet(body=body_create_subnet)['subnets'][0]['id'] self.attach_subnet_to_router(subnet_id) self.os_instance.interface_attach(None, self._bmc_net['id'], bmc_ip) content = """ DEVICE="{device}" BOOTPROTO=static IPADDR={bmc_ip} NETMASK=255.255.255.0 ONBOOT=yes """ self.create_file( '/etc/sysconfig/network-scripts/ifcfg-%s' % device, content=content.format(device=device, bmc_ip=bmc_ip, bmc_gw=bmc_gw)) content = """ 192.0.2.0/24 via {bmc_gw} """ self.create_file( '/etc/sysconfig/network-scripts/route-%s' % device, content=content.format(bmc_gw=bmc_gw)) self.run('ifup %s' % device) # Ensure the outgoing traffic go through the correct NIC to avoid spoofing # protection # TODO(Gonéri): This should be persistant. self.run('ip rule add from %s table %d' % (bmc_ip, self._nic_cpt + 2)) self.run('ip route add default via %s dev %s table %d' % (bmc_gw, device, self._nic_cpt + 2)) content = """ [Unit] Description=openstack-bmc {bm_instance} Service [Service] ExecStart=/usr/local/bin/openstackbmc --os-user {os_username} --os-password {os_password} --os-project-id {os_project_id} --os-auth-url {os_auth_url} --instance {bm_instance} --address {bmc_ip} User=root StandardOutput=kmsg+console StandardError=inherit Restart=always [Install] WantedBy=multi-user.target """ unit = 'openstack-bmc-%d.service' % self._nic_cpt self.create_file( '/usr/lib/systemd/system/%s' % unit, content.format( os_username=self.os_username, os_password=protect_password(self.os_password), os_project_id=self.os_project_id, os_auth_url=self.os_auth_url, bm_instance=bm_instance, bmc_ip=bmc_ip)) self.run('systemctl enable %s' % unit) self.run('systemctl start %s' % unit) self._nic_cpt += 1 return bmc_ip
python
def register_host(self, bm_instance): """Register an existing nova VM. A new interface will be attached to the BMC host with a new IP. An openstackbmc service will be bound to this IP. Once the VM has been registered, it is possible to use IPMI on this IP to start or stop the virtual machine. """ bmc_ip = '10.130.%d.100' % (self._bmc_range_start + self._nic_cpt) bmc_net = '10.130.%d.0' % (self._bmc_range_start + self._nic_cpt) bmc_gw = '10.130.%d.1' % (self._bmc_range_start + self._nic_cpt) device = 'eth%d' % (2 + self._nic_cpt) body_create_subnet = { 'subnets': [{ 'name': 'bmc_' + device, 'cidr': bmc_net + '/24', 'ip_version': 4, 'network_id': self._bmc_net['id']}]} subnet_id = self.neutron.create_subnet(body=body_create_subnet)['subnets'][0]['id'] self.attach_subnet_to_router(subnet_id) self.os_instance.interface_attach(None, self._bmc_net['id'], bmc_ip) content = """ DEVICE="{device}" BOOTPROTO=static IPADDR={bmc_ip} NETMASK=255.255.255.0 ONBOOT=yes """ self.create_file( '/etc/sysconfig/network-scripts/ifcfg-%s' % device, content=content.format(device=device, bmc_ip=bmc_ip, bmc_gw=bmc_gw)) content = """ 192.0.2.0/24 via {bmc_gw} """ self.create_file( '/etc/sysconfig/network-scripts/route-%s' % device, content=content.format(bmc_gw=bmc_gw)) self.run('ifup %s' % device) # Ensure the outgoing traffic go through the correct NIC to avoid spoofing # protection # TODO(Gonéri): This should be persistant. self.run('ip rule add from %s table %d' % (bmc_ip, self._nic_cpt + 2)) self.run('ip route add default via %s dev %s table %d' % (bmc_gw, device, self._nic_cpt + 2)) content = """ [Unit] Description=openstack-bmc {bm_instance} Service [Service] ExecStart=/usr/local/bin/openstackbmc --os-user {os_username} --os-password {os_password} --os-project-id {os_project_id} --os-auth-url {os_auth_url} --instance {bm_instance} --address {bmc_ip} User=root StandardOutput=kmsg+console StandardError=inherit Restart=always [Install] WantedBy=multi-user.target """ unit = 'openstack-bmc-%d.service' % self._nic_cpt self.create_file( '/usr/lib/systemd/system/%s' % unit, content.format( os_username=self.os_username, os_password=protect_password(self.os_password), os_project_id=self.os_project_id, os_auth_url=self.os_auth_url, bm_instance=bm_instance, bmc_ip=bmc_ip)) self.run('systemctl enable %s' % unit) self.run('systemctl start %s' % unit) self._nic_cpt += 1 return bmc_ip
[ "def", "register_host", "(", "self", ",", "bm_instance", ")", ":", "bmc_ip", "=", "'10.130.%d.100'", "%", "(", "self", ".", "_bmc_range_start", "+", "self", ".", "_nic_cpt", ")", "bmc_net", "=", "'10.130.%d.0'", "%", "(", "self", ".", "_bmc_range_start", "+", "self", ".", "_nic_cpt", ")", "bmc_gw", "=", "'10.130.%d.1'", "%", "(", "self", ".", "_bmc_range_start", "+", "self", ".", "_nic_cpt", ")", "device", "=", "'eth%d'", "%", "(", "2", "+", "self", ".", "_nic_cpt", ")", "body_create_subnet", "=", "{", "'subnets'", ":", "[", "{", "'name'", ":", "'bmc_'", "+", "device", ",", "'cidr'", ":", "bmc_net", "+", "'/24'", ",", "'ip_version'", ":", "4", ",", "'network_id'", ":", "self", ".", "_bmc_net", "[", "'id'", "]", "}", "]", "}", "subnet_id", "=", "self", ".", "neutron", ".", "create_subnet", "(", "body", "=", "body_create_subnet", ")", "[", "'subnets'", "]", "[", "0", "]", "[", "'id'", "]", "self", ".", "attach_subnet_to_router", "(", "subnet_id", ")", "self", ".", "os_instance", ".", "interface_attach", "(", "None", ",", "self", ".", "_bmc_net", "[", "'id'", "]", ",", "bmc_ip", ")", "content", "=", "\"\"\"\nDEVICE=\"{device}\"\nBOOTPROTO=static\nIPADDR={bmc_ip}\nNETMASK=255.255.255.0\nONBOOT=yes\n\"\"\"", "self", ".", "create_file", "(", "'/etc/sysconfig/network-scripts/ifcfg-%s'", "%", "device", ",", "content", "=", "content", ".", "format", "(", "device", "=", "device", ",", "bmc_ip", "=", "bmc_ip", ",", "bmc_gw", "=", "bmc_gw", ")", ")", "content", "=", "\"\"\"\n192.0.2.0/24 via {bmc_gw}\n\"\"\"", "self", ".", "create_file", "(", "'/etc/sysconfig/network-scripts/route-%s'", "%", "device", ",", "content", "=", "content", ".", "format", "(", "bmc_gw", "=", "bmc_gw", ")", ")", "self", ".", "run", "(", "'ifup %s'", "%", "device", ")", "# Ensure the outgoing traffic go through the correct NIC to avoid spoofing", "# protection", "# TODO(Gonéri): This should be persistant.", "self", ".", "run", "(", "'ip rule add from %s table %d'", "%", "(", "bmc_ip", ",", "self", ".", "_nic_cpt", "+", "2", ")", ")", "self", ".", "run", "(", "'ip route add default via %s dev %s table %d'", "%", "(", "bmc_gw", ",", "device", ",", "self", ".", "_nic_cpt", "+", "2", ")", ")", "content", "=", "\"\"\"\n[Unit]\nDescription=openstack-bmc {bm_instance} Service\n[Service]\nExecStart=/usr/local/bin/openstackbmc --os-user {os_username} --os-password {os_password} --os-project-id {os_project_id} --os-auth-url {os_auth_url} --instance {bm_instance} --address {bmc_ip}\nUser=root\nStandardOutput=kmsg+console\nStandardError=inherit\nRestart=always\n[Install]\nWantedBy=multi-user.target\n\"\"\"", "unit", "=", "'openstack-bmc-%d.service'", "%", "self", ".", "_nic_cpt", "self", ".", "create_file", "(", "'/usr/lib/systemd/system/%s'", "%", "unit", ",", "content", ".", "format", "(", "os_username", "=", "self", ".", "os_username", ",", "os_password", "=", "protect_password", "(", "self", ".", "os_password", ")", ",", "os_project_id", "=", "self", ".", "os_project_id", ",", "os_auth_url", "=", "self", ".", "os_auth_url", ",", "bm_instance", "=", "bm_instance", ",", "bmc_ip", "=", "bmc_ip", ")", ")", "self", ".", "run", "(", "'systemctl enable %s'", "%", "unit", ")", "self", ".", "run", "(", "'systemctl start %s'", "%", "unit", ")", "self", ".", "_nic_cpt", "+=", "1", "return", "bmc_ip" ]
Register an existing nova VM. A new interface will be attached to the BMC host with a new IP. An openstackbmc service will be bound to this IP. Once the VM has been registered, it is possible to use IPMI on this IP to start or stop the virtual machine.
[ "Register", "an", "existing", "nova", "VM", "." ]
bfa165538335edb1088170c7a92f097167225c81
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ovb_bmc.py#L116-L189
train
gofed/gofedlib
gofedlib/go/snapshot.py
Snapshot.Godeps
def Godeps(self): """Return the snapshot in Godeps.json form """ dict = [] for package in sorted(self._packages.keys()): dict.append({ "ImportPath": str(package), "Rev": str(self._packages[package]) }) return dict
python
def Godeps(self): """Return the snapshot in Godeps.json form """ dict = [] for package in sorted(self._packages.keys()): dict.append({ "ImportPath": str(package), "Rev": str(self._packages[package]) }) return dict
[ "def", "Godeps", "(", "self", ")", ":", "dict", "=", "[", "]", "for", "package", "in", "sorted", "(", "self", ".", "_packages", ".", "keys", "(", ")", ")", ":", "dict", ".", "append", "(", "{", "\"ImportPath\"", ":", "str", "(", "package", ")", ",", "\"Rev\"", ":", "str", "(", "self", ".", "_packages", "[", "package", "]", ")", "}", ")", "return", "dict" ]
Return the snapshot in Godeps.json form
[ "Return", "the", "snapshot", "in", "Godeps", ".", "json", "form" ]
0674c248fe3d8706f98f912996b65af469f96b10
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/snapshot.py#L47-L57
train
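The returned list serializes directly into the Deps section of a Godeps.json file; a sketch assuming `snapshot` is a Snapshot holding a couple of pinned packages:

    import json
    deps = snapshot.Godeps()  # entries are sorted by import path
    print(json.dumps(deps, indent=2))
    # e.g. [{"ImportPath": "github.com/a/b", "Rev": "abc123"}, ...]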
gofed/gofedlib
gofedlib/go/snapshot.py
Snapshot.GLOGFILE
def GLOGFILE(self): """Return the snapshot in GLOGFILE form """ lines = [] for package in sorted(self._packages.keys()): lines.append("%s %s" % (str(package), str(self._packages[package]))) return "\n".join(lines)
python
def GLOGFILE(self): """Return the snapshot in GLOGFILE form """ lines = [] for package in sorted(self._packages.keys()): lines.append("%s %s" % (str(package), str(self._packages[package]))) return "\n".join(lines)
[ "def", "GLOGFILE", "(", "self", ")", ":", "lines", "=", "[", "]", "for", "package", "in", "sorted", "(", "self", ".", "_packages", ".", "keys", "(", ")", ")", ":", "lines", ".", "append", "(", "\"%s %s\"", "%", "(", "str", "(", "package", ")", ",", "str", "(", "self", ".", "_packages", "[", "package", "]", ")", ")", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
Return the snapshot in GLOGFILE form
[ "Return", "the", "snapshot", "in", "GLOGFILE", "form" ]
0674c248fe3d8706f98f912996b65af469f96b10
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/snapshot.py#L59-L66
train
gofed/gofedlib
gofedlib/go/snapshot.py
Snapshot.Glide
def Glide(self): """Return the snapshot in glide.lock form """ dict = { "hash": "???", "updated": str(datetime.datetime.now(tz=pytz.utc).isoformat()), "imports": [], } decomposer = ImportPathsDecomposerBuilder().buildLocalDecomposer() decomposer.decompose(self._packages.keys()) classes = decomposer.classes() for ipp in classes: dep = { "name": ipp, "version": str(self._packages[classes[ipp][0]]) } if len(classes[ipp]) > 1 or classes[ipp][0] != ipp: dep["subpackages"] = map(lambda l: l[len(ipp)+1:], classes[ipp]) dict["imports"].append(dep) return yaml.dump(dict, default_flow_style=False)
python
def Glide(self): """Return the snapshot in glide.lock form """ dict = { "hash": "???", "updated": str(datetime.datetime.now(tz=pytz.utc).isoformat()), "imports": [], } decomposer = ImportPathsDecomposerBuilder().buildLocalDecomposer() decomposer.decompose(self._packages.keys()) classes = decomposer.classes() for ipp in classes: dep = { "name": ipp, "version": str(self._packages[classes[ipp][0]]) } if len(classes[ipp]) > 1 or classes[ipp][0] != ipp: dep["subpackages"] = map(lambda l: l[len(ipp)+1:], classes[ipp]) dict["imports"].append(dep) return yaml.dump(dict, default_flow_style=False)
[ "def", "Glide", "(", "self", ")", ":", "dict", "=", "{", "\"hash\"", ":", "\"???\"", ",", "\"updated\"", ":", "str", "(", "datetime", ".", "datetime", ".", "now", "(", "tz", "=", "pytz", ".", "utc", ")", ".", "isoformat", "(", ")", ")", ",", "\"imports\"", ":", "[", "]", ",", "}", "decomposer", "=", "ImportPathsDecomposerBuilder", "(", ")", ".", "buildLocalDecomposer", "(", ")", "decomposer", ".", "decompose", "(", "self", ".", "_packages", ".", "keys", "(", ")", ")", "classes", "=", "decomposer", ".", "classes", "(", ")", "for", "ipp", "in", "classes", ":", "dep", "=", "{", "\"name\"", ":", "ipp", ",", "\"version\"", ":", "str", "(", "self", ".", "_packages", "[", "classes", "[", "ipp", "]", "[", "0", "]", "]", ")", "}", "if", "len", "(", "classes", "[", "ipp", "]", ")", ">", "1", "or", "classes", "[", "ipp", "]", "[", "0", "]", "!=", "ipp", ":", "dep", "[", "\"subpackages\"", "]", "=", "map", "(", "lambda", "l", ":", "l", "[", "len", "(", "ipp", ")", "+", "1", ":", "]", ",", "classes", "[", "ipp", "]", ")", "dict", "[", "\"imports\"", "]", ".", "append", "(", "dep", ")", "return", "yaml", ".", "dump", "(", "dict", ",", "default_flow_style", "=", "False", ")" ]
Return the snapshot in glide.lock form
[ "Return", "the", "snapshot", "in", "glide", ".", "lock", "form" ]
0674c248fe3d8706f98f912996b65af469f96b10
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/snapshot.py#L68-L91
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/navigation_visualizer.py
NavigationVisualizer.render
def render(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array], batch: Optional[int] = None) -> None: '''Render the simulated state-action `trajectories` for Navigation domain. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render. ''' non_fluents, initial_state, states, actions, interms, rewards = trajectories non_fluents = dict(non_fluents) states = dict((name, fluent[0]) for name, fluent in states) actions = dict((name, fluent[0]) for name, fluent in actions) rewards = rewards[0] idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1') start = initial_state[idx][0] g = non_fluents['GOAL/1'] path = states['location/1'] deltas = actions['move/1'] centers = non_fluents['DECELERATION_ZONE_CENTER/2'] decays = non_fluents['DECELERATION_ZONE_DECAY/1'] zones = [(x, y, d) for (x, y), d in zip(centers, decays)] self._ax1 = plt.gca() self._render_state_space() self._render_start_and_goal_positions(start, g) self._render_deceleration_zones(zones) self._render_state_action_trajectory(start, path, deltas) plt.title('Navigation', fontweight='bold') plt.legend(loc='lower right') plt.show()
python
def render(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array], batch: Optional[int] = None) -> None: '''Render the simulated state-action `trajectories` for Navigation domain. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render. ''' non_fluents, initial_state, states, actions, interms, rewards = trajectories non_fluents = dict(non_fluents) states = dict((name, fluent[0]) for name, fluent in states) actions = dict((name, fluent[0]) for name, fluent in actions) rewards = rewards[0] idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1') start = initial_state[idx][0] g = non_fluents['GOAL/1'] path = states['location/1'] deltas = actions['move/1'] centers = non_fluents['DECELERATION_ZONE_CENTER/2'] decays = non_fluents['DECELERATION_ZONE_DECAY/1'] zones = [(x, y, d) for (x, y), d in zip(centers, decays)] self._ax1 = plt.gca() self._render_state_space() self._render_start_and_goal_positions(start, g) self._render_deceleration_zones(zones) self._render_state_action_trajectory(start, path, deltas) plt.title('Navigation', fontweight='bold') plt.legend(loc='lower right') plt.show()
[ "def", "render", "(", "self", ",", "trajectories", ":", "Tuple", "[", "NonFluents", ",", "Fluents", ",", "Fluents", ",", "Fluents", ",", "np", ".", "array", "]", ",", "batch", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "None", ":", "non_fluents", ",", "initial_state", ",", "states", ",", "actions", ",", "interms", ",", "rewards", "=", "trajectories", "non_fluents", "=", "dict", "(", "non_fluents", ")", "states", "=", "dict", "(", "(", "name", ",", "fluent", "[", "0", "]", ")", "for", "name", ",", "fluent", "in", "states", ")", "actions", "=", "dict", "(", "(", "name", ",", "fluent", "[", "0", "]", ")", "for", "name", ",", "fluent", "in", "actions", ")", "rewards", "=", "rewards", "[", "0", "]", "idx", "=", "self", ".", "_compiler", ".", "rddl", ".", "domain", ".", "state_fluent_ordering", ".", "index", "(", "'location/1'", ")", "start", "=", "initial_state", "[", "idx", "]", "[", "0", "]", "g", "=", "non_fluents", "[", "'GOAL/1'", "]", "path", "=", "states", "[", "'location/1'", "]", "deltas", "=", "actions", "[", "'move/1'", "]", "centers", "=", "non_fluents", "[", "'DECELERATION_ZONE_CENTER/2'", "]", "decays", "=", "non_fluents", "[", "'DECELERATION_ZONE_DECAY/1'", "]", "zones", "=", "[", "(", "x", ",", "y", ",", "d", ")", "for", "(", "x", ",", "y", ")", ",", "d", "in", "zip", "(", "centers", ",", "decays", ")", "]", "self", ".", "_ax1", "=", "plt", ".", "gca", "(", ")", "self", ".", "_render_state_space", "(", ")", "self", ".", "_render_start_and_goal_positions", "(", "start", ",", "g", ")", "self", ".", "_render_deceleration_zones", "(", "zones", ")", "self", ".", "_render_state_action_trajectory", "(", "start", ",", "path", ",", "deltas", ")", "plt", ".", "title", "(", "'Navigation'", ",", "fontweight", "=", "'bold'", ")", "plt", ".", "legend", "(", "loc", "=", "'lower right'", ")", "plt", ".", "show", "(", ")" ]
Render the simulated state-action `trajectories` for Navigation domain. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render.
[ "Render", "the", "simulated", "state", "-", "action", "trajectories", "for", "Navigation", "domain", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/navigation_visualizer.py#L44-L82
train
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
persistent_timer
def persistent_timer(func): """ Times the execution of a function. If the process is stopped and restarted then timing is continued using saved files. Parameters ---------- func Some function to be timed Returns ------- timed_function The same function with a timer attached. """ @functools.wraps(func) def timed_function(optimizer_instance, *args, **kwargs): start_time_path = "{}/.start_time".format(optimizer_instance.phase_output_path) try: with open(start_time_path) as f: start = float(f.read()) except FileNotFoundError: start = time.time() with open(start_time_path, "w+") as f: f.write(str(start)) result = func(optimizer_instance, *args, **kwargs) execution_time = str(dt.timedelta(seconds=time.time() - start)) logger.info("{} took {} to run".format( optimizer_instance.phase_name, execution_time )) with open("{}/execution_time".format(optimizer_instance.phase_output_path), "w+") as f: f.write(execution_time) return result return timed_function
python
def persistent_timer(func): """ Times the execution of a function. If the process is stopped and restarted then timing is continued using saved files. Parameters ---------- func Some function to be timed Returns ------- timed_function The same function with a timer attached. """ @functools.wraps(func) def timed_function(optimizer_instance, *args, **kwargs): start_time_path = "{}/.start_time".format(optimizer_instance.phase_output_path) try: with open(start_time_path) as f: start = float(f.read()) except FileNotFoundError: start = time.time() with open(start_time_path, "w+") as f: f.write(str(start)) result = func(optimizer_instance, *args, **kwargs) execution_time = str(dt.timedelta(seconds=time.time() - start)) logger.info("{} took {} to run".format( optimizer_instance.phase_name, execution_time )) with open("{}/execution_time".format(optimizer_instance.phase_output_path), "w+") as f: f.write(execution_time) return result return timed_function
[ "def", "persistent_timer", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "timed_function", "(", "optimizer_instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start_time_path", "=", "\"{}/.start_time\"", ".", "format", "(", "optimizer_instance", ".", "phase_output_path", ")", "try", ":", "with", "open", "(", "start_time_path", ")", "as", "f", ":", "start", "=", "float", "(", "f", ".", "read", "(", ")", ")", "except", "FileNotFoundError", ":", "start", "=", "time", ".", "time", "(", ")", "with", "open", "(", "start_time_path", ",", "\"w+\"", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "start", ")", ")", "result", "=", "func", "(", "optimizer_instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", "execution_time", "=", "str", "(", "dt", ".", "timedelta", "(", "seconds", "=", "time", ".", "time", "(", ")", "-", "start", ")", ")", "logger", ".", "info", "(", "\"{} took {} to run\"", ".", "format", "(", "optimizer_instance", ".", "phase_name", ",", "execution_time", ")", ")", "with", "open", "(", "\"{}/execution_time\"", ".", "format", "(", "optimizer_instance", ".", "phase_output_path", ")", ",", "\"w+\"", ")", "as", "f", ":", "f", ".", "write", "(", "execution_time", ")", "return", "result", "return", "timed_function" ]
Times the execution of a function. If the process is stopped and restarted then timing is continued using saved files. Parameters ---------- func Some function to be timed Returns ------- timed_function The same function with a timer attached.
[ "Times", "the", "execution", "of", "a", "function", ".", "If", "the", "process", "is", "stopped", "and", "restarted", "then", "timing", "is", "continued", "using", "saved", "files", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L110-L149
train
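The persistent_timer decorator above reads only two attributes from the wrapped object: phase_output_path (where the .start_time and execution_time files live) and phase_name (used in the log line). A minimal usage sketch, assuming persistent_timer is in scope; the optimizer stand-in is hypothetical:

    import os
    import tempfile
    import time

    class FakeOptimizer:
        phase_output_path = tempfile.mkdtemp()  # the decorator writes its files here
        phase_name = "phase_1"

        @persistent_timer
        def fit(self):
            time.sleep(0.1)

    opt = FakeOptimizer()
    opt.fit()  # first call: .start_time is created
    opt.fit()  # later calls (even after a restart) reuse the saved start time
    with open(os.path.join(FakeOptimizer.phase_output_path, "execution_time")) as f:
        print(f.read())  # cumulative wall time, e.g. "0:00:00.2..."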
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
NonLinearOptimizer.backup_path
def backup_path(self) -> str: """ The path to the backed up optimizer folder. """ return "{}/{}/{}{}/optimizer_backup".format(conf.instance.output_path, self.phase_path, self.phase_name, self.phase_tag)
python
def backup_path(self) -> str: """ The path to the backed up optimizer folder. """ return "{}/{}/{}{}/optimizer_backup".format(conf.instance.output_path, self.phase_path, self.phase_name, self.phase_tag)
[ "def", "backup_path", "(", "self", ")", "->", "str", ":", "return", "\"{}/{}/{}{}/optimizer_backup\"", ".", "format", "(", "conf", ".", "instance", ".", "output_path", ",", "self", ".", "phase_path", ",", "self", ".", "phase_name", ",", "self", ".", "phase_tag", ")" ]
The path to the backed up optimizer folder.
[ "The", "path", "to", "the", "backed", "up", "optimizer", "folder", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L213-L218
train
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
NonLinearOptimizer.backup
def backup(self): """ Copy files from the sym-linked optimizer folder to the backup folder in the workspace. """ try: shutil.rmtree(self.backup_path) except FileNotFoundError: pass try: shutil.copytree(self.opt_path, self.backup_path) except shutil.Error as e: logger.exception(e)
python
def backup(self): """ Copy files from the sym-linked optimizer folder to the backup folder in the workspace. """ try: shutil.rmtree(self.backup_path) except FileNotFoundError: pass try: shutil.copytree(self.opt_path, self.backup_path) except shutil.Error as e: logger.exception(e)
[ "def", "backup", "(", "self", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "self", ".", "backup_path", ")", "except", "FileNotFoundError", ":", "pass", "try", ":", "shutil", ".", "copytree", "(", "self", ".", "opt_path", ",", "self", ".", "backup_path", ")", "except", "shutil", ".", "Error", "as", "e", ":", "logger", ".", "exception", "(", "e", ")" ]
Copy files from the sym-linked optimizer folder to the backup folder in the workspace.
[ "Copy", "files", "from", "the", "sym", "-", "linked", "optimizer", "folder", "to", "the", "backup", "folder", "in", "the", "workspace", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L256-L267
train
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
NonLinearOptimizer.restore
def restore(self): """ Copy files from the backup folder to the sym-linked optimizer folder. """ if os.path.exists(self.backup_path): for file in glob.glob(self.backup_path + "/*"): shutil.copy(file, self.path)
python
def restore(self): """ Copy files from the backup folder to the sym-linked optimizer folder. """ if os.path.exists(self.backup_path): for file in glob.glob(self.backup_path + "/*"): shutil.copy(file, self.path)
[ "def", "restore", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "backup_path", ")", ":", "for", "file", "in", "glob", ".", "glob", "(", "self", ".", "backup_path", "+", "\"/*\"", ")", ":", "shutil", ".", "copy", "(", "file", ",", "self", ".", "path", ")" ]
Copy files from the backup folder to the sym-linked optimizer folder.
[ "Copy", "files", "from", "the", "backup", "folder", "to", "the", "sym", "-", "linked", "optimizer", "folder", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L269-L275
train
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
NonLinearOptimizer.config
def config(self, attribute_name, attribute_type=str): """ Get a config field from this optimizer's section in non_linear.ini by a key and value type. Parameters ---------- attribute_name: str The name of the field attribute_type: type The type of the value Returns ------- attribute An attribute for the key with the specified type. """ return self.named_config.get(self.__class__.__name__, attribute_name, attribute_type)
python
def config(self, attribute_name, attribute_type=str): """ Get a config field from this optimizer's section in non_linear.ini by a key and value type. Parameters ---------- attribute_name: str The name of the field attribute_type: type The type of the value Returns ------- attribute An attribute for the key with the specified type. """ return self.named_config.get(self.__class__.__name__, attribute_name, attribute_type)
[ "def", "config", "(", "self", ",", "attribute_name", ",", "attribute_type", "=", "str", ")", ":", "return", "self", ".", "named_config", ".", "get", "(", "self", ".", "__class__", ".", "__name__", ",", "attribute_name", ",", "attribute_type", ")" ]
Get a config field from this optimizer's section in non_linear.ini by a key and value type. Parameters ---------- attribute_name: str The name of the field attribute_type: type The type of the value Returns ------- attribute An attribute for the key with the specified type.
[ "Get", "a", "config", "field", "from", "this", "optimizer", "s", "section", "in", "non_linear", ".", "ini", "by", "a", "key", "and", "value", "type", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L277-L293
train
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
MultiNest.weighted_sample_instance_from_weighted_samples
def weighted_sample_instance_from_weighted_samples(self, index): """Setup a model instance of a weighted sample, including its weight and likelihood. Parameters ----------- index : int The index of the weighted sample to return. """ model, weight, likelihood = self.weighted_sample_model_from_weighted_samples(index) self._weighted_sample_model = model return self.variable.instance_from_physical_vector(model), weight, likelihood
python
def weighted_sample_instance_from_weighted_samples(self, index): """Setup a model instance of a weighted sample, including its weight and likelihood. Parameters ----------- index : int The index of the weighted sample to return. """ model, weight, likelihood = self.weighted_sample_model_from_weighted_samples(index) self._weighted_sample_model = model return self.variable.instance_from_physical_vector(model), weight, likelihood
[ "def", "weighted_sample_instance_from_weighted_samples", "(", "self", ",", "index", ")", ":", "model", ",", "weight", ",", "likelihood", "=", "self", ".", "weighted_sample_model_from_weighted_samples", "(", "index", ")", "self", ".", "_weighted_sample_model", "=", "model", "return", "self", ".", "variable", ".", "instance_from_physical_vector", "(", "model", ")", ",", "weight", ",", "likelihood" ]
Setup a model instance of a weighted sample, including its weight and likelihood. Parameters ----------- index : int The index of the weighted sample to return.
[ "Setup", "a", "model", "instance", "of", "a", "weighted", "sample", "including", "its", "weight", "and", "likelihood", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L724-L736
train
rhayes777/PyAutoFit
autofit/optimize/non_linear.py
MultiNest.weighted_sample_model_from_weighted_samples
def weighted_sample_model_from_weighted_samples(self, index): """From a weighted sample return the model, weight and likelihood. NOTE: GetDist reads the log likelihood from the weighted_sample.txt file (column 2), which is defined as \ -2.0*likelihood. This routine converts these back to likelihood. Parameters ----------- index : int The index of the weighted sample to return. """ return list(self.pdf.samples[index]), self.pdf.weights[index], -0.5 * self.pdf.loglikes[index]
python
def weighted_sample_model_from_weighted_samples(self, index): """From a weighted sample return the model, weight and likelihood. NOTE: GetDist reads the log likelihood from the weighted_sample.txt file (column 2), which is defined as \ -2.0*likelihood. This routine converts these back to likelihood. Parameters ----------- index : int The index of the weighted sample to return. """ return list(self.pdf.samples[index]), self.pdf.weights[index], -0.5 * self.pdf.loglikes[index]
[ "def", "weighted_sample_model_from_weighted_samples", "(", "self", ",", "index", ")", ":", "return", "list", "(", "self", ".", "pdf", ".", "samples", "[", "index", "]", ")", ",", "self", ".", "pdf", ".", "weights", "[", "index", "]", ",", "-", "0.5", "*", "self", ".", "pdf", ".", "loglikes", "[", "index", "]" ]
From a weighted sample return the model, weight and likelihood. NOTE: GetDist reads the log likelihood from the weighted_sample.txt file (column 2), which is defined as \ -2.0*likelihood. This routine converts these back to likelihood. Parameters ----------- index : int The index of the weighted sample to return.
[ "From", "a", "weighted", "sample", "return", "the", "model", "weight", "and", "likelihood", "hood", "." ]
a9e6144abb08edfc6a6906c4030d7119bf8d3e14
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/non_linear.py#L738-L749
train
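The -0.5 factor in the return value above undoes the -2.0 scaling described in the docstring: if GetDist stores s = -2.0 * likelihood in loglikes, then likelihood = -0.5 * s. A one-line arithmetic check with a hypothetical value (exact in binary floating point, since both factors are powers of two):

    stored = -2.0 * 123.4      # what GetDist would report in loglikes
    assert -0.5 * stored == 123.4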
Riminder/python-riminder-api
riminder/hmacutils.py
compare_digest
def compare_digest(a, b): """Compare 2 hash digests.""" py_version = sys.version_info[0] if py_version >= 3: return _compare_digest_py3(a, b) return _compare_digest_py2(a, b)
python
def compare_digest(a, b): """Compare 2 hash digests.""" py_version = sys.version_info[0] if py_version >= 3: return _compare_digest_py3(a, b) return _compare_digest_py2(a, b)
[ "def", "compare_digest", "(", "a", ",", "b", ")", ":", "py_version", "=", "sys", ".", "version_info", "[", "0", "]", "if", "py_version", ">=", "3", ":", "return", "_compare_digest_py3", "(", "a", ",", "b", ")", "return", "_compare_digest_py2", "(", "a", ",", "b", ")" ]
Compare 2 hash digests.
[ "Compare", "2", "hash", "digest", "." ]
01279f0ece08cf3d1dd45f76de6d9edf7fafec90
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/hmacutils.py#L15-L20
train
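compare_digest above exists because a plain == comparison returns as soon as the first differing byte is found, leaking timing information when checking signatures. A minimal usage sketch, assuming compare_digest is in scope; the key and message are placeholders:

    import hashlib
    import hmac

    key = b"secret-key"
    message = b"payload"
    expected = hmac.new(key, message, hashlib.sha256).hexdigest()
    received = expected  # in practice: the signature supplied by the caller

    if compare_digest(received, expected):  # constant-time comparison
        print("signature ok")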
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_trajectories
def _render_trajectories(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None: '''Prints the first batch of simulated `trajectories`. Args: trajectories: NonFluents, states, actions, interms and rewards. ''' if self._verbose: non_fluents, initial_state, states, actions, interms, rewards = trajectories shape = states[0][1].shape batch_size, horizon, = shape[0], shape[1] states = [(s[0], s[1][0]) for s in states] interms = [(f[0], f[1][0]) for f in interms] actions = [(a[0], a[1][0]) for a in actions] rewards = np.reshape(rewards, [batch_size, horizon])[0] self._render_batch(non_fluents, states, actions, interms, rewards)
python
def _render_trajectories(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None: '''Prints the first batch of simulated `trajectories`. Args: trajectories: NonFluents, states, actions, interms and rewards. ''' if self._verbose: non_fluents, initial_state, states, actions, interms, rewards = trajectories shape = states[0][1].shape batch_size, horizon, = shape[0], shape[1] states = [(s[0], s[1][0]) for s in states] interms = [(f[0], f[1][0]) for f in interms] actions = [(a[0], a[1][0]) for a in actions] rewards = np.reshape(rewards, [batch_size, horizon])[0] self._render_batch(non_fluents, states, actions, interms, rewards)
[ "def", "_render_trajectories", "(", "self", ",", "trajectories", ":", "Tuple", "[", "NonFluents", ",", "Fluents", ",", "Fluents", ",", "Fluents", ",", "np", ".", "array", "]", ")", "->", "None", ":", "if", "self", ".", "_verbose", ":", "non_fluents", ",", "initial_state", ",", "states", ",", "actions", ",", "interms", ",", "rewards", "=", "trajectories", "shape", "=", "states", "[", "0", "]", "[", "1", "]", ".", "shape", "batch_size", ",", "horizon", ",", "=", "shape", "[", "0", "]", ",", "shape", "[", "1", "]", "states", "=", "[", "(", "s", "[", "0", "]", ",", "s", "[", "1", "]", "[", "0", "]", ")", "for", "s", "in", "states", "]", "interms", "=", "[", "(", "f", "[", "0", "]", ",", "f", "[", "1", "]", "[", "0", "]", ")", "for", "f", "in", "interms", "]", "actions", "=", "[", "(", "a", "[", "0", "]", ",", "a", "[", "1", "]", "[", "0", "]", ")", "for", "a", "in", "actions", "]", "rewards", "=", "np", ".", "reshape", "(", "rewards", ",", "[", "batch_size", ",", "horizon", "]", ")", "[", "0", "]", "self", ".", "_render_batch", "(", "non_fluents", ",", "states", ",", "actions", ",", "interms", ",", "rewards", ")" ]
Prints the first batch of simulated `trajectories`. Args: trajectories: NonFluents, states, actions, interms and rewards.
[ "Prints", "the", "first", "batch", "of", "simulated", "trajectories", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L50-L65
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_batch
def _render_batch(self, non_fluents: NonFluents, states: Fluents, actions: Fluents, interms: Fluents, rewards: np.array, horizon: Optional[int] = None) -> None: '''Prints `non_fluents`, `states`, `actions`, `interms` and `rewards` for given `horizon`. Args: states (Sequence[Tuple[str, np.array]]): A state trajectory. actions (Sequence[Tuple[str, np.array]]): An action trajectory. interms (Sequence[Tuple[str, np.array]]): An interm state trajectory. rewards (np.array): Sequence of rewards (1-dimensional array). horizon (Optional[int]): Number of timesteps. ''' if horizon is None: horizon = len(states[0][1]) self._render_round_init(horizon, non_fluents) for t in range(horizon): s = [(s[0], s[1][t]) for s in states] f = [(f[0], f[1][t]) for f in interms] a = [(a[0], a[1][t]) for a in actions] r = rewards[t] self._render_timestep(t, s, a, f, r) self._render_round_end(rewards)
python
def _render_batch(self, non_fluents: NonFluents, states: Fluents, actions: Fluents, interms: Fluents, rewards: np.array, horizon: Optional[int] = None) -> None: '''Prints `non_fluents`, `states`, `actions`, `interms` and `rewards` for given `horizon`. Args: states (Sequence[Tuple[str, np.array]]): A state trajectory. actions (Sequence[Tuple[str, np.array]]): An action trajectory. interms (Sequence[Tuple[str, np.array]]): An interm state trajectory. rewards (np.array): Sequence of rewards (1-dimensional array). horizon (Optional[int]): Number of timesteps. ''' if horizon is None: horizon = len(states[0][1]) self._render_round_init(horizon, non_fluents) for t in range(horizon): s = [(s[0], s[1][t]) for s in states] f = [(f[0], f[1][t]) for f in interms] a = [(a[0], a[1][t]) for a in actions] r = rewards[t] self._render_timestep(t, s, a, f, r) self._render_round_end(rewards)
[ "def", "_render_batch", "(", "self", ",", "non_fluents", ":", "NonFluents", ",", "states", ":", "Fluents", ",", "actions", ":", "Fluents", ",", "interms", ":", "Fluents", ",", "rewards", ":", "np", ".", "array", ",", "horizon", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "None", ":", "if", "horizon", "is", "None", ":", "horizon", "=", "len", "(", "states", "[", "0", "]", "[", "1", "]", ")", "self", ".", "_render_round_init", "(", "horizon", ",", "non_fluents", ")", "for", "t", "in", "range", "(", "horizon", ")", ":", "s", "=", "[", "(", "s", "[", "0", "]", ",", "s", "[", "1", "]", "[", "t", "]", ")", "for", "s", "in", "states", "]", "f", "=", "[", "(", "f", "[", "0", "]", ",", "f", "[", "1", "]", "[", "t", "]", ")", "for", "f", "in", "interms", "]", "a", "=", "[", "(", "a", "[", "0", "]", ",", "a", "[", "1", "]", "[", "t", "]", ")", "for", "a", "in", "actions", "]", "r", "=", "rewards", "[", "t", "]", "self", ".", "_render_timestep", "(", "t", ",", "s", ",", "a", ",", "f", ",", "r", ")", "self", ".", "_render_round_end", "(", "rewards", ")" ]
Prints `non_fluents`, `states`, `actions`, `interms` and `rewards` for given `horizon`. Args: states (Sequence[Tuple[str, np.array]]): A state trajectory. actions (Sequence[Tuple[str, np.array]]): An action trajectory. interms (Sequence[Tuple[str, np.array]]): An interm state trajectory. rewards (np.array): Sequence of rewards (1-dimensional array). horizon (Optional[int]): Number of timesteps.
[ "Prints", "non_fluents", "states", "actions", "interms", "and", "rewards", "for", "given", "horizon", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L67-L91
train
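The slicing in _render_batch above relies on each fluent being a (name, values) pair whose values array is indexed by timestep along its first axis. A small sketch with a hypothetical 2D fluent over a horizon of 3:

    import numpy as np

    states = [("location/1", np.array([[0.0, 0.0], [0.1, 0.2], [0.3, 0.5]]))]
    t = 1
    s = [(name, values[t]) for name, values in states]
    # [("location/1", array([0.1, 0.2]))] -- the fluent values at timestep 1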
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_timestep
def _render_timestep(self, t: int, s: Fluents, a: Fluents, f: Fluents, r: np.float32) -> None: '''Prints fluents and rewards for the given timestep `t`. Args: t (int): timestep s (Sequence[Tuple[str, np.array]]): State fluents. a (Sequence[Tuple[str, np.array]]): Action fluents. f (Sequence[Tuple[str, np.array]]): Interm state fluents. r (np.float32): Reward. ''' print("============================") print("TIME = {}".format(t)) print("============================") fluent_variables = self._compiler.rddl.action_fluent_variables self._render_fluent_timestep('action', a, fluent_variables) fluent_variables = self._compiler.rddl.interm_fluent_variables self._render_fluent_timestep('interms', f, fluent_variables) fluent_variables = self._compiler.rddl.state_fluent_variables self._render_fluent_timestep('states', s, fluent_variables) self._render_reward(r)
python
def _render_timestep(self, t: int, s: Fluents, a: Fluents, f: Fluents, r: np.float32) -> None: '''Prints fluents and rewards for the given timestep `t`. Args: t (int): timestep s (Sequence[Tuple[str, np.array]]): State fluents. a (Sequence[Tuple[str, np.array]]): Action fluents. f (Sequence[Tuple[str, np.array]]): Interm state fluents. r (np.float32): Reward. ''' print("============================") print("TIME = {}".format(t)) print("============================") fluent_variables = self._compiler.rddl.action_fluent_variables self._render_fluent_timestep('action', a, fluent_variables) fluent_variables = self._compiler.rddl.interm_fluent_variables self._render_fluent_timestep('interms', f, fluent_variables) fluent_variables = self._compiler.rddl.state_fluent_variables self._render_fluent_timestep('states', s, fluent_variables) self._render_reward(r)
[ "def", "_render_timestep", "(", "self", ",", "t", ":", "int", ",", "s", ":", "Fluents", ",", "a", ":", "Fluents", ",", "f", ":", "Fluents", ",", "r", ":", "np", ".", "float32", ")", "->", "None", ":", "print", "(", "\"============================\"", ")", "print", "(", "\"TIME = {}\"", ".", "format", "(", "t", ")", ")", "print", "(", "\"============================\"", ")", "fluent_variables", "=", "self", ".", "_compiler", ".", "rddl", ".", "action_fluent_variables", "self", ".", "_render_fluent_timestep", "(", "'action'", ",", "a", ",", "fluent_variables", ")", "fluent_variables", "=", "self", ".", "_compiler", ".", "rddl", ".", "interm_fluent_variables", "self", ".", "_render_fluent_timestep", "(", "'interms'", ",", "f", ",", "fluent_variables", ")", "fluent_variables", "=", "self", ".", "_compiler", ".", "rddl", ".", "state_fluent_variables", "self", ".", "_render_fluent_timestep", "(", "'states'", ",", "s", ",", "fluent_variables", ")", "self", ".", "_render_reward", "(", "r", ")" ]
Prints fluents and rewards for the given timestep `t`. Args: t (int): timestep s (Sequence[Tuple[str, np.array]]): State fluents. a (Sequence[Tuple[str, np.array]]): Action fluents. f (Sequence[Tuple[str, np.array]]): Interm state fluents. r (np.float32): Reward.
[ "Prints", "fluents", "and", "rewards", "for", "the", "given", "timestep", "t", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L93-L115
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_fluent_timestep
def _render_fluent_timestep(self, fluent_type: str, fluents: Sequence[Tuple[str, np.array]], fluent_variables: Sequence[Tuple[str, List[str]]]) -> None: '''Prints `fluents` of given `fluent_type` as list of instantiated variables with corresponding values. Args: fluent_type (str): Fluent type. fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values). fluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args). ''' for fluent_pair, variable_list in zip(fluents, fluent_variables): name, fluent = fluent_pair _, variables = variable_list print(name) fluent = fluent.flatten() for variable, value in zip(variables, fluent): print('- {}: {} = {}'.format(fluent_type, variable, value)) print()
python
def _render_fluent_timestep(self, fluent_type: str, fluents: Sequence[Tuple[str, np.array]], fluent_variables: Sequence[Tuple[str, List[str]]]) -> None: '''Prints `fluents` of given `fluent_type` as list of instantiated variables with corresponding values. Args: fluent_type (str): Fluent type. fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values). fluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args). ''' for fluent_pair, variable_list in zip(fluents, fluent_variables): name, fluent = fluent_pair _, variables = variable_list print(name) fluent = fluent.flatten() for variable, value in zip(variables, fluent): print('- {}: {} = {}'.format(fluent_type, variable, value)) print()
[ "def", "_render_fluent_timestep", "(", "self", ",", "fluent_type", ":", "str", ",", "fluents", ":", "Sequence", "[", "Tuple", "[", "str", ",", "np", ".", "array", "]", "]", ",", "fluent_variables", ":", "Sequence", "[", "Tuple", "[", "str", ",", "List", "[", "str", "]", "]", "]", ")", "->", "None", ":", "for", "fluent_pair", ",", "variable_list", "in", "zip", "(", "fluents", ",", "fluent_variables", ")", ":", "name", ",", "fluent", "=", "fluent_pair", "_", ",", "variables", "=", "variable_list", "print", "(", "name", ")", "fluent", "=", "fluent", ".", "flatten", "(", ")", "for", "variable", ",", "value", "in", "zip", "(", "variables", ",", "fluent", ")", ":", "print", "(", "'- {}: {} = {}'", ".", "format", "(", "fluent_type", ",", "variable", ",", "value", ")", ")", "print", "(", ")" ]
Prints `fluents` of given `fluent_type` as list of instantiated variables with corresponding values. Args: fluent_type (str): Fluent type. fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values). fluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args).
[ "Prints", "fluents", "of", "given", "fluent_type", "as", "list", "of", "instantiated", "variables", "with", "corresponding", "values", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L117-L136
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_reward
def _render_reward(self, r: np.float32) -> None: '''Prints reward `r`.''' print("reward = {:.4f}".format(float(r))) print()
python
def _render_reward(self, r: np.float32) -> None: '''Prints reward `r`.''' print("reward = {:.4f}".format(float(r))) print()
[ "def", "_render_reward", "(", "self", ",", "r", ":", "np", ".", "float32", ")", "->", "None", ":", "print", "(", "\"reward = {:.4f}\"", ".", "format", "(", "float", "(", "r", ")", ")", ")", "print", "(", ")" ]
Prints reward `r`.
[ "Prints", "reward", "r", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L138-L141
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_round_init
def _render_round_init(self, horizon: int, non_fluents: NonFluents) -> None: '''Prints round init information about `horizon` and `non_fluents`.''' print('*********************************************************') print('>>> ROUND INIT, horizon = {}'.format(horizon)) print('*********************************************************') fluent_variables = self._compiler.rddl.non_fluent_variables self._render_fluent_timestep('non-fluents', non_fluents, fluent_variables)
python
def _render_round_init(self, horizon: int, non_fluents: NonFluents) -> None: '''Prints round init information about `horizon` and `non_fluents`.''' print('*********************************************************') print('>>> ROUND INIT, horizon = {}'.format(horizon)) print('*********************************************************') fluent_variables = self._compiler.rddl.non_fluent_variables self._render_fluent_timestep('non-fluents', non_fluents, fluent_variables)
[ "def", "_render_round_init", "(", "self", ",", "horizon", ":", "int", ",", "non_fluents", ":", "NonFluents", ")", "->", "None", ":", "print", "(", "'*********************************************************'", ")", "print", "(", "'>>> ROUND INIT, horizon = {}'", ".", "format", "(", "horizon", ")", ")", "print", "(", "'*********************************************************'", ")", "fluent_variables", "=", "self", ".", "_compiler", ".", "rddl", ".", "non_fluent_variables", "self", ".", "_render_fluent_timestep", "(", "'non-fluents'", ",", "non_fluents", ",", "fluent_variables", ")" ]
Prints round init information about `horizon` and `non_fluents`.
[ "Prints", "round", "init", "information", "about", "horizon", "and", "non_fluents", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L143-L149
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
GenericVisualizer._render_round_end
def _render_round_end(self, rewards: np.array) -> None: '''Prints round end information about `rewards`.''' print("*********************************************************") print(">>> ROUND END") print("*********************************************************") total_reward = np.sum(rewards) print("==> Objective value = {}".format(total_reward)) print("==> rewards = {}".format(list(rewards))) print()
python
def _render_round_end(self, rewards: np.array) -> None: '''Prints round end information about `rewards`.''' print("*********************************************************") print(">>> ROUND END") print("*********************************************************") total_reward = np.sum(rewards) print("==> Objective value = {}".format(total_reward)) print("==> rewards = {}".format(list(rewards))) print()
[ "def", "_render_round_end", "(", "self", ",", "rewards", ":", "np", ".", "array", ")", "->", "None", ":", "print", "(", "\"*********************************************************\"", ")", "print", "(", "\">>> ROUND END\"", ")", "print", "(", "\"*********************************************************\"", ")", "total_reward", "=", "np", ".", "sum", "(", "rewards", ")", "print", "(", "\"==> Objective value = {}\"", ".", "format", "(", "total_reward", ")", ")", "print", "(", "\"==> rewards = {}\"", ".", "format", "(", "list", "(", "rewards", ")", ")", ")", "print", "(", ")" ]
Prints round end information about `rewards`.
[ "Prints", "round", "end", "information", "about", "rewards", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L151-L159
train
edx/edx-celeryutils
celery_utils/persist_on_failure.py
_truncate_to_field
def _truncate_to_field(model, field_name, value): """ Shorten data to fit in the specified model field. If the data were too big for the field, it would cause a failure to insert, so we shorten it, truncating in the middle (because valuable information often shows up at the end). """ field = model._meta.get_field(field_name) # pylint: disable=protected-access if len(value) > field.max_length: midpoint = field.max_length // 2 len_after_midpoint = field.max_length - midpoint first = value[:midpoint] sep = '...' last = value[len(value) - len_after_midpoint + len(sep):] value = sep.join([first, last]) return value
python
def _truncate_to_field(model, field_name, value): """ Shorten data to fit in the specified model field. If the data were too big for the field, it would cause a failure to insert, so we shorten it, truncating in the middle (because valuable information often shows up at the end). """ field = model._meta.get_field(field_name) # pylint: disable=protected-access if len(value) > field.max_length: midpoint = field.max_length // 2 len_after_midpoint = field.max_length - midpoint first = value[:midpoint] sep = '...' last = value[len(value) - len_after_midpoint + len(sep):] value = sep.join([first, last]) return value
[ "def", "_truncate_to_field", "(", "model", ",", "field_name", ",", "value", ")", ":", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "# pylint: disable=protected-access", "if", "len", "(", "value", ")", ">", "field", ".", "max_length", ":", "midpoint", "=", "field", ".", "max_length", "//", "2", "len_after_midpoint", "=", "field", ".", "max_length", "-", "midpoint", "first", "=", "value", "[", ":", "midpoint", "]", "sep", "=", "'...'", "last", "=", "value", "[", "len", "(", "value", ")", "-", "len_after_midpoint", "+", "len", "(", "sep", ")", ":", "]", "value", "=", "sep", ".", "join", "(", "[", "first", ",", "last", "]", ")", "return", "value" ]
Shorten data to fit in the specified model field. If the data were too big for the field, it would cause a failure to insert, so we shorten it, truncating in the middle (because valuable information often shows up at the end).
[ "Shorten", "data", "to", "fit", "in", "the", "specified", "model", "field", "." ]
d8745f5f0929ad154fad779a19fbefe7f51e9498
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/persist_on_failure.py#L45-L61
train
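The middle-truncation above keeps the head and the tail of an oversized value and replaces the middle with '...', so the result is exactly max_length characters. A worked sketch with a hypothetical max_length of 10:

    value = "abcdefghijklmnop"                  # 16 characters
    max_length = 10
    midpoint = max_length // 2                  # 5
    len_after_midpoint = max_length - midpoint  # 5
    sep = "..."
    last = value[len(value) - len_after_midpoint + len(sep):]  # "op"
    print(sep.join([value[:midpoint], last]))   # "abcde...op", exactly 10 characters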
edx/edx-celeryutils
celery_utils/persist_on_failure.py
PersistOnFailureTask.on_failure
def on_failure(self, exc, task_id, args, kwargs, einfo): """ If the task fails, persist a record of the task. """ if not FailedTask.objects.filter(task_id=task_id, datetime_resolved=None).exists(): FailedTask.objects.create( task_name=_truncate_to_field(FailedTask, 'task_name', self.name), task_id=task_id, # Fixed length UUID: No need to truncate args=args, kwargs=kwargs, exc=_truncate_to_field(FailedTask, 'exc', repr(exc)), ) super(PersistOnFailureTask, self).on_failure(exc, task_id, args, kwargs, einfo)
python
def on_failure(self, exc, task_id, args, kwargs, einfo): """ If the task fails, persist a record of the task. """ if not FailedTask.objects.filter(task_id=task_id, datetime_resolved=None).exists(): FailedTask.objects.create( task_name=_truncate_to_field(FailedTask, 'task_name', self.name), task_id=task_id, # Fixed length UUID: No need to truncate args=args, kwargs=kwargs, exc=_truncate_to_field(FailedTask, 'exc', repr(exc)), ) super(PersistOnFailureTask, self).on_failure(exc, task_id, args, kwargs, einfo)
[ "def", "on_failure", "(", "self", ",", "exc", ",", "task_id", ",", "args", ",", "kwargs", ",", "einfo", ")", ":", "if", "not", "FailedTask", ".", "objects", ".", "filter", "(", "task_id", "=", "task_id", ",", "datetime_resolved", "=", "None", ")", ".", "exists", "(", ")", ":", "FailedTask", ".", "objects", ".", "create", "(", "task_name", "=", "_truncate_to_field", "(", "FailedTask", ",", "'task_name'", ",", "self", ".", "name", ")", ",", "task_id", "=", "task_id", ",", "# Fixed length UUID: No need to truncate", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "exc", "=", "_truncate_to_field", "(", "FailedTask", ",", "'exc'", ",", "repr", "(", "exc", ")", ")", ",", ")", "super", "(", "PersistOnFailureTask", ",", "self", ")", ".", "on_failure", "(", "exc", ",", "task_id", ",", "args", ",", "kwargs", ",", "einfo", ")" ]
If the task fails, persist a record of the task.
[ "If", "the", "task", "fails", "persist", "a", "record", "of", "the", "task", "." ]
d8745f5f0929ad154fad779a19fbefe7f51e9498
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/persist_on_failure.py#L22-L34
train
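Because on_failure above first checks for an unresolved FailedTask with the same task_id, repeated failures of one task run are recorded only once. A hedged sketch of how such a base class is typically wired into a Celery task, assuming PersistOnFailureTask is importable; the task body is hypothetical:

    from celery import shared_task

    @shared_task(base=PersistOnFailureTask)
    def flaky_job(x):
        raise ValueError("boom: {}".format(x))  # the failure is persisted once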
thiagopbueno/tf-rddlsim
tfrddlsim/viz/abstract_visualizer.py
Visualizer.render
def render(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array], batch: Optional[int] = None) -> None: '''Renders the simulated `trajectories` for the given `batch`. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render. ''' raise NotImplementedError
python
def render(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array], batch: Optional[int] = None) -> None: '''Renders the simulated `trajectories` for the given `batch`. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render. ''' raise NotImplementedError
[ "def", "render", "(", "self", ",", "trajectories", ":", "Tuple", "[", "NonFluents", ",", "Fluents", ",", "Fluents", ",", "Fluents", ",", "np", ".", "array", "]", ",", "batch", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "None", ":", "raise", "NotImplementedError" ]
Renders the simulated `trajectories` for the given `batch`. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render.
[ "Renders", "the", "simulated", "trajectories", "for", "the", "given", "batch", "." ]
d7102a0ad37d179dbb23141640254ea383d3b43f
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/abstract_visualizer.py#L41-L50
train
grundprinzip/pyxplorer
pyxplorer/types.py
Column.distribution
def distribution(self, limit=1024): """ Build the distribution of distinct values """ res = self._qexec("%s, count(*) as __cnt" % self.name(), group="%s" % self.name(), order="__cnt DESC LIMIT %d" % limit) dist = [] cnt = self._table.size() for i, r in enumerate(res): dist.append(list(r) + [i, r[1] / float(cnt)]) self._distribution = pd.DataFrame(dist, columns=["value", "cnt", "r", "fraction"]) self._distribution.index = self._distribution.r return self._distribution
python
def distribution(self, limit=1024): """ Build the distribution of distinct values """ res = self._qexec("%s, count(*) as __cnt" % self.name(), group="%s" % self.name(), order="__cnt DESC LIMIT %d" % limit) dist = [] cnt = self._table.size() for i, r in enumerate(res): dist.append(list(r) + [i, r[1] / float(cnt)]) self._distribution = pd.DataFrame(dist, columns=["value", "cnt", "r", "fraction"]) self._distribution.index = self._distribution.r return self._distribution
[ "def", "distribution", "(", "self", ",", "limit", "=", "1024", ")", ":", "res", "=", "self", ".", "_qexec", "(", "\"%s, count(*) as __cnt\"", "%", "self", ".", "name", "(", ")", ",", "group", "=", "\"%s\"", "%", "self", ".", "name", "(", ")", ",", "order", "=", "\"__cnt DESC LIMIT %d\"", "%", "limit", ")", "dist", "=", "[", "]", "cnt", "=", "self", ".", "_table", ".", "size", "(", ")", "for", "i", ",", "r", "in", "enumerate", "(", "res", ")", ":", "dist", ".", "append", "(", "list", "(", "r", ")", "+", "[", "i", ",", "r", "[", "1", "]", "/", "float", "(", "cnt", ")", "]", ")", "self", ".", "_distribution", "=", "pd", ".", "DataFrame", "(", "dist", ",", "columns", "=", "[", "\"value\"", ",", "\"cnt\"", ",", "\"r\"", ",", "\"fraction\"", "]", ")", "self", ".", "_distribution", ".", "index", "=", "self", ".", "_distribution", ".", "r", "return", "self", ".", "_distribution" ]
Build the distribution of distinct values
[ "Build", "the", "distribution", "of", "distinct", "values" ]
34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2
https://github.com/grundprinzip/pyxplorer/blob/34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2/pyxplorer/types.py#L91-L105
train
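distribution() above builds its DataFrame from the rows of a grouped count query, appending a rank and a fraction of the total table size to each row. A small sketch of that post-processing with hypothetical query results:

    rows = [("a", 600), ("b", 300), ("c", 100)]  # (value, count) from the grouped query
    table_size = 1000
    dist = [list(r) + [i, r[1] / float(table_size)] for i, r in enumerate(rows)]
    # [['a', 600, 0, 0.6], ['b', 300, 1, 0.3], ['c', 100, 2, 0.1]]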
gofed/gofedlib
gofedlib/distribution/distributionnameparser.py
DistributionNameParser.parse
def parse(self, name): """Parse distribution string :param name: distribution string, e.g. "Fedora 23" :type name: string """ name = name.strip() groups = self._parseFedora(name) if groups: self._signature = DistributionNameSignature("Fedora", groups.group(1)) return self raise ValueError("Distribution name '%s' not recognized" % name)
python
def parse(self, name): """Parse distribution string :param name: distribution string, e.g. "Fedora 23" :type name: string """ name = name.strip() groups = self._parseFedora(name) if groups: self._signature = DistributionNameSignature("Fedora", groups.group(1)) return self raise ValueError("Distribution name '%s' not recognized" % name)
[ "def", "parse", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "strip", "(", ")", "groups", "=", "self", ".", "_parseFedora", "(", "name", ")", "if", "groups", ":", "self", ".", "_signature", "=", "DistributionNameSignature", "(", "\"Fedora\"", ",", "groups", ".", "group", "(", "1", ")", ")", "return", "self", "raise", "ValueError", "(", "\"Distribution name '%s' not recognized\"", "%", "name", ")" ]
Parse distribution string :param name: distribution string, e.g. "Fedora 23" :type name: string
[ "Parse", "distribution", "string" ]
0674c248fe3d8706f98f912996b65af469f96b10
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/distribution/distributionnameparser.py#L52-L64
train
zalando-stups/lizzy-client
lizzy_client/token.py
get_token
def get_token(url: str, scopes: str, credentials_dir: str) -> dict: """ Get access token info. """ tokens.configure(url=url, dir=credentials_dir) tokens.manage('lizzy', [scopes]) tokens.start() return tokens.get('lizzy')
python
def get_token(url: str, scopes: str, credentials_dir: str) -> dict: """ Get access token info. """ tokens.configure(url=url, dir=credentials_dir) tokens.manage('lizzy', [scopes]) tokens.start() return tokens.get('lizzy')
[ "def", "get_token", "(", "url", ":", "str", ",", "scopes", ":", "str", ",", "credentials_dir", ":", "str", ")", "->", "dict", ":", "tokens", ".", "configure", "(", "url", "=", "url", ",", "dir", "=", "credentials_dir", ")", "tokens", ".", "manage", "(", "'lizzy'", ",", "[", "scopes", "]", ")", "tokens", ".", "start", "(", ")", "return", "tokens", ".", "get", "(", "'lizzy'", ")" ]
Get access token info.
[ "Get", "access", "token", "info", "." ]
0af9733ca5a25ebd0a9dc1453f2a7592efcee56a
https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/token.py#L4-L13
train
peterbe/gg
gg/builtins/config.py
config
def config(config, fork_name="", origin_name=""): """Setting various configuration options""" state = read(config.configfile) any_set = False if fork_name: update(config.configfile, {"FORK_NAME": fork_name}) success_out("fork-name set to: {}".format(fork_name)) any_set = True if origin_name: update(config.configfile, {"ORIGIN_NAME": origin_name}) success_out("origin-name set to: {}".format(origin_name)) any_set = True if not any_set: info_out("Fork-name: {}".format(state["FORK_NAME"]))
python
def config(config, fork_name="", origin_name=""): """Setting various configuration options""" state = read(config.configfile) any_set = False if fork_name: update(config.configfile, {"FORK_NAME": fork_name}) success_out("fork-name set to: {}".format(fork_name)) any_set = True if origin_name: update(config.configfile, {"ORIGIN_NAME": origin_name}) success_out("origin-name set to: {}".format(origin_name)) any_set = True if not any_set: info_out("Fork-name: {}".format(state["FORK_NAME"]))
[ "def", "config", "(", "config", ",", "fork_name", "=", "\"\"", ",", "origin_name", "=", "\"\"", ")", ":", "state", "=", "read", "(", "config", ".", "configfile", ")", "any_set", "=", "False", "if", "fork_name", ":", "update", "(", "config", ".", "configfile", ",", "{", "\"FORK_NAME\"", ":", "fork_name", "}", ")", "success_out", "(", "\"fork-name set to: {}\"", ".", "format", "(", "fork_name", ")", ")", "any_set", "=", "True", "if", "origin_name", ":", "update", "(", "config", ".", "configfile", ",", "{", "\"ORIGIN_NAME\"", ":", "origin_name", "}", ")", "success_out", "(", "\"origin-name set to: {}\"", ".", "format", "(", "origin_name", ")", ")", "any_set", "=", "True", "if", "not", "any_set", ":", "info_out", "(", "\"Fork-name: {}\"", ".", "format", "(", "state", "[", "\"FORK_NAME\"", "]", ")", ")" ]
Setting various configuration options
[ "Setting", "various", "configuration", "options" ]
2aace5bdb4a9b1cb65bea717784edf54c63b7bad
https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/config.py#L22-L35
train
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.set_area_to_sip_signature
def set_area_to_sip_signature(self, xmin, xmax, zmin, zmax, spectrum): """Parameterize the eit instance by supplying one SIP spectrum and the area to apply to. Parameters ---------- xmin : float Minimum x coordinate of the area xmax : float Maximum x coordinate of the area zmin : float Minimum z coordinate of the area zmax : float Maximum z coordinate of the area spectrum : sip_response SIP spectrum to use for parameterization """ assert isinstance(spectrum, (sip_response, sip_response2)) assert np.all(self.frequencies == spectrum.frequencies) for frequency, rmag, rpha in zip( self.frequencies, spectrum.rmag, spectrum.rpha): td = self.tds[frequency] pidm, pidp = td.a['forward_model'] td.parman.modify_area(pidm, xmin, xmax, zmin, zmax, rmag) td.parman.modify_area(pidp, xmin, xmax, zmin, zmax, rpha)
python
def set_area_to_sip_signature(self, xmin, xmax, zmin, zmax, spectrum): """Parameterize the eit instance by supplying one SIP spectrum and the area to apply to. Parameters ---------- xmin : float Minimum x coordinate of the area xmax : float Maximum x coordinate of the area zmin : float Minimum z coordinate of the area zmax : float Maximum z coordinate of the area spectrum : sip_response SIP spectrum to use for parameterization """ assert isinstance(spectrum, (sip_response, sip_response2)) assert np.all(self.frequencies == spectrum.frequencies) for frequency, rmag, rpha in zip( self.frequencies, spectrum.rmag, spectrum.rpha): td = self.tds[frequency] pidm, pidp = td.a['forward_model'] td.parman.modify_area(pidm, xmin, xmax, zmin, zmax, rmag) td.parman.modify_area(pidp, xmin, xmax, zmin, zmax, rpha)
[ "def", "set_area_to_sip_signature", "(", "self", ",", "xmin", ",", "xmax", ",", "zmin", ",", "zmax", ",", "spectrum", ")", ":", "assert", "isinstance", "(", "spectrum", ",", "(", "sip_response", ",", "sip_response2", ")", ")", "assert", "np", ".", "all", "(", "self", ".", "frequencies", "==", "spectrum", ".", "frequencies", ")", "for", "frequency", ",", "rmag", ",", "rpha", "in", "zip", "(", "self", ".", "frequencies", ",", "spectrum", ".", "rmag", ",", "spectrum", ".", "rpha", ")", ":", "td", "=", "self", ".", "tds", "[", "frequency", "]", "pidm", ",", "pidp", "=", "td", ".", "a", "[", "'forward_model'", "]", "td", ".", "parman", ".", "modify_area", "(", "pidm", ",", "xmin", ",", "xmax", ",", "zmin", ",", "zmax", ",", "rmag", ")", "td", ".", "parman", ".", "modify_area", "(", "pidp", ",", "xmin", ",", "xmax", ",", "zmin", ",", "zmax", ",", "rpha", ")" ]
Parameterize the eit instance by supplying one SIP spectrum and the area to apply to. Parameters ---------- xmin : float Minimum x coordinate of the area xmax : float Maximum x coordinate of the area zmin : float Minimum z coordinate of the area zmax : float Maximum z coordinate of the area spectrum : sip_response SIP spectrum to use for parameterization
[ "Parameterize", "the", "eit", "instance", "by", "supplying", "one", "SIP", "spectrum", "and", "the", "area", "to", "apply", "to", "." ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L163-L188
train
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.add_homogeneous_model
def add_homogeneous_model(self, magnitude, phase=0, frequency=None): """Add homogeneous models to one or all tomodirs. Register those as forward models Parameters ---------- magnitude : float Value of homogeneous magnitude model phase : float, optional Value of homogeneous phase model. Default 0 frequency : float, optional Frequency of the tomodir to use. If None, then apply to all tomodirs. Default is None. """ if frequency is None: frequencies = self.frequencies else: assert isinstance(frequency, Number) frequencies = [frequency, ] for freq in frequencies: pidm, pidp = self.tds[freq].add_homogeneous_model(magnitude, phase) self.a['forward_rmag'][freq] = pidm self.a['forward_rpha'][freq] = pidp
python
def add_homogeneous_model(self, magnitude, phase=0, frequency=None): """Add homogeneous models to one or all tomodirs. Register those as forward models Parameters ---------- magnitude : float Value of homogeneous magnitude model phase : float, optional Value of homogeneous phase model. Default 0 frequency : float, optional Frequency of the tomodir to use. If None, then apply to all tomodirs. Default is None. """ if frequency is None: frequencies = self.frequencies else: assert isinstance(frequency, Number) frequencies = [frequency, ] for freq in frequencies: pidm, pidp = self.tds[freq].add_homogeneous_model(magnitude, phase) self.a['forward_rmag'][freq] = pidm self.a['forward_rpha'][freq] = pidp
[ "def", "add_homogeneous_model", "(", "self", ",", "magnitude", ",", "phase", "=", "0", ",", "frequency", "=", "None", ")", ":", "if", "frequency", "is", "None", ":", "frequencies", "=", "self", ".", "frequencies", "else", ":", "assert", "isinstance", "(", "frequency", ",", "Number", ")", "frequencies", "=", "[", "frequency", ",", "]", "for", "freq", "in", "frequencies", ":", "pidm", ",", "pidp", "=", "self", ".", "tds", "[", "freq", "]", ".", "add_homogeneous_model", "(", "magnitude", ",", "phase", ")", "self", ".", "a", "[", "'forward_rmag'", "]", "[", "freq", "]", "=", "pidm", "self", ".", "a", "[", "'forward_rpha'", "]", "[", "freq", "]", "=", "pidp" ]
Add homogeneous models to one or all tomodirs. Register those as forward models Parameters ---------- magnitude : float Value of homogeneous magnitude model phase : float, optional Value of homogeneous phase model. Default 0 frequency : float, optional Frequency of the tomodir to use. If None, then apply to all tomodirs. Default is None.
[ "Add", "homogeneous", "models", "to", "one", "or", "all", "tomodirs", ".", "Register", "those", "as", "forward", "models" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L195-L218
train
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.apply_crtomo_cfg
def apply_crtomo_cfg(self): """Set the global crtomo_cfg for all frequencies """ for key in sorted(self.tds.keys()): self.tds[key].crtomo_cfg = self.crtomo_cfg.copy()
python
def apply_crtomo_cfg(self): """Set the global crtomo_cfg for all frequencies """ for key in sorted(self.tds.keys()): self.tds[key].crtomo_cfg = self.crtomo_cfg.copy()
[ "def", "apply_crtomo_cfg", "(", "self", ")", ":", "for", "key", "in", "sorted", "(", "self", ".", "tds", ".", "keys", "(", ")", ")", ":", "self", ".", "tds", "[", "key", "]", ".", "crtomo_cfg", "=", "self", ".", "crtomo_cfg", ".", "copy", "(", ")" ]
Set the global crtomo_cfg for all frequencies
[ "Set", "the", "global", "crtomo_cfg", "for", "all", "frequencies" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L273-L277
train
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.apply_noise_models
def apply_noise_models(self): """Set the global noise_model for all frequencies """ for key in sorted(self.tds.keys()): self.tds[key].noise_model = self.noise_model
python
def apply_noise_models(self): """Set the global noise_model for all frequencies """ for key in sorted(self.tds.keys()): self.tds[key].noise_model = self.noise_model
[ "def", "apply_noise_models", "(", "self", ")", ":", "for", "key", "in", "sorted", "(", "self", ".", "tds", ".", "keys", "(", ")", ")", ":", "self", ".", "tds", "[", "key", "]", ".", "noise_model", "=", "self", ".", "noise_model" ]
Set the global noise_model for all frequencies
[ "Set", "the", "global", "noise_model", "for", "all", "frequencies" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L279-L283
train
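apply_crtomo_cfg and apply_noise_models share the same push-down pattern: mutate the manager-level object once, then propagate it to every per-frequency tomodir. Note the asymmetry visible in the code: apply_crtomo_cfg stores a copy per tomodir, whereas apply_noise_models assigns the same object to all of them. A sketch continuing from the manager above; the crtomo_cfg key name is a hypothetical placeholder:

# change one global inversion setting, then push it to all frequencies
seit.crtomo_cfg['robust_inv'] = 'T'  # hypothetical key name
seit.apply_crtomo_cfg()

# same pattern for a previously constructed noise-model object
seit.noise_model = my_noise_model  # placeholder, built elsewhere
seit.apply_noise_models()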
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.load_inversion_results
def load_inversion_results(self, sipdir): """Given an sEIT inversion directory, load inversion results and store the corresponding parameter ids in self.assignments Note that any previous data stored in this instance of the eitManager may be overwritten! """ # load frequencies and initialize tomodir objects for all frequencies frequency_file = sipdir + os.sep + 'frequencies.dat' frequencies = np.loadtxt(frequency_file) self._init_frequencies(frequencies) # cycle through all tomodirs on disc and load the data for nr, (frequency_key, item) in enumerate(sorted(self.tds.items())): for label in ('rmag', 'rpha', 'cre', 'cim'): if label not in self.assignments: self.a[label] = {} tdir = sipdir + os.sep + 'invmod' + os.sep + '{:02}_{:.6f}'.format( nr, frequency_key) + os.sep rmag_file = sorted(glob(tdir + 'inv/*.mag'))[-1] rmag_data = np.loadtxt(rmag_file, skiprows=1)[:, 2] pid_rmag = item.parman.add_data(rmag_data) self.a['rmag'][frequency_key] = pid_rmag rpha_file = sorted(glob(tdir + 'inv/*.pha'))[-1] rpha_data = np.loadtxt(rpha_file, skiprows=1)[:, 2] pid_rpha = item.parman.add_data(rpha_data) self.a['rpha'][frequency_key] = pid_rpha sigma_file = sorted(glob(tdir + 'inv/*.sig'))[-1] sigma_data = np.loadtxt(sigma_file, skiprows=1) pid_cre = item.parman.add_data(sigma_data[:, 0]) pid_cim = item.parman.add_data(sigma_data[:, 1]) self.a['cre'][frequency_key] = pid_cre self.a['cim'][frequency_key] = pid_cim
python
def load_inversion_results(self, sipdir): """Given an sEIT inversion directory, load inversion results and store the corresponding parameter ids in self.assignments Note that any previous data stored in this instance of the eitManager may be overwritten! """ # load frequencies and initialize tomodir objects for all frequencies frequency_file = sipdir + os.sep + 'frequencies.dat' frequencies = np.loadtxt(frequency_file) self._init_frequencies(frequencies) # cycle through all tomodirs on disc and load the data for nr, (frequency_key, item) in enumerate(sorted(self.tds.items())): for label in ('rmag', 'rpha', 'cre', 'cim'): if label not in self.assignments: self.a[label] = {} tdir = sipdir + os.sep + 'invmod' + os.sep + '{:02}_{:.6f}'.format( nr, frequency_key) + os.sep rmag_file = sorted(glob(tdir + 'inv/*.mag'))[-1] rmag_data = np.loadtxt(rmag_file, skiprows=1)[:, 2] pid_rmag = item.parman.add_data(rmag_data) self.a['rmag'][frequency_key] = pid_rmag rpha_file = sorted(glob(tdir + 'inv/*.pha'))[-1] rpha_data = np.loadtxt(rpha_file, skiprows=1)[:, 2] pid_rpha = item.parman.add_data(rpha_data) self.a['rpha'][frequency_key] = pid_rpha sigma_file = sorted(glob(tdir + 'inv/*.sig'))[-1] sigma_data = np.loadtxt(sigma_file, skiprows=1) pid_cre = item.parman.add_data(sigma_data[:, 0]) pid_cim = item.parman.add_data(sigma_data[:, 1]) self.a['cre'][frequency_key] = pid_cre self.a['cim'][frequency_key] = pid_cim
[ "def", "load_inversion_results", "(", "self", ",", "sipdir", ")", ":", "# load frequencies and initialize tomodir objects for all frequencies", "frequency_file", "=", "sipdir", "+", "os", ".", "sep", "+", "'frequencies.dat'", "frequencies", "=", "np", ".", "loadtxt", "(", "frequency_file", ")", "self", ".", "_init_frequencies", "(", "frequencies", ")", "# cycle through all tomodirs on disc and load the data", "for", "nr", ",", "(", "frequency_key", ",", "item", ")", "in", "enumerate", "(", "sorted", "(", "self", ".", "tds", ".", "items", "(", ")", ")", ")", ":", "for", "label", "in", "(", "'rmag'", ",", "'rpha'", ",", "'cre'", ",", "'cim'", ")", ":", "if", "label", "not", "in", "self", ".", "assigments", ":", "self", ".", "a", "[", "label", "]", "=", "{", "}", "tdir", "=", "sipdir", "+", "os", ".", "sep", "+", "'invmod'", "+", "os", ".", "sep", "+", "'{:02}_{:.6f}'", ".", "format", "(", "nr", ",", "frequency_key", ")", "+", "os", ".", "sep", "rmag_file", "=", "sorted", "(", "glob", "(", "tdir", "+", "'inv/*.mag'", ")", ")", "[", "-", "1", "]", "rmag_data", "=", "np", ".", "loadtxt", "(", "rmag_file", ",", "skiprows", "=", "1", ")", "[", ":", ",", "2", "]", "pid_rmag", "=", "item", ".", "parman", ".", "add_data", "(", "rmag_data", ")", "self", ".", "a", "[", "'rmag'", "]", "[", "frequency_key", "]", "=", "pid_rmag", "rpha_file", "=", "sorted", "(", "glob", "(", "tdir", "+", "'inv/*.pha'", ")", ")", "[", "-", "1", "]", "rpha_data", "=", "np", ".", "loadtxt", "(", "rpha_file", ",", "skiprows", "=", "1", ")", "[", ":", ",", "2", "]", "pid_rpha", "=", "item", ".", "parman", ".", "add_data", "(", "rpha_data", ")", "self", ".", "a", "[", "'rpha'", "]", "[", "frequency_key", "]", "=", "pid_rpha", "sigma_file", "=", "sorted", "(", "glob", "(", "tdir", "+", "'inv/*.sig'", ")", ")", "[", "-", "1", "]", "sigma_data", "=", "np", ".", "loadtxt", "(", "sigma_file", ",", "skiprows", "=", "1", ")", "pid_cre", "=", "item", ".", "parman", ".", "add_data", "(", "sigma_data", "[", ":", ",", "0", "]", ")", "pid_cim", "=", "item", ".", "parman", ".", "add_data", "(", "sigma_data", "[", ":", ",", "1", "]", ")", "self", ".", "a", "[", "'cre'", "]", "[", "frequency_key", "]", "=", "pid_cre", "self", ".", "a", "[", "'cim'", "]", "[", "frequency_key", "]", "=", "pid_cim" ]
Given an sEIT inversion directory, load inversion results and store the corresponding parameter ids in self.assignments Note that any previous data stored in this instance of the eitManager may be overwritten!
[ "Given", "an", "sEIT", "inversion", "directory", "load", "inversion", "results", "and", "store", "the", "corresponding", "parameter", "ids", "in", "self", ".", "assignments" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L305-L341
train
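load_inversion_results expects a fixed on-disk layout: <sipdir>/frequencies.dat plus one invmod/<nr>_<frequency>/inv/ directory per frequency containing *.mag, *.pha and *.sig files, of which the alphabetically last (i.e., the final iteration) is loaded. A usage sketch, continuing from the manager above:

# reload a finished sEIT inversion from disk
seit.load_inversion_results('sipdir')

# loaded results are addressed via parameter ids, one set per frequency
for freq in seit.frequencies:
    print(freq, seit.a['rmag'][freq], seit.a['cre'][freq])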
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.plot_forward_models
def plot_forward_models(self, maglim=None, phalim=None, **kwargs): """Create plots of the forward models Parameters ---------- maglim : tuple, optional (min, max) colorbar limits for the magnitude plots. Default is None (automatic limits) phalim : tuple, optional (min, max) colorbar limits for the phase plots. Default is None (automatic limits) Returns ------- return_dict : dict Dictionary with keys 'rmag' and 'rpha', each containing the figure and axes objects of the corresponding plots """ return_dict = {} N = len(self.frequencies) nrx = min(N, 4) nrz = int(np.ceil(N / nrx)) for index, key, limits in zip( (0, 1), ('rmag', 'rpha'), (maglim, phalim)): if limits is None: cbmin = None cbmax = None else: cbmin = limits[0] cbmax = limits[1] fig, axes = plt.subplots( nrz, nrx, figsize=(16 / 2.54, nrz * 3 / 2.54), sharex=True, sharey=True, ) for ax in axes.flat: ax.set_visible(False) for ax, frequency in zip(axes.flat, self.frequencies): ax.set_visible(True) td = self.tds[frequency] pids = td.a['forward_model'] td.plot.plot_elements_to_ax( pids[index], ax=ax, plot_colorbar=True, cbposition='horizontal', cbmin=cbmin, cbmax=cbmax, **kwargs ) for ax in axes[0:-1, :].flat: ax.set_xlabel('') for ax in axes[:, 1:].flat: ax.set_ylabel('') fig.tight_layout() return_dict[key] = { 'fig': fig, 'axes': axes, } return return_dict
python
def plot_forward_models(self, maglim=None, phalim=None, **kwargs): """Create plots of the forward models Parameters ---------- maglim : tuple, optional (min, max) colorbar limits for the magnitude plots. Default is None (automatic limits) phalim : tuple, optional (min, max) colorbar limits for the phase plots. Default is None (automatic limits) Returns ------- return_dict : dict Dictionary with keys 'rmag' and 'rpha', each containing the figure and axes objects of the corresponding plots """ return_dict = {} N = len(self.frequencies) nrx = min(N, 4) nrz = int(np.ceil(N / nrx)) for index, key, limits in zip( (0, 1), ('rmag', 'rpha'), (maglim, phalim)): if limits is None: cbmin = None cbmax = None else: cbmin = limits[0] cbmax = limits[1] fig, axes = plt.subplots( nrz, nrx, figsize=(16 / 2.54, nrz * 3 / 2.54), sharex=True, sharey=True, ) for ax in axes.flat: ax.set_visible(False) for ax, frequency in zip(axes.flat, self.frequencies): ax.set_visible(True) td = self.tds[frequency] pids = td.a['forward_model'] td.plot.plot_elements_to_ax( pids[index], ax=ax, plot_colorbar=True, cbposition='horizontal', cbmin=cbmin, cbmax=cbmax, **kwargs ) for ax in axes[0:-1, :].flat: ax.set_xlabel('') for ax in axes[:, 1:].flat: ax.set_ylabel('') fig.tight_layout() return_dict[key] = { 'fig': fig, 'axes': axes, } return return_dict
[ "def", "plot_forward_models", "(", "self", ",", "maglim", "=", "None", ",", "phalim", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return_dict", "=", "{", "}", "N", "=", "len", "(", "self", ".", "frequencies", ")", "nrx", "=", "min", "(", "N", ",", "4", ")", "nrz", "=", "int", "(", "np", ".", "ceil", "(", "N", "/", "nrx", ")", ")", "for", "index", ",", "key", ",", "limits", "in", "zip", "(", "(", "0", ",", "1", ")", ",", "(", "'rmag'", ",", "'rpha'", ")", ",", "(", "maglim", ",", "phalim", ")", ")", ":", "if", "limits", "is", "None", ":", "cbmin", "=", "None", "cbmax", "=", "None", "else", ":", "cbmin", "=", "limits", "[", "0", "]", "cbmax", "=", "limits", "[", "1", "]", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrz", ",", "nrx", ",", "figsize", "=", "(", "16", "/", "2.54", ",", "nrz", "*", "3", "/", "2.54", ")", ",", "sharex", "=", "True", ",", "sharey", "=", "True", ",", ")", "for", "ax", "in", "axes", ".", "flat", ":", "ax", ".", "set_visible", "(", "False", ")", "for", "ax", ",", "frequency", "in", "zip", "(", "axes", ".", "flat", ",", "self", ".", "frequencies", ")", ":", "ax", ".", "set_visible", "(", "True", ")", "td", "=", "self", ".", "tds", "[", "frequency", "]", "pids", "=", "td", ".", "a", "[", "'forward_model'", "]", "td", ".", "plot", ".", "plot_elements_to_ax", "(", "pids", "[", "index", "]", ",", "ax", "=", "ax", ",", "plot_colorbar", "=", "True", ",", "cbposition", "=", "'horizontal'", ",", "cbmin", "=", "cbmin", ",", "cbmax", "=", "cbmax", ",", "*", "*", "kwargs", ")", "for", "ax", "in", "axes", "[", "0", ":", "-", "1", ",", ":", "]", ".", "flat", ":", "ax", ".", "set_xlabel", "(", "''", ")", "for", "ax", "in", "axes", "[", ":", ",", "1", ":", "]", ".", "flat", ":", "ax", ".", "set_ylabel", "(", "''", ")", "fig", ".", "tight_layout", "(", ")", "return_dict", "[", "key", "]", "=", "{", "'fig'", ":", "fig", ",", "'axes'", ":", "axes", ",", "}", "return", "return_dict" ]
Create plots of the forward models Parameters ---------- maglim : tuple, optional (min, max) colorbar limits for the magnitude plots. Default is None (automatic limits) phalim : tuple, optional (min, max) colorbar limits for the phase plots. Default is None (automatic limits) Returns ------- return_dict : dict Dictionary with keys 'rmag' and 'rpha', each containing the figure and axes objects of the corresponding plots
[ "Create", "plots", "of", "the", "forward", "models" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L402-L460
train
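A sketch of how the returned dictionary is typically consumed; the colorbar limits are illustrative values:

# plot forward models with fixed colorbar ranges
figs = seit.plot_forward_models(maglim=[10, 300], phalim=[-20, 0])
figs['rmag']['fig'].savefig('forward_rmag.png', dpi=300)
figs['rpha']['fig'].savefig('forward_rpha.png', dpi=300)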
geophysics-ubonn/crtomo_tools
lib/crtomo/eitManager.py
eitMan.add_to_configs
def add_to_configs(self, configs): """Add configurations to all tomodirs Parameters ---------- configs : :class:`numpy.ndarray` Nx4 numpy array with abmn configurations """ for td in self.tds.values(): td.configs.add_to_configs(configs)
python
def add_to_configs(self, configs): """Add configurations to all tomodirs Parameters ---------- configs : :class:`numpy.ndarray` Nx4 numpy array with abmn configurations """ for td in self.tds.values(): td.configs.add_to_configs(configs)
[ "def", "add_to_configs", "(", "self", ",", "configs", ")", ":", "for", "f", ",", "td", "in", "self", ".", "tds", ".", "items", "(", ")", ":", "td", ".", "configs", ".", "add_to_configs", "(", "configs", ")" ]
Add configurations to all tomodirs Parameters ---------- configs : :class:`numpy.ndarray` Nx4 numpy array with abmn configurations
[ "Add", "configurations", "to", "all", "tomodirs" ]
27c3e21a557f8df1c12455b96c4c2e00e08a5b4a
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L462-L472
train
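A short sketch for add_to_configs; the quadrupole values are illustrative only:

import numpy as np

# Nx4 abmn array: current injection pair (a, b), voltage dipole (m, n)
configs = np.array([
    [1, 2, 4, 3],
    [2, 3, 5, 4],
])
seit.add_to_configs(configs)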