def read_stats(self):
""" Read current statistics from chassis.
:return: dictionary {stream: {'tx': {stat name: stat value}, 'rx': {tpld: {group name: {stat name: stat value}}}}}
"""
self.tx_statistics = TgnObjectsDict()
for port in self.session.ports.values():
for stream in port.streams.values():
self.tx_statistics[stream] = stream.read_stats()
tpld_statistics = XenaTpldsStats(self.session).read_stats()
self.statistics = TgnObjectsDict()
for stream, stream_stats in self.tx_statistics.items():
self.statistics[stream] = OrderedDict()
self.statistics[stream]['tx'] = stream_stats
self.statistics[stream]['rx'] = OrderedDict()
stream_tpld = stream.get_attribute('ps_tpldid')
for tpld, tpld_stats in tpld_statistics.items():
if tpld.id == stream_tpld:
self.statistics[stream]['rx'][tpld] = tpld_stats
return self.statistics |
def read_stats(self):
""" Read current statistics from chassis.
:return: dictionary {tpld full index: {group name: {stat name: stat value}}}
"""
self.statistics = TgnObjectsDict()
for port in self.session.ports.values():
for tpld in port.tplds.values():
self.statistics[tpld] = tpld.read_stats()
return self.statistics |
def get_user_sets(client_id, user_id):
"""Find all user sets."""
data = api_call('get', 'users/{}/sets'.format(user_id), client_id=client_id)
return [WordSet.from_dict(wordset) for wordset in data] |
def print_user_sets(wordsets, print_terms):
"""Print all user sets by title. If 'print_terms', also prints all terms of all user sets.
:param wordsets: List of WordSet.
:param print_terms: If True, also prints all terms of all user sets.
"""
if not wordsets:
print('No sets found')
else:
print('Found sets: {}'.format(len(wordsets)))
for wordset in wordsets:
print(' {}'.format(wordset))
if print_terms:
for term in wordset.terms:
print(' {}'.format(term)) |
def get_common_terms(*api_envs):
"""Get all term duplicates across all user word sets as a list of
(title of first word set, title of second word set, set of terms) tuples."""
common_terms = []
# pylint: disable=no-value-for-parameter
wordsets = get_user_sets(*api_envs)
# pylint: enable=no-value-for-parameter
for wordset1, wordset2 in combinations(wordsets, 2):
common = wordset1.has_common(wordset2)
if common:
common_terms.append((wordset1.title, wordset2.title, common))
return common_terms |
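A minimal sketch of the pairwise scan above, using plain dicts in place of WordSet (names and data are illustrative):

from itertools import combinations

# Hypothetical stand-in: title -> set of terms.
wordsets = {
    'Animals': {'cat', 'dog', 'owl'},
    'Pets': {'cat', 'dog'},
    'Birds': {'owl', 'finch'},
}

common_terms = []
for (title1, terms1), (title2, terms2) in combinations(wordsets.items(), 2):
    common = terms1 & terms2  # same role as wordset1.has_common(wordset2)
    if common:
        common_terms.append((title1, title2, common))
# -> [('Animals', 'Pets', {'cat', 'dog'}), ('Animals', 'Birds', {'owl'})]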
def print_common_terms(common_terms):
"""Print common terms for each pair of word sets.
:param common_terms: Output of get_common_terms().
"""
if not common_terms:
print('No duplicates')
else:
for set_pair in common_terms:
set1, set2, terms = set_pair
print('{} and {} have in common:'.format(set1, set2))
for term in terms:
print(' {}'.format(term)) |
def delete_term(set_id, term_id, access_token):
"""Delete the given term."""
api_call('delete', 'sets/{}/terms/{}'.format(set_id, term_id), access_token=access_token) |
def add_term(set_id, term, access_token):
"""Add the given term to the given set.
:param term: Instance of Term.
"""
api_call('post', 'sets/{}/terms'.format(set_id), term.to_dict(), access_token=access_token) |
def reset_term_stats(set_id, term_id, client_id, user_id, access_token):
"""Reset the stats of a term by deleting and re-creating it."""
found_sets = [user_set for user_set in get_user_sets(client_id, user_id)
if user_set.set_id == set_id]
if len(found_sets) != 1:
raise ValueError('{} set(s) found with id {}'.format(len(found_sets), set_id))
found_terms = [term for term in found_sets[0].terms if term.term_id == term_id]
if len(found_terms) != 1:
raise ValueError('{} term(s) found with id {}'.format(len(found_terms), term_id))
term = found_terms[0]
if term.image.url:
# Creating a term with an image requires an "image identifier", which you get by uploading
# an image via https://quizlet.com/api/2.0/docs/images , which can only be used by Quizlet
# PLUS members.
raise NotImplementedError('"{}" has an image and is thus not supported'.format(term))
print('Deleting "{}"...'.format(term))
delete_term(set_id, term_id, access_token)
print('Re-creating "{}"...'.format(term))
add_term(set_id, term, access_token)
print('Done') |
def createController(self, key, attributes, ipmi, printer=False):
""" Function createController
Create a controller node
@param key: The host name or ID
@param attributes: The payload of the host creation
@param printer: - False for no creation progression message
- True to get creation progression printed on STDOUT
- Printer class containing a status method for enhanced
print. def printer.status(status, msg, eol=eol)
@return RETURN: The API result
"""
if key not in self:
self.printer = printer
self.async = False
# Create the VM in foreman
self.__printProgression__('In progress',
key + ' creation: push in Foreman',
eol='\r')
self.api.create('hosts', attributes, async=self.async)
self[key]['interfaces'].append(ipmi)
# Wait for puppet catalog to be applied
# self.waitPuppetCatalogToBeApplied(key)
self.reload()
self[key]['build'] = 'true'
self[key]['boot'] = 'pxe'
self[key]['power'] = 'cycle'
return self[key] |
def waitPuppetCatalogToBeApplied(self, key, sleepTime=5):
""" Function waitPuppetCatalogToBeApplied
Wait for puppet catalog to be applied
@param key: The host name or ID
@return RETURN: None on success, False on provisioning error
"""
# Wait for puppet catalog to be applied
loop_stop = False
while not loop_stop:
status = self[key].getStatus()
if status == 'No Changes' or status == 'Active':
self.__printProgression__(True,
key + ' creation: provisioning OK')
loop_stop = True
elif status == 'Error':
self.__printProgression__(False,
key + ' creation: Error - '
'Error during provisioning')
loop_stop = True
return False
else:
self.__printProgression__('In progress',
key + ' creation: provisioning ({})'
.format(status),
eol='\r')
time.sleep(sleepTime) |
def createVM(self, key, attributes, printer=False):
""" Function createVM
Create a Virtual Machine
Creating a VM with libvirt is a bit complex: we first create the
element in Foreman, then ask it to start before the creation result
is returned.
To do so, we make async calls to the API and check the results
@param key: The host name or ID
@param attributes: The payload of the host creation
@param printer: - False for no creation progression message
- True to get creation progression printed on STDOUT
- Printer class containing a status method for enhanced
print. def printer.status(status, msg, eol=eol)
@return RETURN: The API result
"""
self.printer = printer
self.async = False
# Create the VM in foreman
# NOTA: with 1.8 it will return 422 'Failed to login via SSH'
self.__printProgression__('In progress',
key + ' creation: push in Foreman', eol='\r')
asyncCreation = self.api.create('hosts', attributes, async=self.async)
# Wait before asking to power on the VM
# sleep = 5
# for i in range(0, sleep):
# time.sleep(1)
# self.__printProgression__('In progress',
# key + ' creation: start in {0}s'
# .format(sleep - i),
# eol='\r')
# Power on the VM
self.__printProgression__('In progress',
key + ' creation: starting', eol='\r')
powerOn = self[key].powerOn()
# Show Power on result
if powerOn['power']:
self.__printProgression__('In progress',
key + ' creation: wait for end of boot',
eol='\r')
else:
self.__printProgression__(False,
key + ' creation: Error - ' +
str(powerOn))
return False
# Show creation result
# NOTA: with 1.8 it will return 422 'Failed to login via SSH'
# if asyncCreation.result().status_code is 200:
# self.__printProgression__('In progress',
# key + ' creation: created',
# eol='\r')
# else:
# self.__printProgression__(False,
# key + ' creation: Error - ' +
# str(asyncCreation.result()
# .status_code) + ' - ' +
# str(asyncCreation.result().text))
# return False
# Wait for puppet catalog to be applied
self.waitPuppetCatalogToBeApplied(key)
return self[key]['id'] |
def get(router, organization, email):
"""
:rtype: User
"""
log.info("Getting user: %s" % email)
resp = router.get_users(org_id=organization.id).json()
ids = [x['id'] for x in resp if x['email'] == email]
if len(ids):
user = User(organization, ids[0]).init_router(router)
return user
else:
raise exceptions.NotFoundError('User with email: %s not found' % email) |
def construct(self, mapping: dict, **kwargs):
"""
Construct an object from a mapping
:param mapping: the constructor definition, with ``__type__`` name and keyword arguments
:param kwargs: additional keyword arguments to pass to the constructor
"""
assert '__type__' not in kwargs and '__args__' not in kwargs
mapping = {**mapping, **kwargs}
factory_fqdn = mapping.pop('__type__')
factory = self.load_name(factory_fqdn)
args = mapping.pop('__args__', [])
return factory(*args, **mapping) |
def load_name(absolute_name: str):
"""Load an object based on an absolute, dotted name"""
path = absolute_name.split('.')
try:
__import__(absolute_name)
except ImportError:
try:
obj = sys.modules[path[0]]
except KeyError:
raise ModuleNotFoundError('No module named %r' % path[0])
else:
for component in path[1:]:
try:
obj = getattr(obj, component)
except AttributeError as err:
raise ConfigurationError(what='no such object %r: %s' % (absolute_name, err))
return obj
else: # ImportError is not raised if ``absolute_name`` points to a valid module
return sys.modules[absolute_name] |
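A usage sketch for the two helpers above (assuming they are in scope); collections.OrderedDict is just an arbitrary resolvable factory:

# Resolve a dotted name to an object.
factory = load_name('collections.OrderedDict')

# Build an object the way construct() does: __type__ names the factory,
# __args__ supplies positional arguments, remaining keys become keywords.
mapping = {'__type__': 'collections.OrderedDict',
           '__args__': [[('a', 1), ('b', 2)]]}
name = mapping.pop('__type__')
args = mapping.pop('__args__', [])
obj = load_name(name)(*args, **mapping)  # OrderedDict([('a', 1), ('b', 2)])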
def patch(self, path, value=None):
""" Set specified value to yaml path.
Example:
patch('application/components/child/configuration/__locator.application-id','777')
Will change child app ID to 777
"""
# noinspection PyShadowingNames
def pathGet(dictionary, path):
for item in path.split("/"):
dictionary = dictionary[item]
return dictionary
# noinspection PyShadowingNames
def pathSet(dictionary, path, value):
path = path.split("/")
key = path[-1]
dictionary = pathGet(dictionary, "/".join(path[:-1]))
dictionary[key] = value
# noinspection PyShadowingNames
def pathRm(dictionary, path):
path = path.split("/")
key = path[-1]
dictionary = pathGet(dictionary, "/".join(path[:-1]))
del dictionary[key]
src = yaml.safe_load(self.content)
if value is not None:
pathSet(src, path, value)
else:
pathRm(src, path)
self._raw_content = yaml.safe_dump(src, default_flow_style=False)
return True |
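A standalone sketch of what the docstring example does to the underlying mapping (the YAML structure is hypothetical):

import yaml

content = """\
application:
  components:
    child:
      configuration:
        __locator.application-id: '123'
"""
src = yaml.safe_load(content)
# Walk all but the last path segment, then assign the leaf value.
node = src
for item in 'application/components/child/configuration'.split('/'):
    node = node[item]
node['__locator.application-id'] = '777'
print(yaml.safe_dump(src, default_flow_style=False))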
async def _await_all(self):
"""Async component of _run"""
delay = 0.0
# we run a top-level nursery that automatically reaps/cancels for us
async with trio.open_nursery() as nursery:
while self.running.is_set():
await self._start_payloads(nursery=nursery)
await trio.sleep(delay)
delay = min(delay + 0.1, 1.0)
# cancel the scope to cancel all payloads
nursery.cancel_scope.cancel() |
async def _start_payloads(self, nursery):
"""Start all queued payloads"""
with self._lock:
for coroutine in self._payloads:
nursery.start_soon(coroutine)
self._payloads.clear()
await trio.sleep(0) |
def contains_list(longer, shorter):
"""Check if longer list starts with shorter list"""
if len(longer) <= len(shorter):
return False
for a, b in zip(shorter, longer):
if a != b:
return False
return True |
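A few checks pinning down the strict-prefix behaviour (a list of equal length is not considered a match):

assert contains_list(['a', 'b', 'c'], ['a', 'b'])
assert not contains_list(['a', 'b'], ['a', 'b'])       # equal length
assert not contains_list(['a', 'x', 'c'], ['a', 'b'])  # element mismatch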
def load(f, dict_=dict):
"""Load and parse toml from a file object
An additional argument `dict_` is used to specify the output type
"""
if not hasattr(f, 'read'):
raise ValueError('The first parameter needs to be a file object, '
'%r is passed' % type(f))
return loads(f.read(), dict_) |
def loads(content, dict_=dict):
"""Parse a toml string
An additional argument `dict_` is used to specify the output type
"""
if not isinstance(content, basestring):
raise ValueError('The first parameter needs to be a string object, '
'%r is passed' % type(content))
decoder = Decoder(content, dict_)
decoder.parse()
return decoder.data |
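A usage sketch, assuming the decoder above supports basic tables and scalar values:

data = loads('[server]\nhost = "127.0.0.1"\nport = 8080\n')
assert data == {'server': {'host': '127.0.0.1', 'port': 8080}}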
def convert(self, line=None, is_end=True):
"""Read the line content and return the converted value
:param line: the line to feed to converter
:param is_end: if set to True, will raise an error if
the line has something remaining.
"""
if line is not None:
self.line = line
if not self.line:
raise TomlDecodeError(self.parser.lineno,
'EOF is hit!')
token = None
self.line = self.line.lstrip()
for key, pattern in self.patterns:
m = pattern.match(self.line)
if m:
self.line = self.line[m.end():]
handler = getattr(self, 'convert_%s' % key)
token = handler(m)
break
else:
raise TomlDecodeError(self.parser.lineno,
'Parsing error: %r' % self.line)
if is_end and not BLANK_RE.match(self.line):
raise TomlDecodeError(self.parser.lineno,
'Something remains: %r' % self.line)
return token |
def parse(self, data=None, table_name=None):
"""Parse the lines from index i
:param data: optional, store the parsed result to it when specified
:param table_name: when inside a table array, it is the table name
"""
temp = self.dict_()
sub_table = None
is_array = False
line = ''
while True:
line = self._readline()
if not line:
self._store_table(sub_table, temp, is_array, data=data)
break # EOF
if BLANK_RE.match(line):
continue
if TABLE_RE.match(line):
next_table = self.split_string(
TABLE_RE.match(line).group(1), '.', False)
if table_name and not contains_list(next_table, table_name):
self._store_table(sub_table, temp, is_array, data=data)
break
table = cut_list(next_table, table_name)
if sub_table == table:
raise TomlDecodeError(self.lineno, 'Duplicate table name '
'in origin: %r' % sub_table)
else: # different table name
self._store_table(sub_table, temp, is_array, data=data)
sub_table = table
is_array = False
elif TABLE_ARRAY_RE.match(line):
next_table = self.split_string(
TABLE_ARRAY_RE.match(line).group(1), '.', False)
if table_name and not contains_list(next_table, table_name):
# Out of current loop
# write current data dict to table dict
self._store_table(sub_table, temp, is_array, data=data)
break
table = cut_list(next_table, table_name)
if sub_table == table and not is_array:
raise TomlDecodeError(self.lineno, 'Duplicate name of '
'table and array of table: %r'
% sub_table)
else: # Begin a nested loop
# Write any temp data to table dict
self._store_table(sub_table, temp, is_array, data=data)
sub_table = table
is_array = True
self.parse(temp, next_table)
elif KEY_RE.match(line):
m = KEY_RE.match(line)
keys = self.split_string(m.group(1), '.')
value = self.converter.convert(line[m.end():])
if value is None:
raise TomlDecodeError(self.lineno, 'Value is missing')
self._store_table(keys[:-1], {keys[-1]: value}, data=temp)
else:
raise TomlDecodeError(self.lineno,
'Pattern is not recognized: %r' % line)
# Rollback to the last line for next parse
# This will do nothing if EOF is hit
self.instream.seek(self.instream.tell() - len(line))
self.lineno -= 1 |
def split_string(self, string, splitter='.', allow_empty=True):
"""Split the string with respect of quotes"""
i = 0
rv = []
need_split = False
while i < len(string):
m = re.compile(_KEY_NAME).match(string, i)
if not need_split and m:
i = m.end()
body = m.group(1)
if body[:3] == '"""':
body = self.converter.unescape(body[3:-3])
elif body[:3] == "'''":
body = body[3:-3]
elif body[0] == '"':
body = self.converter.unescape(body[1:-1])
elif body[0] == "'":
body = body[1:-1]
if not allow_empty and not body:
raise TomlDecodeError(
self.lineno,
'Empty section name is not allowed: %r' % string)
rv.append(body)
need_split = True
elif need_split and string[i] == splitter:
need_split = False
i += 1
continue
else:
raise TomlDecodeError(self.lineno,
'Illegal section name: %r' % string)
if not need_split:
raise TomlDecodeError(
self.lineno,
'Empty section name is not allowed: %r' % string)
return rv |
def transform_source(text):
'''removes a "where" clause which is identified by the use of "where"
as an identifier and ends at the first DEDENT (i.e. decrease in indentation)'''
toks = tokenize.generate_tokens(StringIO(text).readline)
result = []
where_clause = False
for toktype, tokvalue, _, _, _ in toks:
if toktype == tokenize.NAME and tokvalue == "where":
where_clause = True
elif where_clause and toktype == tokenize.DEDENT:
where_clause = False
continue
if not where_clause:
result.append((toktype, tokvalue))
return tokenize.untokenize(result) |
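For example (a sketch; the exact whitespace of the untokenized result may differ):

source = (
    "x = a + b\n"
    "where:\n"
    "    a = 1\n"
    "    b = 2\n"
)
print(transform_source(source))  # roughly "x = a + b\n"; the where block is gone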
def is_visible(self):
"""
Return whether the page is visible in navigation.
Pages must have show in navigation set. Regular pages must be published (published and
have a current version - checked with `is_published`), pages with a glitter app associated
don't need any page versions.
"""
if self.glitter_app_name:
visible = self.show_in_navigation
else:
visible = self.show_in_navigation and self.is_published
return visible |
def main(args=sys.argv[1:]):
"""
Main function, called from CLI script
:return:
"""
import mcpartools
parser = argparse.ArgumentParser()
parser.add_argument('-V', '--version',
action='version',
version=mcpartools.__version__)
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='Give more output. Option is additive, '
'and can be used up to 3 times')
parser.add_argument('-q', '--quiet',
action='count',
default=0,
help='Be silent')
parser.add_argument('-w', '--workspace',
type=str,
help='workspace directory')
parser.add_argument('-m', '--mc_run_template',
type=str,
default=None,
help='path to optional MC run script')
parser.add_argument('-s', '--scheduler_options',
type=str,
default=None,
help='optional scheduler options: path to a file or list of options in square brackets')
parser.add_argument('-e', '--mc_engine_options',
type=str,
default=None,
help='optional MC engine options: path to a file or list of options in square brackets')
parser.add_argument('-x', '--external_files',
nargs='+', # list may be empty
type=str,
help='list of external files to be copied into each job working directory')
parser.add_argument('-b', '--batch',
type=str,
default=None,
choices=[b.id for b in SchedulerDiscover.supported],
help='Available batch systems: {}'.format([b.id for b in SchedulerDiscover.supported]))
parser.add_argument('-c', '--collect',
type=str,
default='mv',
choices=Options.collect_methods,
help='Available collect methods')
parser.add_argument('-p', '--particle_no',
dest='particle_no',
metavar='particle_no',
type=int,
required=True,
help='number of primary particles per job')
parser.add_argument('-j', '--jobs_no',
type=int,
required=True,
help='number of parallel jobs')
parser.add_argument('input',
type=str,
help='path to input configuration')
# TODO add grouping of options
args = parser.parse_args(args)
if args.quiet:
if args.quiet == 1:
level = "WARNING"
elif args.quiet == 2:
level = "ERROR"
else:
level = "CRITICAL"
elif args.verbose:
level = "DEBUG"
else:
level = "INFO"
logging.basicConfig(level=level)
opt = Options(args)
generator = Generator(options=opt)
ret_code = generator.run()
return ret_code |
def build_node_tree(self, source_paths):
"""
Build a node tree.
"""
import uqbar.apis
root = PackageNode()
# Build node tree, top-down
for source_path in sorted(
source_paths, key=lambda x: uqbar.apis.source_path_to_package_path(x)
):
package_path = uqbar.apis.source_path_to_package_path(source_path)
parts = package_path.split(".")
if not self.document_private_modules and any(
part.startswith("_") for part in parts
):
continue
# Find parent node.
parent_node = root
if len(parts) > 1:
parent_package_path = ".".join(parts[:-1])
try:
parent_node = root[parent_package_path]
except KeyError:
parent_node = root
try:
if parent_node is root:
# Backfill missing parent node.
grandparent_node = root
if len(parts) > 2:
grandparent_node = root[
parent_package_path.rpartition(".")[0]
]
parent_node = PackageNode(name=parent_package_path)
grandparent_node.append(parent_node)
grandparent_node[:] = sorted(
grandparent_node, key=lambda x: x.package_path
)
except KeyError:
parent_node = root
# Create or update child node.
node_class = ModuleNode
if source_path.name == "__init__.py":
node_class = PackageNode
try:
# If the child exists, it was previously backfilled.
child_node = root[package_path]
child_node.source_path = source_path
except KeyError:
# Otherwise it needs to be created and appended to the parent.
child_node = node_class(name=package_path, source_path=source_path)
parent_node.append(child_node)
parent_node[:] = sorted(parent_node, key=lambda x: x.package_path)
# Build documenters, bottom-up.
# This allows parent documenters to easily aggregate their children.
for node in root.depth_first(top_down=False):
kwargs = dict(
document_private_members=self.document_private_members,
member_documenter_classes=self.member_documenter_classes,
)
if isinstance(node, ModuleNode):
node.documenter = self.module_documenter_class(
node.package_path, **kwargs
)
else:
# Collect references to child modules and packages.
node.documenter = self.module_documenter_class(
node.package_path,
module_documenters=[
child.documenter
for child in node
if child.documenter is not None
],
**kwargs,
)
if (
not self.document_empty_modules
and not node.documenter.module_documenters
and not node.documenter.member_documenters
):
node.parent.remove(node)
return root |
def validate_unique(self):
"""
Added because Django doesn't validate unique constraints correctly when
required fields are excluded.
"""
unique_checks, date_checks = self.instance._get_unique_checks(exclude=[])
errors = self.instance._perform_unique_checks(unique_checks)
if errors:
self.add_error(None, errors) |
def prepare_monitor(tenant=tenant, user=user, password=password, organization=organization, zone_name=zone_name):
"""
:param tenant: tenant url
:param user: user's email
:param password: user's password
:param organization: organization name
:param zone_name: (optional) zone name
:return:
"""
router = PrivatePath(tenant, verify_codes=False)
payload = {
"firstName": "AllSeeingEye",
"lastName": "Monitor",
"email": user,
"password": password,
"accept": "true"
}
try:
router.post_quick_sign_up(data=payload)
except exceptions.ApiUnauthorizedError:
pass
platform = QubellPlatform.connect(tenant=tenant, user=user, password=password)
org = platform.organization(name=organization)
if zone_name:
zone = org.zones[zone_name]
else:
zone = org.zone
env = org.environment(name="Monitor for "+zone.name, zone=zone.id)
env.init_common_services(with_cloud_account=False, zone_name=zone_name)
# todo: move to env
policy_name = lambda policy: "{}.{}".format(policy.get('action'), policy.get('parameter'))
env_data = env.json()
key_id = [p for p in env_data['policies'] if 'provisionVms.publicKeyId' == policy_name(p)][0].get('value')
with env as envbulk:
envbulk.add_marker('monitor')
envbulk.add_property('publicKeyId', 'string', key_id)
monitor = Manifest(file=os.path.join(os.path.dirname(__file__), './monitor_manifests/monitor.yml'))
monitor_child = Manifest(file=os.path.join(os.path.dirname(__file__), './monitor_manifests/monitor_child.yml'))
org.application(manifest=monitor_child, name='monitor-child')
app = org.application(manifest=monitor, name='monitor')
return platform, org.id, app.id, env.id |
def launch(self, timeout=2):
"""
Launch a hierapp instance with its environment dependencies and verify it:
- can be launched within a short timeout
- auto-destroys shortly afterwards
"""
self.start_time = time.time()
self.end_time = time.time()
instance = self.app.launch(environment=self.env)
time.sleep(2) # Instance need time to appear in ui
assert instance.running(timeout=timeout), "Monitor didn't get Active state"
launched = instance.status == 'Active'
instance.reschedule_workflow(workflow_name='destroy', timestamp=self.destroy_interval)
assert instance.destroyed(timeout=timeout), "Monitor didn't get Destroyed after short time"
stopped = instance.status == 'Destroyed'
instance.force_remove()
self.end_time = time.time()
self.status = launched and stopped |
def clone(self):
"""
Do not initialize again since everything is ready to launch app.
:return: Initialized monitor instance
"""
return Monitor(org=self.org, app=self.app, env=self.env) |
def from_dict(raw_data):
"""Create Image from raw dictionary data."""
url = None
width = None
height = None
try:
url = raw_data['url']
width = raw_data['width']
height = raw_data['height']
except KeyError:
raise ValueError('Unexpected image json structure')
except TypeError:
# Happens when raw_data is None, i.e. when a term has no image:
pass
return Image(url, width, height) |
def to_dict(self):
"""Convert Image into raw dictionary data."""
if not self.url:
return None
return {
'url': self.url,
'width': self.width,
'height': self.height
} |
def from_dict(raw_data):
"""Create Term from raw dictionary data."""
try:
definition = raw_data['definition']
term_id = raw_data['id']
image = Image.from_dict(raw_data['image'])
rank = raw_data['rank']
term = raw_data['term']
return Term(definition, term_id, image, rank, term)
except KeyError:
raise ValueError('Unexpected term json structure') |
def to_dict(self):
"""Convert Term into raw dictionary data."""
return {
'definition': self.definition,
'id': self.term_id,
'image': self.image.to_dict(),
'rank': self.rank,
'term': self.term
} |
def has_common(self, other):
"""Return set of common words between two word sets."""
if not isinstance(other, WordSet):
raise ValueError('Can compare only WordSets')
return self.term_set & other.term_set |
def from_dict(raw_data):
"""Create WordSet from raw dictionary data."""
try:
set_id = raw_data['id']
title = raw_data['title']
terms = [Term.from_dict(term) for term in raw_data['terms']]
return WordSet(set_id, title, terms)
except KeyError:
raise ValueError('Unexpected set json structure') |
def to_dict(self):
"""Convert WordSet into raw dictionary data."""
return {
'id': self.set_id,
'title': self.title,
'terms': [term.to_dict() for term in self.terms]
} |
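A usage sketch tying the data classes together (assuming WordSet derives a term_set of term strings from its terms):

def raw_term(i, word):
    # Minimal raw structure accepted by Term.from_dict; an image of None is allowed.
    return {'definition': '', 'id': i, 'image': None, 'rank': i, 'term': word}

pets = WordSet.from_dict({'id': 1, 'title': 'Pets',
                          'terms': [raw_term(1, 'cat'), raw_term(2, 'dog')]})
birds = WordSet.from_dict({'id': 2, 'title': 'Birds',
                           'terms': [raw_term(3, 'owl'), raw_term(4, 'cat')]})
print(pets.has_common(birds))  # {'cat'}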
def release(ctx, yes, latest):
"""Create a new release in github
"""
m = RepoManager(ctx.obj['agile'])
api = m.github_repo()
if latest:
latest = api.releases.latest()
if latest:
click.echo(latest['tag_name'])
elif m.can_release('sandbox'):
branch = m.info['branch']
version = m.validate_version()
name = 'v%s' % version
body = ['Release %s from agiletoolkit' % name]
data = dict(
tag_name=name,
target_commitish=branch,
name=name,
body='\n\n'.join(body),
draft=False,
prerelease=False
)
if yes:
data = api.releases.create(data=data)
m.message('Successfully created a new Github release')
click.echo(niceJson(data))
else:
click.echo('skipped') |
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
"""
app.cache_db_path = ":memory:"
if app.config["uqbar_book_use_cache"]:
logger.info(bold("[uqbar-book]"), nonl=True)
logger.info(" initializing cache db")
app.connection = uqbar.book.sphinx.create_cache_db(app.cache_db_path) |
def on_config_inited(app, config):
"""
Hooks into Sphinx's ``config-inited`` event.
"""
extension_paths = config["uqbar_book_extensions"] or [
"uqbar.book.extensions.GraphExtension"
]
app.uqbar_book_extensions = []
for extension_path in extension_paths:
module_name, _, class_name = extension_path.rpartition(".")
module = importlib.import_module(module_name)
extension_class = getattr(module, class_name)
extension_class.setup_sphinx(app)
app.uqbar_book_extensions.append(extension_class) |
def on_doctree_read(app, document):
"""
Hooks into Sphinx's ``doctree-read`` event.
"""
literal_blocks = uqbar.book.sphinx.collect_literal_blocks(document)
cache_mapping = uqbar.book.sphinx.group_literal_blocks_by_cache_path(literal_blocks)
node_mapping = {}
use_cache = bool(app.config["uqbar_book_use_cache"])
for cache_path, literal_block_groups in cache_mapping.items():
kwargs = dict(
extensions=app.uqbar_book_extensions,
setup_lines=app.config["uqbar_book_console_setup"],
teardown_lines=app.config["uqbar_book_console_teardown"],
use_black=bool(app.config["uqbar_book_use_black"]),
)
for literal_blocks in literal_block_groups:
try:
if use_cache:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks_with_cache(
literal_blocks, cache_path, app.connection, **kwargs
)
else:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks(
literal_blocks, **kwargs
)
node_mapping.update(local_node_mapping)
except ConsoleError as exception:
message = exception.args[0].splitlines()[-1]
logger.warning(message, location=exception.args[1])
if app.config["uqbar_book_strict"]:
raise
uqbar.book.sphinx.rebuild_document(document, node_mapping) |
def on_build_finished(app, exception):
"""
Hooks into Sphinx's ``build-finished`` event.
"""
if not app.config["uqbar_book_use_cache"]:
return
logger.info("")
for row in app.connection.execute("SELECT path, hits FROM cache ORDER BY path"):
path, hits = row
if not hits:
continue
logger.info(bold("[uqbar-book]"), nonl=True)
logger.info(" Cache hits for {}: {}".format(path, hits)) |
def handle_class(signature_node, module, object_name, cache):
"""
Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes.
"""
class_ = getattr(module, object_name, None)
if class_ is None:
return
if class_ not in cache:
cache[class_] = {}
attributes = inspect.classify_class_attrs(class_)
for attribute in attributes:
cache[class_][attribute.name] = attribute
if inspect.isabstract(class_):
emphasis = nodes.emphasis("abstract ", "abstract ", classes=["property"])
signature_node.insert(0, emphasis) |
def handle_method(signature_node, module, object_name, cache):
"""
Styles ``automethod`` entries.
Adds ``abstract`` prefix to abstract methods.
Adds link to originating class for inherited methods.
"""
*class_names, attr_name = object_name.split(".") # Handle nested classes
class_ = module
for class_name in class_names:
class_ = getattr(class_, class_name, None)
if class_ is None:
return
attr = getattr(class_, attr_name)
try:
inspected_attr = cache[class_][attr_name]
defining_class = inspected_attr.defining_class
except KeyError:
# TODO: This is a hack to handle bad interaction between enum and inspect
defining_class = class_
if defining_class is not class_:
reftarget = "{}.{}".format(defining_class.__module__, defining_class.__name__)
xref_node = addnodes.pending_xref(
"", refdomain="py", refexplicit=True, reftype="class", reftarget=reftarget
)
name_node = nodes.literal(
"", "{}".format(defining_class.__name__), classes=["descclassname"]
)
xref_node.append(name_node)
desc_annotation = list(signature_node.traverse(addnodes.desc_annotation))
index = len(desc_annotation)
class_annotation = addnodes.desc_addname()
class_annotation.extend([nodes.Text("("), xref_node, nodes.Text(").")])
class_annotation["xml:space"] = "preserve"
signature_node.insert(index, class_annotation)
else:
is_overridden = False
for class_ in defining_class.__mro__[1:]:
if hasattr(class_, attr_name):
is_overridden = True
if is_overridden:
emphasis = nodes.emphasis(
"overridden ", "overridden ", classes=["property"]
)
signature_node.insert(0, emphasis)
if getattr(attr, "__isabstractmethod__", False):
emphasis = nodes.emphasis("abstract", "abstract", classes=["property"])
signature_node.insert(0, emphasis) |
def on_doctree_read(app, document):
"""
Hooks into Sphinx's ``doctree-read`` event.
"""
cache: Dict[type, Dict[str, object]] = {}
for desc_node in document.traverse(addnodes.desc):
if desc_node.get("domain") != "py":
continue
signature_node = desc_node.traverse(addnodes.desc_signature)[0]
module_name = signature_node.get("module")
object_name = signature_node.get("fullname")
object_type = desc_node.get("objtype")
module = importlib.import_module(module_name)
if object_type == "class":
handle_class(signature_node, module, object_name, cache)
elif object_type in ("method", "attribute", "staticmethod", "classmethod"):
handle_method(signature_node, module, object_name, cache) |
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
Used for copying over CSS files to theme directory.
"""
local_css_path = pathlib.Path(__file__).parent / "uqbar.css"
theme_css_path = (
pathlib.Path(app.srcdir) / app.config.html_static_path[0] / "uqbar.css"
)
with local_css_path.open("r") as file_pointer:
local_css_contents = file_pointer.read()
uqbar.io.write(local_css_contents, theme_css_path) |
def setup(app) -> Dict[str, Any]:
"""
Sets up Sphinx extension.
"""
app.connect("doctree-read", on_doctree_read)
app.connect("builder-inited", on_builder_inited)
app.add_css_file("uqbar.css")
app.add_node(
nodes.classifier, override=True, html=(visit_classifier, depart_classifier)
)
app.add_node(
nodes.definition, override=True, html=(visit_definition, depart_definition)
)
app.add_node(nodes.term, override=True, html=(visit_term, depart_term))
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
} |
def init_xena(api, logger, owner, ip=None, port=57911):
""" Create XenaManager object.
:param api: ApiType.socket or ApiType.rest
:param logger: python logger
:param owner: owner of the scripting session
:param ip: rest server IP
:param port: rest server TCP port
:return: Xena object
:rtype: XenaApp
"""
if api == ApiType.socket:
api_wrapper = XenaCliWrapper(logger)
elif api == ApiType.rest:
api_wrapper = XenaRestWrapper(logger, ip, port)
else:
raise ValueError('Unsupported API type: {}'.format(api))
return XenaApp(logger, owner, api_wrapper) |
def add_chassis(self, chassis, port=22611, password='xena'):
""" Add chassis.
XenaManager-2G -> Add Chassis.
:param chassis: chassis IP address
:param port: chassis port number
:param password: chassis password
:return: newly created chassis
:rtype: xenamanager.xena_app.XenaChassis
"""
if chassis not in self.chassis_list:
try:
XenaChassis(self, chassis, port, password)
except Exception as error:
self.objects.pop('{}/{}'.format(self.owner, chassis))
raise error
return self.chassis_list[chassis] |
def inventory(self):
""" Get inventory for all chassis. """
for chassis in self.chassis_list.values():
chassis.inventory(modules_inventory=True) |
def reserve_ports(self, locations, force=False, reset=True):
""" Reserve ports and reset factory defaults.
XenaManager-2G -> Reserve/Relinquish Port.
XenaManager-2G -> Reserve Port.
:param locations: list of ports locations in the form <ip/slot/port> to reserve
:param force: True - take forcefully. False - fail if port is reserved by other user
:param reset: True - reset port, False - leave port configuration
:return: ports dictionary (index: object)
"""
for location in locations:
ip, module, port = location.split('/')
self.chassis_list[ip].reserve_ports(['{}/{}'.format(module, port)], force, reset)
return self.ports |
def start_traffic(self, blocking=False, *ports):
""" Start traffic on list of ports.
:param blocking: True - start traffic and wait until traffic ends, False - start traffic and return.
:param ports: list of ports to start traffic on. Default - all session ports.
"""
for chassis, chassis_ports in self._per_chassis_ports(*self._get_operation_ports(*ports)).items():
chassis.start_traffic(False, *chassis_ports)
if blocking:
for chassis, chassis_ports in self._per_chassis_ports(*self._get_operation_ports(*ports)).items():
chassis.wait_traffic(*chassis_ports) |
def stop_traffic(self, *ports):
""" Stop traffic on list of ports.
:param ports: list of ports to stop traffic on. Default - all session ports.
"""
for chassis, chassis_ports in self._per_chassis_ports(*self._get_operation_ports(*ports)).items():
chassis.stop_traffic(*chassis_ports) |
def ports(self):
"""
:return: dictionary {name: object} of all ports.
"""
ports = {}
for chassis in self.chassis_list.values():
ports.update({str(p): p for p in chassis.get_objects_by_type('port')})
return ports |
def inventory(self, modules_inventory=False):
""" Get chassis inventory.
:param modules_inventory: True - read modules inventory, false - don't read.
"""
self.c_info = self.get_attributes()
for m_index, m_portcounts in enumerate(self.c_info['c_portcounts'].split()):
if int(m_portcounts):
module = XenaModule(parent=self, index=m_index)
if modules_inventory:
module.inventory() |
def reserve_ports(self, locations, force=False, reset=True):
""" Reserve ports and reset factory defaults.
XenaManager-2G -> Reserve/Relinquish Port.
XenaManager-2G -> Reset port.
:param locations: list of ports locations in the form <module/port> to reserve
:param force: True - take forcefully, False - fail if port is reserved by other user
:param reset: True - reset port, False - leave port configuration
:return: ports dictionary (index: object)
"""
for location in locations:
port = XenaPort(parent=self, index=location)
port.reserve(force)
if reset:
port.reset()
return self.ports |
def start_traffic(self, blocking=False, *ports):
""" Start traffic on list of ports.
:param blocking: True - start traffic and wait until traffic ends, False - start traffic and return.
:param ports: list of ports to start traffic on. Default - all session ports.
"""
self._traffic_command('on', *ports)
if blocking:
self.wait_traffic(*ports) |
def modules(self):
"""
:return: dictionary {index: object} of all modules.
"""
if not self.get_objects_by_type('module'):
self.inventory()
return {int(c.index): c for c in self.get_objects_by_type('module')} |
def inventory(self):
""" Get module inventory. """
self.m_info = self.get_attributes()
if 'NOTCFP' in self.m_info['m_cfptype']:
m_portcount = int(self.get_attribute('m_portcount'))
else:
m_portcount = int(self.get_attribute('m_cfpconfig').split()[0])
for p_index in range(m_portcount):
XenaPort(parent=self, index='{}/{}'.format(self.index, p_index)).inventory() |
def ports(self):
"""
:return: dictionary {index: object} of all ports.
"""
if not self.get_objects_by_type('port'):
self.inventory()
return {int(p.index.split('/')[1]): p for p in self.get_objects_by_type('port')} |
def run(entry_point, drivers, loop = None):
''' This is a runner wrapping the cyclotron "run" implementation. It takes
an additional parameter to provide a custom asyncio main loop.
'''
program = setup(entry_point, drivers)
dispose = program.run()
if loop is None:
loop = asyncio.get_event_loop()
loop.run_forever()
dispose() |
def register(model, admin=None, category=None):
""" Decorator to registering you Admin class. """
def _model_admin_wrapper(admin_class):
site.register(model, admin_class=admin_class)
if category:
site.register_block(model, category)
return admin_class
return _model_admin_wrapper |
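Hypothetical usage, assuming a Page model and a Django ModelAdmin subclass:

# @register(Page, category='content')
# class PageAdmin(admin.ModelAdmin):
#     list_display = ('title',)
#
# ...is equivalent to:
#     site.register(Page, admin_class=PageAdmin)
#     site.register_block(Page, 'content')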
def has_glitter_edit_permission(self, request, obj):
"""
Return whether a user has edit access to the glitter object/page this object is on.
"""
# We're testing for the edit permission here with the glitter object - not the current
# object, not the change permission. Once a user has edit access to an object they can edit
# all content on it.
permission_name = '{}.edit_{}'.format(
obj._meta.app_label, obj._meta.model_name,
)
has_permission = (
request.user.has_perm(permission_name) or
request.user.has_perm(permission_name, obj=obj)
)
return has_permission |
def change_view(self, request, object_id, form_url='', extra_context=None):
"""The 'change' admin view for this model."""
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(self.opts.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
content_block = obj.content_block
version = content_block.obj_version
# Version must not be saved, and must belong to this user
if version.version_number or version.owner != request.user:
raise PermissionDenied
return super().change_view(request, object_id, form_url, extra_context) |
def response_change(self, request, obj):
"""Determine the HttpResponse for the change_view stage."""
opts = self.opts.app_label, self.opts.model_name
pk_value = obj._get_pk_val()
if '_continue' in request.POST:
msg = _(
'The %(name)s block was changed successfully. You may edit it again below.'
) % {'name': force_text(self.opts.verbose_name)}
self.message_user(request, msg, messages.SUCCESS)
# We redirect to the save and continue page, which updates the
# parent window in javascript and redirects back to the edit page
# in javascript.
return HttpResponseRedirect(reverse(
'admin:%s_%s_continue' % opts,
args=(pk_value,),
current_app=self.admin_site.name
))
# Update column and close popup - don't bother with a message as they won't see it
return self.response_rerender(request, obj, 'admin/glitter/update_column.html') |
def get_filter_item(name: str, operation: str, value: bytes) -> bytes:
"""
A field could be found for this term, try to get filter string for it.
"""
assert isinstance(name, str)
assert isinstance(value, bytes)
if operation is None:
return filter_format(b"(%s=%s)", [name, value])
elif operation == "contains":
assert value != ""
return filter_format(b"(%s=*%s*)", [name, value])
else:
raise ValueError("Unknown search operation %s" % operation) |
def get_filter(q: tldap.Q, fields: Dict[str, tldap.fields.Field], pk: str):
"""
Translate the Q tree into a filter string to search for, or None
if no results possible.
"""
# check the details are valid
if q.negated and len(q.children) == 1:
op = b"!"
elif q.connector == tldap.Q.AND:
op = b"&"
elif q.connector == tldap.Q.OR:
op = b"|"
else:
raise ValueError("Invalid value of op found")
# scan through every child
search = []
for child in q.children:
# if this child is a node, then descend into it
if isinstance(child, tldap.Q):
search.append(get_filter(child, fields, pk))
else:
# otherwise get the values in this node
name, value = child
# split the name if possible
name, _, operation = name.rpartition("__")
if name == "":
name, operation = operation, None
# replace pk with the real attribute
if name == "pk":
name = pk
# DN is a special case
if name == "dn":
dn_name = "entryDN:"
if isinstance(value, list):
s = []
for v in value:
assert isinstance(v, str)
v = v.encode('utf_8')
s.append(get_filter_item(dn_name, operation, v))
search.append("(&".join(search) + ")")
# or process just the single value
else:
assert isinstance(value, str)
v = value.encode('utf_8')
search.append(get_filter_item(dn_name, operation, v))
continue
# try to find field associated with name
field = fields[name]
if isinstance(value, list) and len(value) == 1:
value = value[0]
assert isinstance(value, str)
# process as list
if isinstance(value, list):
s = []
for v in value:
v = field.value_to_filter(v)
s.append(get_filter_item(name, operation, v))
search.append(b"(&".join(search) + b")")
# or process just the single value
else:
value = field.value_to_filter(value)
search.append(get_filter_item(name, operation, value))
# output the results
if len(search) == 1 and not q.negated:
# just one non-negative term, return it
return search[0]
else:
# multiple terms
return b"(" + op + b"".join(search) + b")" |
def program_name(self):
r"""The name of the script, callable from the command line.
"""
name = "-".join(
word.lower() for word in uqbar.strings.delimit_words(type(self).__name__)
)
return name |
def node_is_result_assignment(node: ast.AST) -> bool:
"""
Args:
node: An ``ast`` node.
Returns:
bool: ``node`` corresponds to the code ``result =``, assignment to the
``result`` variable.
Note:
Performs a very weak test that the line starts with 'result =' rather
than testing the tokens.
"""
# `.first_token` is added by asttokens
token = node.first_token # type: ignore
return token.line.strip().startswith('result =') |
def node_is_noop(node: ast.AST) -> bool:
"""
Node does nothing.
"""
return isinstance(node.value, ast.Str) if isinstance(node, ast.Expr) else isinstance(node, ast.Pass) |
def function_is_noop(function_node: ast.FunctionDef) -> bool:
"""
Function does nothing - is just ``pass`` or docstring.
"""
return all(node_is_noop(n) for n in function_node.body) |
def add_node_parents(root: ast.AST) -> None:
"""
Adds "parent" attribute to all child nodes of passed node.
Code taken from https://stackoverflow.com/a/43311383/1286705
"""
for node in ast.walk(root):
for child in ast.iter_child_nodes(node):
child.parent = node |
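A quick check of the backlinks it creates:

import ast

tree = ast.parse("def f():\n    return 1\n")
add_node_parents(tree)
return_node = tree.body[0].body[0]
assert return_node.parent is tree.body[0]  # Return's parent is the FunctionDef
assert tree.body[0].parent is tree         # FunctionDef's parent is the Module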
def build_footprint(node: ast.AST, first_line_no: int) -> Set[int]:
"""
Generates the set of line numbers that the passed node covers, relative to the
marked lines list - i.e. start of function is line 0.
"""
return set(
range(
get_first_token(node).start[0] - first_line_no,
get_last_token(node).end[0] - first_line_no + 1,
)
) |
def filter_arrange_nodes(nodes: List[ast.stmt], max_line_number: int) -> List[ast.stmt]:
"""
Finds all nodes that are before the ``max_line_number`` and are not
docstrings or ``pass``.
"""
return [
node for node in nodes if node.lineno < max_line_number and not isinstance(node, ast.Pass)
and not (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))
] |
def filter_assert_nodes(nodes: List[ast.stmt], min_line_number: int) -> List[ast.stmt]:
"""
Finds all nodes that are after the ``min_line_number``
"""
return [node for node in nodes if node.lineno > min_line_number] |
def find_stringy_lines(tree: ast.AST, first_line_no: int) -> Set[int]:
"""
Finds all lines that contain a string in a tree, usually a function. These
lines will be ignored when searching for blank lines.
"""
str_footprints = set()
for node in ast.walk(tree):
if isinstance(node, ast.Str):
str_footprints.update(build_footprint(node, first_line_no))
return str_footprints |
def check_all(self) -> Generator[AAAError, None, None]:
"""
Run everything required for checking this function.
Returns:
A generator of errors.
Raises:
ValidationError: A non-recoverable linting error is found.
"""
# Function def
if function_is_noop(self.node):
return
self.mark_bl()
self.mark_def()
# ACT
# Load act block and kick out when none is found
self.act_node = self.load_act_node()
self.act_block = Block.build_act(self.act_node.node, self.node)
act_block_first_line_no, act_block_last_line_no = self.act_block.get_span(0)
# ARRANGE
self.arrange_block = Block.build_arrange(self.node.body, act_block_first_line_no)
# ASSERT
assert self.act_node
self.assert_block = Block.build_assert(self.node.body, act_block_last_line_no)
# SPACING
for block in ['arrange', 'act', 'assert']:
self_block = getattr(self, '{}_block'.format(block))
try:
span = self_block.get_span(self.first_line_no)
except EmptyBlock:
continue
self.line_markers.update(span, self_block.line_type)
yield from self.line_markers.check_arrange_act_spacing()
yield from self.line_markers.check_act_assert_spacing()
yield from self.line_markers.check_blank_lines() |
def load_act_node(self) -> ActNode:
"""
Raises:
ValidationError: AAA01 when no act block is found and AAA02 when
multiple act blocks are found.
"""
act_nodes = ActNode.build_body(self.node.body)
if not act_nodes:
raise ValidationError(self.first_line_no, self.node.col_offset, 'AAA01 no Act block found in test')
# Allow `pytest.raises` and `self.assertRaises()` in assert nodes - if
# any of the additional nodes are `pytest.raises`, then raise
for a_n in act_nodes[1:]:
if a_n.block_type in [ActNodeType.marked_act, ActNodeType.result_assignment]:
raise ValidationError(
self.first_line_no,
self.node.col_offset,
'AAA02 multiple Act blocks found in test',
)
return act_nodes[0] |
def get_line_relative_to_node(self, target_node: ast.AST, offset: int) -> str:
"""
Raises:
IndexError: when ``offset`` takes the request out of bounds of this
Function's lines.
"""
return self.lines[target_node.lineno - self.node.lineno + offset] |
def mark_def(self) -> int:
"""
Marks up this Function's definition lines (including decorators) into
the ``line_markers`` attribute.
Returns:
Number of lines found for the definition.
Note:
Does not spot the closing ``):`` of a function when it occurs on
its own line.
Note:
Can not use ``helpers.build_footprint()`` because function nodes
cover the whole function. In this case, just the def lines are
wanted with any decorators.
"""
first_line = get_first_token(self.node).start[0] - self.first_line_no # Should usually be 0
try:
end_token = get_last_token(self.node.args.args[-1])
except IndexError:
# Fn has no args, so end of function is the fn def itself...
end_token = get_first_token(self.node)
last_line = end_token.end[0] - self.first_line_no
self.line_markers.update((first_line, last_line), LineType.func_def)
return last_line - first_line + 1 |
def mark_bl(self) -> int:
"""
Mark unprocessed lines that have no content and no string nodes
covering them as blank line BL.
Returns:
Number of blank lines found with no stringy parent node.
"""
counter = 0
stringy_lines = find_stringy_lines(self.node, self.first_line_no)
for relative_line_number, line in enumerate(self.lines):
if relative_line_number not in stringy_lines and line.strip() == '':
counter += 1
self.line_markers[relative_line_number] = LineType.blank_line
return counter |
def enhance(self):
""" Function enhance
Enhance the object with sub-dictionaries (puppet classes, parameters,
interfaces and smart class parameters)
"""
self.update({'puppetclasses':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemPuppetClasses)})
self.update({'parameters':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemParameter)})
self.update({'interfaces':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemInterface)})
self.update({'smart_class_parameters':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemSmartClassParameter)}) |
def getParamFromEnv(self, var, default=''):
""" Function getParamFromEnv
Search a parameter in the host environment
@param var: the var name
@param default: default value
@return RETURN: the value
"""
if self.getParam(var):
return self.getParam(var)
if self.hostgroup:
if self.hostgroup.getParam(var):
return self.hostgroup.getParam(var)
if self.domain.getParam(var):
return self.domain.getParam(var)
else:
return default |
def getUserData(self,
hostgroup,
domain,
defaultPwd='',
defaultSshKey='',
proxyHostname='',
tplFolder='metadata/templates/'):
""" Function getUserData
Generate a userdata script for metadata server from Foreman API
@param domain: the domain item linked to this host
@param hostgroup: the hostgroup item linked to this host
@param defaultPwd: the default password if no password is specified
in the host>hostgroup>domain params
@param defaultSshKey: the default ssh key if no ssh key is specified
in the host>hostgroup>domain params
@param proxyHostname: hostname of the smartproxy
@param tplFolder: the templates folder
@return RETURN: the user data
"""
if 'user-data' in self.keys():
return self['user-data']
else:
self.hostgroup = hostgroup
self.domain = domain
if proxyHostname == '':
proxyHostname = 'foreman.' + domain['name']
password = self.getParamFromEnv('password', defaultPwd)
sshauthkeys = self.getParamFromEnv('global_sshkey', defaultSshKey)
with open(tplFolder+'puppet.conf', 'r') as puppet_file:
p = MyTemplate(puppet_file.read())
content = p.substitute(foremanHostname=proxyHostname)
enc_puppet_file = base64.b64encode(bytes(content, 'utf-8'))
with open(tplFolder+'cloud-init.tpl', 'r') as content_file:
s = MyTemplate(content_file.read())
if sshauthkeys:
sshauthkeys = ' - '+sshauthkeys
self.userdata = s.substitute(
password=password,
fqdn=self['name'],
sshauthkeys=sshauthkeys,
foremanurlbuilt="http://{}/unattended/built"
.format(proxyHostname),
puppet_conf_content=enc_puppet_file.decode('utf-8'))
return self.userdata |
def register_payload(self, *payloads, flavour: ModuleType):
"""Queue one or more payload for execution after its runner is started"""
for payload in payloads:
self._logger.debug('registering payload %s (%s)', NameRepr(payload), NameRepr(flavour))
self.runners[flavour].register_payload(payload) |
def run_payload(self, payload, *, flavour: ModuleType):
"""Execute one payload after its runner is started and return its output"""
return self.runners[flavour].run_payload(payload) |
def run(self):
"""Run all runners, blocking until completion or error"""
self._logger.info('starting all runners')
try:
with self._lock:
assert not self.running.is_set(), 'cannot re-run: %s' % self
self.running.set()
thread_runner = self.runners[threading]
for runner in self.runners.values():
if runner is not thread_runner:
thread_runner.register_payload(runner.run)
if threading.current_thread() == threading.main_thread():
asyncio_main_run(root_runner=thread_runner)
else:
thread_runner.run()
except Exception as err:
self._logger.exception('runner terminated: %s', err)
raise RuntimeError from err
finally:
self._stop_runners()
self._logger.info('stopped all runners')
self.running.clear() |
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
formfield = super().formfield_for_dbfield(db_field, **kwargs)
if db_field.name == 'image':
formfield.widget = ImageRelatedFieldWidgetWrapper(
ImageSelect(), db_field.rel, self.admin_site, can_add_related=True,
can_change_related=True,
)
return formfield |
def compare_schemas(one, two):
"""Compare two structures that represents JSON schemas.
For comparison you can't use normal comparison, because in JSON schema
lists DO NOT keep order (and Python lists do), so this must be taken into
account during comparison.
Note this won't check all configurations, only the first one that seems
to match, which can lead to wrong results.
:param one: First schema to compare.
:param two: Second schema to compare.
:rtype: `bool`
"""
one = _normalize_string_type(one)
two = _normalize_string_type(two)
_assert_same_types(one, two)
if isinstance(one, list):
return _compare_lists(one, two)
elif isinstance(one, dict):
return _compare_dicts(one, two)
elif isinstance(one, SCALAR_TYPES):
return one == two
elif one is None:
return one is two
else:
raise RuntimeError('Not allowed type "{type}"'.format(
type=type(one).__name__)) |
def is_ecma_regex(regex):
"""Check if given regex is of type ECMA 262 or not.
:rtype: bool
"""
parts = regex.split('/')
if len(parts) == 1:
return False
if len(parts) < 3:
raise ValueError('Given regex is neither an ECMA regex nor a Python regex.')
parts.pop()
parts.append('')
raw_regex = '/'.join(parts)
if raw_regex.startswith('/') and raw_regex.endswith('/'):
return True
return False |
def convert_ecma_regex_to_python(value):
"""Convert ECMA 262 regex to Python tuple with regex and flags.
If given value is already Python regex it will be returned unchanged.
:param string value: ECMA regex.
:return: 2-tuple with `regex` and `flags`
:rtype: namedtuple
"""
if not is_ecma_regex(value):
return PythonRegex(value, [])
parts = value.split('/')
flags = parts.pop()
try:
result_flags = [ECMA_TO_PYTHON_FLAGS[f] for f in flags]
except KeyError:
raise ValueError('Wrong flags "{}".'.format(flags))
return PythonRegex('/'.join(parts[1:]), result_flags) |
def convert_python_regex_to_ecma(value, flags=[]):
"""Convert Python regex to ECMA 262 regex.
If given value is already ECMA regex it will be returned unchanged.
:param string value: Python regex.
:param list flags: List of flags (allowed flags: `re.I`, `re.M`)
:return: ECMA 262 regex
:rtype: str
"""
if is_ecma_regex(value):
return value
result_flags = [PYTHON_TO_ECMA_FLAGS[f] for f in flags]
result_flags = ''.join(result_flags)
return '/{value}/{flags}'.format(value=value, flags=result_flags) |
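Round-trip sketches, assuming the flag tables map re.I <-> 'i':

import re

assert is_ecma_regex('/^foo$/i') is True
assert is_ecma_regex('^foo$') is False
assert convert_ecma_regex_to_python('/^foo$/i') == ('^foo$', [re.I])
assert convert_python_regex_to_ecma('^foo$', [re.I]) == '/^foo$/i'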
def populate(self, **values):
"""Populate values to fields. Skip non-existing."""
values = values.copy()
fields = list(self.iterate_with_name())
for _, structure_name, field in fields:
if structure_name in values:
field.__set__(self, values.pop(structure_name))
for name, _, field in fields:
if name in values:
field.__set__(self, values.pop(name)) |
def get_field(self, field_name):
"""Get field associated with given attribute."""
for attr_name, field in self:
if field_name == attr_name:
return field
raise errors.FieldNotFound('Field not found', field_name) |
def validate(self):
"""Explicitly validate all the fields."""
for name, field in self:
try:
field.validate_for_object(self)
except ValidationError as error:
raise ValidationError(
"Error for field '{name}'.".format(name=name),
error,
) |
def iterate_over_fields(cls):
"""Iterate through fields as `(attribute_name, field_instance)`."""
for attr in dir(cls):
clsattr = getattr(cls, attr)
if isinstance(clsattr, BaseField):
yield attr, clsattr |
def iterate_with_name(cls):
"""Iterate over fields, but also give `structure_name`.
Format is `(attribute_name, structure_name, field_instance)`.
Structure name is the name under which the value is seen in the structure
and schema (in primitives) and only there.
"""
for attr_name, field in cls.iterate_over_fields():
structure_name = field.structue_name(attr_name)
yield attr_name, structure_name, field |
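A sketch with a hypothetical jsonmodels-style model, where name= overrides the structure name:

# class Person(models.Base):
#     cell_phone = fields.StringField(name='cell-phone')
#     age = fields.IntField()
#
# list(Person.iterate_with_name()) yields tuples such as:
#     ('age', 'age', <IntField>)
#     ('cell_phone', 'cell-phone', <StringField>)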
def parse_value(self, value):
"""Cast value to `int`, e.g. from string or long"""
parsed = super(IntField, self).parse_value(value)
if parsed is None:
return parsed
return int(parsed) |