def sat2in4(pos, neg=tuple(), vartype=dimod.BINARY, name='2-in-4'):
"""Two-in-four (2-in-4) satisfiability.
Args:
pos (iterable):
Variable labels, as an iterable, for non-negated variables of the constraint.
Exactly four variables are specified by `pos` and `neg` together.
neg (iterable):
Variable labels, as an iterable, for negated variables of the constraint.
Exactly four variables are specified by `pos` and `neg` together.
vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
input values:
* Vartype.SPIN, 'SPIN', {-1, 1}
* Vartype.BINARY, 'BINARY', {0, 1}
name (str, optional, default='2-in-4'): Name for the constraint.
Returns:
Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are
assigned values that satisfy a two-in-four satisfiability problem.
Examples:
>>> import dwavebinarycsp
>>> import dwavebinarycsp.factories.constraint.sat as sat
>>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
>>> csp.add_constraint(sat.sat2in4(['w', 'x', 'y', 'z'], vartype='BINARY', name='sat1'))
>>> csp.check({'w': 1, 'x': 1, 'y': 0, 'z': 0})
True
"""
pos = tuple(pos)
neg = tuple(neg)
variables = pos + neg
if len(variables) != 4:
raise ValueError("exactly four variables must be specified by `pos` and `neg` together")
if neg and (len(neg) < 4):
# because 2-in-4 sat is symmetric, all negated is the same as none negated
const = sat2in4(pos=variables, vartype=vartype) # make one that has no negations
for v in neg:
const.flip_variable(v)
const.name = name # overwrite the name directly
return const
# we can just construct them directly for speed
if vartype is dimod.BINARY:
configurations = frozenset([(0, 0, 1, 1),
(0, 1, 0, 1),
(1, 0, 0, 1),
(0, 1, 1, 0),
(1, 0, 1, 0),
(1, 1, 0, 0)])
else:
# SPIN, vartype is checked by the decorator
configurations = frozenset([(-1, -1, +1, +1),
(-1, +1, -1, +1),
(+1, -1, -1, +1),
(-1, +1, +1, -1),
(+1, -1, +1, -1),
(+1, +1, -1, -1)])
def func(a, b, c, d):
if a == b:
return (b != c) and (c == d)
elif a == c:
# a != b
return b == d
else:
# a != b, a != c => b == c
return a == d
return Constraint(func, configurations, variables, vartype=vartype, name=name)
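# Hedged sanity sketch (standard library only, not part of the module above):
# the closure logic in `func` agrees with the six explicit BINARY
# configurations enumerated above, i.e. exactly two of the four values are 1.
from itertools import product

def _two_in_four(a, b, c, d):
    if a == b:
        return (b != c) and (c == d)
    elif a == c:
        return b == d
    return a == d

assert {cfg for cfg in product((0, 1), repeat=4) if _two_in_four(*cfg)} == {
    (0, 0, 1, 1), (0, 1, 0, 1), (1, 0, 0, 1),
    (0, 1, 1, 0), (1, 0, 1, 0), (1, 1, 0, 0)}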
|
def random_2in4sat(num_variables, num_clauses, vartype=dimod.BINARY, satisfiable=True):
"""Random two-in-four (2-in-4) constraint satisfaction problem.
Args:
num_variables (integer): Number of variables (at least four).
num_clauses (integer): Number of constraints that together constitute the
constraint satisfaction problem.
vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
input values:
* Vartype.SPIN, 'SPIN', {-1, 1}
* Vartype.BINARY, 'BINARY', {0, 1}
satisfiable (bool, optional, default=True): True if the CSP can be satisfied.
Returns:
CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables
are assigned values that satisfy a two-in-four satisfiability problem.
Examples:
This example creates a CSP with 6 variables and two random constraints and checks
whether a particular assignment of variables satisfies it.
>>> import dwavebinarycsp
>>> import dwavebinarycsp.factories as sat
>>> csp = sat.random_2in4sat(6, 2)
>>> csp.constraints # doctest: +SKIP
[Constraint.from_configurations(frozenset({(1, 0, 1, 0), (1, 0, 0, 1), (1, 1, 1, 1), (0, 1, 1, 0), (0, 0, 0, 0),
(0, 1, 0, 1)}), (2, 4, 0, 1), Vartype.BINARY, name='2-in-4'),
Constraint.from_configurations(frozenset({(1, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 0), (0, 0, 0, 1),
(0, 1, 0, 0), (0, 0, 1, 0)}), (1, 2, 4, 5), Vartype.BINARY, name='2-in-4')]
>>> csp.check({0: 1, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0}) # doctest: +SKIP
True
"""
if num_variables < 4:
raise ValueError("a 2in4 problem needs at least 4 variables")
if num_clauses > 16 * _nchoosek(num_variables, 4): # 16 different negation patterns
raise ValueError("too many clauses")
# also checks the vartype argument
csp = ConstraintSatisfactionProblem(vartype)
variables = list(range(num_variables))
constraints = set()
if satisfiable:
values = tuple(vartype.value)
planted_solution = {v: choice(values) for v in variables}
configurations = [(0, 0, 1, 1), (0, 1, 0, 1), (1, 0, 0, 1),
(0, 1, 1, 0), (1, 0, 1, 0), (1, 1, 0, 0)]
while len(constraints) < num_clauses:
# sort the variables because constraints are hashed on configurations/variables
# because 2-in-4 sat is symmetric, we would not get a hash conflict for different
# variable orders
constraint_variables = sorted(sample(variables, 4))
# pick (uniformly) a configuration and determine which variables we need to negate to
# match the chosen configuration
config = choice(configurations)
pos = tuple(v for idx, v in enumerate(constraint_variables) if config[idx] == (planted_solution[v] > 0))
neg = tuple(v for idx, v in enumerate(constraint_variables) if config[idx] != (planted_solution[v] > 0))
const = sat2in4(pos=pos, neg=neg, vartype=vartype)
assert const.check(planted_solution)
constraints.add(const)
else:
while len(constraints) < num_clauses:
# sort the variables because constraints are hashed on configurations/variables
# because 2-in-4 sat is symmetric, we would not get a hash conflict for different
# variable orders
constraint_variables = sorted(sample(variables, 4))
# randomly determine negations
pos = tuple(v for v in constraint_variables if random() > .5)
neg = tuple(v for v in constraint_variables if v not in pos)
const = sat2in4(pos=pos, neg=neg, vartype=vartype)
constraints.add(const)
for const in constraints:
csp.add_constraint(const)
# in case any variables didn't make it in
for v in variables:
csp.add_variable(v)
return csp
|
def random_xorsat(num_variables, num_clauses, vartype=dimod.BINARY, satisfiable=True):
"""Random XOR constraint satisfaction problem.
Args:
num_variables (integer): Number of variables (at least three).
num_clauses (integer): Number of constraints that together constitute the
constraint satisfaction problem.
vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
input values:
* Vartype.SPIN, 'SPIN', {-1, 1}
* Vartype.BINARY, 'BINARY', {0, 1}
satisfiable (bool, optional, default=True): True if the CSP can be satisfied.
Returns:
CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables
are assigned values that satisfy a XOR satisfiability problem.
Examples:
This example creates a CSP with 5 variables and two random constraints and checks
whether a particular assignment of variables satisfies it.
>>> import dwavebinarycsp
>>> import dwavebinarycsp.factories as sat
>>> csp = sat.random_xorsat(5, 2)
>>> csp.constraints # doctest: +SKIP
[Constraint.from_configurations(frozenset({(1, 0, 0), (1, 1, 1), (0, 1, 0), (0, 0, 1)}), (4, 3, 0),
Vartype.BINARY, name='XOR (0 flipped)'),
Constraint.from_configurations(frozenset({(1, 1, 0), (0, 1, 1), (0, 0, 0), (1, 0, 1)}), (2, 0, 4),
Vartype.BINARY, name='XOR (2 flipped) (0 flipped)')]
>>> csp.check({0: 1, 1: 0, 2: 0, 3: 1, 4: 1}) # doctest: +SKIP
True
"""
if num_variables < 3:
raise ValueError("a xor problem needs at least 3 variables")
if num_clauses > 8 * _nchoosek(num_variables, 3): # 8 different negation patterns
raise ValueError("too many clauses")
# also checks the vartype argument
csp = ConstraintSatisfactionProblem(vartype)
variables = list(range(num_variables))
constraints = set()
if satisfiable:
values = tuple(vartype.value)
planted_solution = {v: choice(values) for v in variables}
configurations = [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)]
while len(constraints) < num_clauses:
# because constraints are hashed on configurations/variables, and because the inputs
# to xor can be swapped without loss of generality, we can order them
x, y, z = sample(variables, 3)
if y > x:
x, y = y, x
# get the constraint
const = xor_gate([x, y, z], vartype=vartype)
# pick (uniformly) a configuration and determine which variables we need to negate to
# match the chosen configuration
config = choice(configurations)
for idx, v in enumerate(const.variables):
if config[idx] != (planted_solution[v] > 0):
const.flip_variable(v)
assert const.check(planted_solution)
constraints.add(const)
else:
while len(constraints) < num_clauses:
# because constraints are hashed on configurations/variables, and because the inputs
# to xor can be swapped without loss of generality, we can order them
x, y, z = sample(variables, 3)
if y > x:
x, y = y, x
# get the constraint
const = xor_gate([x, y, z], vartype=vartype)
# randomly flip each variable in the constraint
for idx, v in enumerate(const.variables):
if random() > .5:
const.flip_variable(v)
# no planted solution exists to check against in the unsatisfiable case
constraints.add(const)
for const in constraints:
csp.add_constraint(const)
# in case any variables didn't make it in
for v in variables:
csp.add_variable(v)
return csp
|
def kwarg_decorator(func):
"""
Turns a function that accepts a single arg and some kwargs in to a
decorator that can optionally be called with kwargs:
.. code-block:: python
@kwarg_decorator
def my_decorator(func, bar=True, baz=None):
...
@my_decorator
def my_func():
pass
@my_decorator(bar=False)
def my_other_func():
pass
"""
@wraps(func)
def decorator(arg=None, **kwargs):
if arg is None:
return lambda arg: decorator(arg, **kwargs)
return func(arg, **kwargs)
return decorator
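# Hedged demonstration of kwarg_decorator; `shout`, `greet`, and `cheer` are
# hypothetical names used only to show both calling styles.
from functools import wraps

@kwarg_decorator
def shout(func, excitement=1):
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper() + "!" * excitement
    return wrapper

@shout                  # bare use: `arg` is the decorated function
def greet():
    return "hello"

@shout(excitement=3)    # kwarg use: returns a lambda that awaits the function
def cheer():
    return "hooray"

assert greet() == "HELLO!"
assert cheer() == "HOORAY!!!"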
|
def signature_matches(func, args=(), kwargs={}):
"""
Work out if a function is callable with some args or not.
"""
try:
sig = inspect.signature(func)
sig.bind(*args, **kwargs)
except TypeError:
return False
else:
return True
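# Hedged illustration of signature_matches with a throwaway function; the
# names below are examples only.
def add(a, b, scale=1):
    return (a + b) * scale

assert signature_matches(add, args=(1, 2)) is True
assert signature_matches(add, args=(1, 2), kwargs={'scale': 3}) is True
assert signature_matches(add, args=(1,)) is False           # missing 'b'
assert signature_matches(add, kwargs={'nope': 1}) is False   # unknown kwarg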
|
def last_arg_decorator(func):
"""
Allows a function to be used as either a decorator with args, or called as
a normal function.
@last_arg_decorator
def register_a_thing(foo, func, bar=True):
..
# Called as a decorator
@register_a_thing("abc", bar=False)
def my_func():
...
# Called as a normal function call
def my_other_func():
...
register_a_thing("def", my_other_func, bar=True)
"""
@wraps(func)
def decorator(*args, **kwargs):
if signature_matches(func, args, kwargs):
return func(*args, **kwargs)
else:
return lambda last: func(*(args + (last,)), **kwargs)
return decorator
|
def register_chooser(self, chooser, **kwargs):
"""Adds a model chooser definition to the registry."""
if not issubclass(chooser, Chooser):
return self.register_simple_chooser(chooser, **kwargs)
self.choosers[chooser.model] = chooser(**kwargs)
return chooser
|
def register_simple_chooser(self, model, **kwargs):
"""
Generates a model chooser definition from a model, and adds it to the
registry.
"""
name = '{}Chooser'.format(model._meta.object_name)
attrs = {'model': model}
attrs.update(kwargs)
chooser = type(name, (Chooser,), attrs)
self.register_chooser(chooser)
return model
|
def instance_from_str(instance_str):
"""
Given an instance string in the form "app.Model:pk", returns a tuple of
``(model, instance)``. If the pk part is empty, ``instance`` will be
``None``. Raises ``ValueError`` on invalid model strings or missing
instances.
"""
match = instance_str_re.match(instance_str)
if not match:
raise ValueError("Invalid instance string")
model_string = match.group(1)
try:
model = apps.get_model(model_string)
except (LookupError, ValueError):
raise ValueError("Invalid instance string")
pk = match.group(2)
if pk:
try:
return model, model._default_manager.get(pk=pk)
except model.DoesNotExist:
raise ValueError("Invalid instance string")
return model, None
|
def formatter(self, api_client, data, newval):
"""Get audio-related fields
Try to find fields for the audio url for specified preferred quality
level, or next-lowest available quality url otherwise.
"""
url_map = data.get("audioUrlMap")
audio_url = data.get("audioUrl")
# Only an audio URL, not a quality map. This happens for most of the
# mobile client tokens and some of the others now. In this case
# substitute the empirically determined default values in the format
# used by the rest of the function so downstream consumers continue to
# work.
if audio_url and not url_map:
url_map = {
BaseAPIClient.HIGH_AUDIO_QUALITY: {
"audioUrl": audio_url,
"bitrate": 64,
"encoding": "aacplus",
}
}
elif not url_map: # No audio url available (e.g. ad tokens)
return None
valid_audio_formats = [BaseAPIClient.HIGH_AUDIO_QUALITY,
BaseAPIClient.MED_AUDIO_QUALITY,
BaseAPIClient.LOW_AUDIO_QUALITY]
# Only iterate over sublist, starting at preferred audio quality, or
# from the beginning of the list if nothing is found. Ensures that the
# bitrate used will always be the same or lower quality than was
# specified to prevent audio from skipping for slow connections.
preferred_quality = api_client.default_audio_quality
if preferred_quality in valid_audio_formats:
i = valid_audio_formats.index(preferred_quality)
valid_audio_formats = valid_audio_formats[i:]
for quality in valid_audio_formats:
audio_url = url_map.get(quality)
if audio_url:
return audio_url[self.field]
return audio_url[self.field] if audio_url else None
|
def formatter(self, api_client, data, newval):
"""Parse additional url fields and map them to inputs
Attempt to create a dictionary with keys being user input, and
response being the returned URL
"""
if newval is None:
return None
user_param = data['_paramAdditionalUrls']
urls = {}
if isinstance(newval, str):
urls[user_param[0]] = newval
else:
for key, url in zip(user_param, newval):
urls[key] = url
return urls
|
def from_json_list(cls, api_client, data):
"""Convert a list of JSON values to a list of models
"""
return [cls.from_json(api_client, item) for item in data]
|
def populate_fields(api_client, instance, data):
"""Populate all fields of a model with data
Given a model with a PandoraModel superclass, this function will enumerate all
declared fields on that model and populate the values of their Field
and SyntheticField classes. All declared fields will have a value after
this function runs even if they are missing from the incoming JSON.
"""
for key, value in instance.__class__._fields.items():
default = getattr(value, "default", None)
newval = data.get(value.field, default)
if isinstance(value, SyntheticField):
newval = value.formatter(api_client, data, newval)
setattr(instance, key, newval)
continue
model_class = getattr(value, "model", None)
if newval and model_class:
if isinstance(newval, list):
newval = model_class.from_json_list(api_client, newval)
else:
newval = model_class.from_json(api_client, newval)
if newval and value.formatter:
newval = value.formatter(api_client, newval)
setattr(instance, key, newval)
|
def from_json(cls, api_client, data):
"""Convert one JSON value to a model object
"""
self = cls(api_client)
PandoraModel.populate_fields(api_client, self, data)
return self
|
def _base_repr(self, and_also=None):
"""Common repr logic for subclasses to hook
"""
items = [
"=".join((key, repr(getattr(self, key))))
for key in sorted(self._fields.keys())]
if items:
output = ", ".join(items)
else:
output = None
if and_also:
return "{}({}, {})".format(self.__class__.__name__,
output, and_also)
else:
return "{}({})".format(self.__class__.__name__, output)
|
def _send_cmd(self, cmd):
"""Write command to remote process
"""
self._process.stdin.write("{}\n".format(cmd).encode("utf-8"))
self._process.stdin.flush()
|
def _ensure_started(self):
"""Ensure player backing process is started
"""
if self._process and self._process.poll() is None:
return
if not getattr(self, "_cmd", None):
raise RuntimeError("Player command is not configured")
log.debug("Starting playback command: %r", self._cmd)
self._process = SilentPopen(self._cmd)
self._post_start()
|
def play(self, song):
"""Play a new song from a Pandora model
Returns once the stream starts but does not shut down the remote audio
output backend process. Calls the input callback when the user has
input.
"""
self._callbacks.play(song)
self._load_track(song)
time.sleep(2) # Give the backend time to load the track
while True:
try:
self._callbacks.pre_poll()
self._ensure_started()
self._loop_hook()
readers, _, _ = select.select(
self._get_select_readers(), [], [], 1)
for handle in readers:
if handle.fileno() == self._control_fd:
self._callbacks.input(handle.readline().strip(), song)
else:
value = self._read_from_process(handle)
if self._player_stopped(value):
return
finally:
self._callbacks.post_poll()
|
def play_station(self, station):
"""Play the station until something ends it
This function will run forever until terminated by calling
end_station.
"""
for song in iterate_forever(station.get_playlist):
try:
self.play(song)
except StopIteration:
self.stop()
return
|
def _post_start(self):
"""Set stdout to non-blocking
VLC does not always return a newline when reading status, so in order to be
lazy and still use the read API without caring about how much output there
is, we switch stdout to non-blocking mode and just read a large chunk of data.
"""
flags = fcntl.fcntl(self._process.stdout, fcntl.F_GETFL)
fcntl.fcntl(self._process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
|
def station_selection_menu(self, error=None):
"""Format a station menu and make the user select a station
"""
self.screen.clear()
if error:
self.screen.print_error("{}\n".format(error))
for i, station in enumerate(self.stations):
i = "{:>3}".format(i)
print("{}: {}".format(Colors.yellow(i), station.name))
return self.stations[self.screen.get_integer("Station: ")]
|
def play(self, song):
"""Play callback
"""
if song.is_ad:
print("{} ".format(Colors.cyan("Advertisement")))
else:
print("{} by {}".format(Colors.cyan(song.song_name),
Colors.yellow(song.artist_name)))
|
def input(self, input, song):
"""Input callback, handles key presses
"""
try:
cmd = getattr(self, self.CMD_MAP[input][1])
except (IndexError, KeyError):
return self.screen.print_error(
"Invalid command {!r}!".format(input))
cmd(song)
|
def retries(max_tries, exceptions=(Exception,)):
"""Function decorator implementing retrying logic.
exceptions: A tuple of exception classes; default (Exception,)
The decorator will call the function up to max_tries times if it raises
an exception.
By default it catches instances of the Exception class and subclasses.
This will recover after all but the most fatal errors. You may specify a
custom tuple of exception classes with the 'exceptions' argument; the
function will only be retried if it raises one of the specified
exceptions.
"""
def decorator(func):
def function(*args, **kwargs):
retries_left = max_tries
while retries_left > 0:
try:
retries_left -= 1
return func(*args, **kwargs)
except exceptions as exc:
# Don't retry for PandoraExceptions - unlikely that result
# will change for same set of input parameters.
if isinstance(exc, PandoraException):
raise
if retries_left > 0:
time.sleep(delay_exponential(
0.5, 2, max_tries - retries_left))
else:
raise
return function
return decorator
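# Hedged usage sketch for the retries decorator; it assumes the surrounding
# module's imports (time, PandoraException) and the delay_exponential helper
# below. `flaky_fetch` is a hypothetical function that fails twice.
_calls = {'n': 0}

@retries(3, exceptions=(IOError,))
def flaky_fetch():
    _calls['n'] += 1
    if _calls['n'] < 3:
        raise IOError("transient failure")
    return "ok"

assert flaky_fetch() == "ok"  # first two calls raise, the third succeeds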
|
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: {}".format(base))
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
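# Hedged worked example of the formula base * growth_factor ** (attempts - 1):
assert delay_exponential(0.5, 2, 1) == 0.5   # 0.5 * 2**0
assert delay_exponential(0.5, 2, 2) == 1.0   # 0.5 * 2**1
assert delay_exponential(0.5, 2, 4) == 4.0   # 0.5 * 2**3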
|
def iterate_forever(func, *args, **kwargs):
"""Iterate over a finite iterator forever
When the iterator is exhausted, it will call the function again to generate a
new iterator and keep iterating.
"""
output = func(*args, **kwargs)
while True:
try:
playlist_item = next(output)
playlist_item.prepare_playback()
yield playlist_item
except StopIteration:
output = func(*args, **kwargs)
|
def get_integer(prompt):
"""Gather user input and convert it to an integer
Will keep trying until the user enters an integer or until they ^C the
program.
"""
while True:
try:
return int(input(prompt).strip())
except ValueError:
print(Colors.red("Invalid Input!"))
|
def collect(self, dataset_readers_list):
"""collect results
Returns:
a list of results
"""
ret = [ ]
for i, collector in enumerate(self.components):
report = ProgressReport(name='collecting results', done=(i + 1), total=len(self.components))
alphatwirl.progressbar.report_progress(report)
ret.append(collector.collect([(dataset, tuple(r.readers[i] for r in readerComposites))
for dataset, readerComposites in dataset_readers_list]))
return ret
|
def open(self):
"""open the drop box
You need to call this method before starting putting packages.
Returns
-------
None
"""
self.workingArea.open()
self.runid_pkgidx_map = { }
self.runid_to_return = deque()
|
def put(self, package):
"""put a task
This method places a task in the working area and has the
dispatcher execute it.
If you need to put multiple tasks, it can be much faster to
use `put_multiple()` than to use this method multiple times
depending on the dispatcher.
Parameters
----------
package : callable
A task
Returns
-------
int
A package index assigned by the working area
"""
pkgidx = self.workingArea.put_package(package)
logger = logging.getLogger(__name__)
logger.info('submitting {}'.format(self.workingArea.package_relpath(pkgidx)))
runid = self.dispatcher.run(self.workingArea, pkgidx)
self.runid_pkgidx_map[runid] = pkgidx
return pkgidx
|
def put_multiple(self, packages):
"""put tasks
This method places multiple tasks in the working area and has
the dispatcher execute them.
Parameters
----------
packages : list(callable)
A list of tasks
Returns
-------
list(int)
Package indices assigned by the working area
"""
pkgidxs = [self.workingArea.put_package(p) for p in packages]
logger = logging.getLogger(__name__)
logger.info('submitting {}'.format(
', '.join(['{}'.format(self.workingArea.package_relpath(i)) for i in pkgidxs])
))
runids = self.dispatcher.run_multiple(self.workingArea, pkgidxs)
self.runid_pkgidx_map.update(zip(runids, pkgidxs))
return pkgidxs
|
def receive(self):
"""return pairs of package indices and results of all tasks
This method waits until all tasks finish.
Returns
-------
list
A list of pairs of package indices and results
"""
ret = [ ] # a list of (pkgid, result)
while True:
if self.runid_pkgidx_map:
self.runid_to_return.extend(self.dispatcher.poll())
ret.extend(self._collect_all_finished_pkgidx_result_pairs())
if not self.runid_pkgidx_map:
break
time.sleep(self.sleep)
ret = sorted(ret, key=itemgetter(0))
return ret
|
def poll(self):
"""return pairs of package indices and results of finished tasks
This method does not wait for tasks to finish.
Returns
-------
list
A list of pairs of package indices and results
"""
self.runid_to_return.extend(self.dispatcher.poll())
ret = self._collect_all_finished_pkgidx_result_pairs()
return ret
|
def receive_one(self):
"""return a pair of a package index and result of a task
This method waits until a task finishes. It returns `None` if
no task is running.
Returns
-------
tuple or None
A pair of a package index and result. `None` if no task
is running.
"""
if not self.runid_pkgidx_map:
return None
while True:
if not self.runid_to_return:
self.runid_to_return.extend(self.dispatcher.poll())
ret = self._collect_next_finished_pkgidx_result_pair()
if ret is not None:
break
if self.runid_pkgidx_map:
time.sleep(self.sleep)
return ret
|
def run_multiple(self, eventLoops):
"""run the event loops in the background.
Args:
eventLoops (list): a list of event loops to run
"""
self.nruns += len(eventLoops)
return self.communicationChannel.put_multiple(eventLoops)
|
def poll(self):
"""Return pairs of run ids and results of finish event loops.
"""
ret = self.communicationChannel.receive_finished()
self.nruns -= len(ret)
return ret
|
def receive_one(self):
"""Return a pair of a run id and a result.
This method waits until an event loop finishes.
This method returns None if no loop is running.
"""
if self.nruns == 0:
return None
ret = self.communicationChannel.receive_one()
if ret is not None:
self.nruns -= 1
return ret
|
def receive(self):
"""Return pairs of run ids and results.
This method waits until all event loops finish
"""
ret = self.communicationChannel.receive_all()
self.nruns -= len(ret)
if self.nruns > 0:
import logging
logger = logging.getLogger(__name__)
logger.warning(
'too few results received: {} results received, {} more expected'.format(
len(ret), self.nruns))
elif self.nruns < 0:
import logging
logger = logging.getLogger(__name__)
logger.warning(
'too many results received: {} results received, {} too many'.format(
len(ret), -self.nruns))
return ret
|
def end(self):
"""wait until all event loops end and returns the results.
"""
results = self.communicationChannel.receive()
if self.nruns != len(results):
import logging
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
logger.warning(
'too few results received: {} results received, {} expected'.format(
len(results),
self.nruns
))
return results
|
def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')):
"""Convert ``key_vals_dict`` to `tuple_list``.
Args:
key_vals_dict (dict): The first parameter.
fill: a value to fill missing data
Returns:
A list of tuples
"""
tuple_list = [ ]
if not key_vals_dict: return tuple_list
vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())])
for k, vs in key_vals_dict.items():
try:
tuple_list.extend([k + tuple(v) + (fill, )*(vlen - len(v)) for v in vs])
except TypeError:
# assume k is not a tuple
tuple_list.extend([(k, ) + tuple(v) + (fill, )*(vlen - len(v)) for v in vs])
return tuple_list
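# Hedged worked example: values shorter than the longest row are padded with
# `fill`, and non-tuple keys are wrapped automatically.
d = {(1,): [(10, 11), (12,)], 2: [(20, 21)]}
rows = key_vals_dict_to_tuple_list(d, fill=0)
assert sorted(rows) == [(1, 10, 11), (1, 12, 0), (2, 20, 21)]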
|
def open(self):
"""Open the working area
Returns
-------
None
"""
self.path = self._prepare_dir(self.topdir)
self._copy_executable(area_path=self.path)
self._save_logging_levels(area_path=self.path)
self._put_python_modules(modules=self.python_modules, area_path=self.path)
|
def put_package(self, package):
"""Put a package
Parameters
----------
package :
a task package
Returns
-------
int
A package index
"""
self.last_package_index += 1
package_index = self.last_package_index
package_fullpath = self.package_fullpath(package_index)
# e.g., '{path}/tpd_20161129_122841_HnpcmF/task_00009.p.gz'
with gzip.open(package_fullpath, 'wb') as f:
pickle.dump(package, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
result_fullpath = self.result_fullpath(package_index)
# e.g., '{path}/tpd_20161129_122841_HnpcmF/results/task_00009/result.p.gz'
result_dir = os.path.dirname(result_fullpath)
# e.g., '{path}/tpd_20161129_122841_HnpcmF/results/task_00009'
alphatwirl.mkdir_p(result_dir)
return package_index
|
def collect_result(self, package_index):
"""Collect the result of a task
Parameters
----------
package_index :
a package index
Returns
-------
obj
The result of the task
"""
result_fullpath = self.result_fullpath(package_index)
# e.g., '{path}/tpd_20161129_122841_HnpcmF/results/task_00009/result.p.gz'
try:
with gzip.open(result_fullpath, 'rb') as f:
result = pickle.load(f)
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(e)
return None
return result
|
def package_fullpath(self, package_index):
"""Returns the full path of the package
This method returns the full path to the package. This method
simply constructs the path based on the convention and doesn't
check if the package actually exists.
Parameters
----------
package_index :
a package index
Returns
-------
str
the full path to the package
"""
ret = os.path.join(self.path, self.package_relpath(package_index))
# e.g., '{path}/tpd_20161129_122841_HnpcmF/task_00009.p.gz'
return ret
|
def result_relpath(self, package_index):
"""Returns the relative path of the result
This method returns the path to the result relative to the
top dir of the working area. This method simply constructs the
path based on the convention and doesn't check if the result
actually exists.
Parameters
----------
package_index :
a package index
Returns
-------
str
the relative path to the result
"""
dirname = 'task_{:05d}'.format(package_index)
# e.g., 'task_00009'
ret = os.path.join('results', dirname, 'result.p.gz')
# e.g., 'results/task_00009/result.p.gz'
return ret
|
def result_fullpath(self, package_index):
"""Returns the full path of the result
This method returns the full path to the result. This method
simply constructs the path based on the convention and doesn't
check if the result actually exists.
Parameters
----------
package_index :
a package index
Returns
-------
str
the full path to the result
"""
ret = os.path.join(self.path, self.result_relpath(package_index))
# e.g., '{path}/tpd_20161129_122841_HnpcmF/results/task_00009/result.p.gz'
return ret
|
def run_multiple(self, workingArea, package_indices):
"""Submit multiple jobs
Parameters
----------
workingArea :
A workingArea
package_indices : list(int)
A list of package indices
Returns
-------
list(str)
The list of the run IDs of the jobs
"""
if not package_indices:
return [ ]
job_desc = self._compose_job_desc(workingArea, package_indices)
clusterprocids = submit_jobs(job_desc, cwd=workingArea.path)
# TODO: make configurable
clusterids = clusterprocids2clusterids(clusterprocids)
for clusterid in clusterids:
change_job_priority([clusterid], 10)
self.clusterprocids_outstanding.extend(clusterprocids)
return clusterprocids
|
def poll(self):
"""Return the run IDs of the finished jobs
Returns
-------
list(str)
The list of the run IDs of the finished jobs
"""
clusterids = clusterprocids2clusterids(self.clusterprocids_outstanding)
clusterprocid_status_list = query_status_for(clusterids)
# e.g., [['1730126.0', 2], ['1730127.0', 2], ['1730129.1', 1], ['1730130.0', 1]]
if clusterprocid_status_list:
clusterprocids, statuses = zip(*clusterprocid_status_list)
else:
clusterprocids, statuses = (), ()
clusterprocids_finished = [i for i in self.clusterprocids_outstanding if i not in clusterprocids]
self.clusterprocids_finished.extend(clusterprocids_finished)
self.clusterprocids_outstanding[:] = clusterprocids
# logging
counter = collections.Counter(statuses)
messages = [ ]
if counter:
messages.append(', '.join(['{}: {}'.format(HTCONDOR_JOBSTATUS[k], counter[k]) for k in counter.keys()]))
if self.clusterprocids_finished:
messages.append('Finished {}'.format(len(self.clusterprocids_finished)))
logger = logging.getLogger(__name__)
logger.info(', '.join(messages))
return clusterprocids_finished
|
def wait(self):
"""Wait until all jobs finish and return the run IDs of the finished jobs
Returns
-------
list(str)
The list of the run IDs of the finished jobs
"""
sleep = 5
while True:
if self.clusterprocids_outstanding:
self.poll()
if not self.clusterprocids_outstanding:
break
time.sleep(sleep)
return self.clusterprocids_finished
|
def failed_runids(self, runids):
"""Provide the run IDs of failed jobs
Returns
-------
None
"""
# remove failed clusterprocids from self.clusterprocids_finished
# so that len(self.clusterprocids_finished)) becomes the number
# of the successfully finished jobs
for i in runids:
try:
self.clusterprocids_finished.remove(i)
except ValueError:
pass
|
def atpbar(iterable, name=None):
"""Progress bar
"""
try:
len_ = len(iterable)
except TypeError:
logger = logging.getLogger(__name__)
logger.warning('length is unknown: {!r}'.format(iterable))
logger.warning('atpbar is turned off')
return iterable
if name is None:
name = repr(iterable)
return Atpbar(iterable, name=name, len_=len_)
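# Hedged usage sketch; assumes the surrounding atpbar package (the Atpbar
# class referenced above) is importable.
import time
for i in atpbar(range(5), name='demo loop'):
    time.sleep(0.1)   # stand-in for real work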
|
def getArrays(self, tree, branchName):
"""return the array.array objects for the branch and its counter branch
This method returns a pair of the array.array objects. The first one is
for the given tree and branch name. The second one is for its counter
branch. The second one will be None when the branch does not have a
counter. A pair of None will be returned when the tree does not have
the branch.
"""
itsArray = self._getArray(tree, branchName)
if itsArray is None: return None, None
itsCountArray = self._getCounterArray(tree, branchName)
return itsArray, itsCountArray
|
def begin(self):
"""begin
"""
if self.isopen: return
self.dropbox.open()
self.isopen = True
|
def put(self, task, *args, **kwargs):
"""put a task and its arguments
If you need to put multiple tasks, it can be faster to put
multiple tasks with `put_multiple()` than to use this method
multiple times.
Parameters
----------
task : a function
A function to be executed
args : list
A list of positional arguments to the `task`
kwargs : dict
A dict with keyword arguments to the `task`
Returns
-------
int, str, or any hashable and sortable
A task ID. IDs are sortable in the order in which the
corresponding tasks are put.
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
package = TaskPackage(task=task, args=args, kwargs=kwargs)
return self.dropbox.put(package)
|
def put_multiple(self, task_args_kwargs_list):
"""put a list of tasks and their arguments
This method can be used to put multiple tasks at once. Calling
this method once with multiple tasks can be much faster than
calling `put()` multiple times.
Parameters
----------
task_args_kwargs_list : list
A list whose items are each either a dict with the keys `task`
(required), `args`, and `kwargs` (the parameters of `put()`), or a bare task callable.
Returns
-------
list
A list of task IDs.
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
packages = [ ]
for t in task_args_kwargs_list:
try:
task = t['task']
args = t.get('args', ())
kwargs = t.get('kwargs', {})
package = TaskPackage(task=task, args=args, kwargs=kwargs)
except TypeError:
package = TaskPackage(task=t, args=(), kwargs={})
packages.append(package)
return self.dropbox.put_multiple(packages)
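# Hedged illustration of the two accepted item shapes; `scale` and `channel`
# are hypothetical, and the final call assumes an already-open channel.
def scale(x=1, factor=1):
    return x * factor

task_list = [
    {'task': scale, 'args': (2,), 'kwargs': {'factor': 10}},
    scale,   # bare callable: args=() and kwargs={} are assumed
]
# task_ids = channel.put_multiple(task_list)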
|
def receive_finished(self):
"""return a list of pairs of IDs and results of finished tasks.
This method doesn't wait for tasks to finish. It returns IDs
and results which have already finished.
Returns
-------
list
A list of pairs of IDs and results
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
return self.dropbox.poll()
|
def receive_one(self):
"""return a pair of an ID and a result of a task.
This method waits for a task to finish.
Returns
-------
An ID and a result of a task. `None` if no task is running.
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
return self.dropbox.receive_one()
|
def receive_all(self):
"""return a list of pairs of IDs and results of all tasks.
This method waits for all tasks to finish.
Returns
-------
list
A list of pairs of IDs and results
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
return self.dropbox.receive()
|
def receive(self):
"""return a list results of all tasks.
This method waits for all tasks to finish.
Returns
-------
list
A list of results of the tasks. The results are sorted in
the order in which the tasks are put.
"""
pkgidx_result_pairs = self.receive_all()
if pkgidx_result_pairs is None:
return
results = [r for _, r in pkgidx_result_pairs]
return results
|
def end(self):
"""end
"""
if not self.isopen: return
self.dropbox.close()
self.isopen = False
|
def expand_path_cfg(path_cfg, alias_dict={ }, overriding_kargs={ }):
"""expand a path config
Args:
path_cfg (str, tuple, dict): a config for path
alias_dict (dict): a dict for aliases
overriding_kargs (dict): to be used for recursive call
"""
if isinstance(path_cfg, str):
return _expand_str(path_cfg, alias_dict, overriding_kargs)
if isinstance(path_cfg, dict):
return _expand_dict(path_cfg, alias_dict)
# assume tuple or list
return _expand_tuple(path_cfg, alias_dict, overriding_kargs)
|
def _expand_str(path_cfg, alias_dict, overriding_kargs):
"""expand a path config given as a string
"""
if path_cfg in alias_dict:
# e.g., path_cfg = 'var_cut'
return _expand_str_alias(path_cfg, alias_dict, overriding_kargs)
# e.g., path_cfg = 'ev : {low} <= ev.var[0] < {high}'
return _expand_for_lambda_str(path_cfg, alias_dict, overriding_kargs)
|
def _expand_str_alias(path_cfg, alias_dict, overriding_kargs):
"""expand a path config given as a string
Args:
path_cfg (str): an alias
alias_dict (dict):
overriding_kargs (dict):
"""
# e.g.,
# path_cfg = 'var_cut'
new_path_cfg = alias_dict[path_cfg]
# e.g., ('ev : {low} <= ev.var[0] < {high}', {'low': 10, 'high': 200})
new_overriding_kargs = dict(alias=path_cfg)
# e.g., {'alias': 'var_cut'}
new_overriding_kargs.update(overriding_kargs)
# e.g., {'alias': 'var_cut', 'name': 'var_cut25', 'low': 25}
return expand_path_cfg(new_path_cfg, alias_dict,new_overriding_kargs)
|
def _expand_tuple(path_cfg, alias_dict, overriding_kargs):
"""expand a path config given as a tuple
"""
# e.g.,
# path_cfg = ('ev : {low} <= ev.var[0] < {high}', {'low': 10, 'high': 200})
# overriding_kargs = {'alias': 'var_cut', 'name': 'var_cut25', 'low': 25}
new_path_cfg = path_cfg[0]
# e.g., 'ev : {low} <= ev.var[0] < {high}'
new_overriding_kargs = path_cfg[1].copy()
# e.g., {'low': 10, 'high': 200}
new_overriding_kargs.update(overriding_kargs)
# e.g., {'low': 25, 'high': 200, 'alias': 'var_cut', 'name': 'var_cut25'}
return expand_path_cfg(
new_path_cfg,
overriding_kargs=new_overriding_kargs,
alias_dict=alias_dict
)
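# Hedged illustration of the expansion traced in the comments above; it
# assumes the module's remaining helpers (_expand_dict, _expand_for_lambda_str)
# are available, and 'var_cut' is a hypothetical alias.
alias_dict = {
    'var_cut': ('ev : {low} <= ev.var[0] < {high}', {'low': 10, 'high': 200}),
}
expanded = expand_path_cfg(
    'var_cut', alias_dict=alias_dict,
    overriding_kargs={'name': 'var_cut25', 'low': 25})
# expected: the alias resolves to the lambda string with the merged kargs
# {'low': 25, 'high': 200, 'name': 'var_cut25', 'alias': 'var_cut'}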
|
def poll(self):
"""check if the jobs are running and return a list of pids for
finished jobs
"""
finished_procs = [p for p in self.running_procs if p.poll() is not None]
self.running_procs = collections.deque([p for p in self.running_procs if p not in finished_procs])
for proc in finished_procs:
stdout, stderr = proc.communicate()
## proc.communicate() returns (stdout, stderr) when
## self.pipe = True. Otherwise they are (None, None)
finished_pids = [p.pid for p in finished_procs]
self.finished_pids.extend(finished_pids)
logger = logging.getLogger(__name__)
messages = 'Running: {}, Finished: {}'.format(len(self.running_procs), len(self.finished_pids))
logger.info(messages)
return finished_pids
|
def wait(self):
"""wait until all jobs finish and return a list of pids
"""
finished_pids = [ ]
while self.running_procs:
finished_pids.extend(self.poll())
return finished_pids
|
def getVector(self, tree, branchName):
"""return the ROOT.vector object for the branch.
"""
if (tree, branchName) in self.__class__.addressDict:
return self.__class__.addressDict[(tree, branchName)]
itsVector = self._getVector(tree, branchName)
self.__class__.addressDict[(tree, branchName)] = itsVector
return itsVector
|
def build_parallel(parallel_mode, quiet=True, processes=4,
user_modules=None, dispatcher_options=None):
"""initializes `Parallel`
Parameters
----------
parallel_mode : str
"multiprocessing" (default), "htcondor" or "subprocess"
quiet : bool, optional
if True, progress bars will not be shown in the "multiprocessing" mode.
processes : int, optional
The number of processes when ``parallel_mode`` is
"multiprocessing"
user_modules : list, optional
The names of modules to be sent to worker nodes when
parallel_mode is "htcondor"
dispatcher_options : dict, optional
Options to dispatcher
Returns
-------
parallel
an instance of the class `Parallel`
"""
if user_modules is None:
user_modules = [ ]
if dispatcher_options is None:
dispatcher_options = dict()
dispatchers = ('subprocess', 'htcondor')
parallel_modes = ('multiprocessing', ) + dispatchers
default_parallel_mode = 'multiprocessing'
if parallel_mode not in parallel_modes:
logger = logging.getLogger(__name__)
logger.warning('unknown parallel_mode "{}", use default "{}"'.format(
parallel_mode, default_parallel_mode
))
parallel_mode = default_parallel_mode
if parallel_mode == 'multiprocessing':
if quiet:
atpbar.disable()
return _build_parallel_multiprocessing(processes=processes)
return _build_parallel_dropbox(
parallel_mode=parallel_mode,
user_modules=user_modules,
dispatcher_options=dispatcher_options
)
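# Hedged usage sketch: request the default multiprocessing backend with eight
# worker processes; an unknown parallel_mode falls back to "multiprocessing"
# with a warning.
parallel = build_parallel(parallel_mode='multiprocessing', quiet=True, processes=8)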
|
def configure(self, component, all_dependencies):
''' Ensure all config-time files have been generated. Return a
dictionary of generated items.
'''
r = {}
builddir = self.buildroot
# only dependencies which are actually valid can contribute to the
# config data (which includes the versions of all dependencies in its
# build info); if the dependencies aren't available we can't tell what
# version they are. Anything missing here should always be a test
# dependency that isn't going to be used, otherwise the yotta build
# command will fail before we get here
available_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)
self.set_toplevel_definitions = ''
if self.build_info_include_file is None:
self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)
self.set_toplevel_definitions += build_info_definitions
if self.config_include_file is None:
self.config_include_file, config_definitions, self.config_json_file = self._getConfigData(available_dependencies, component, builddir, self.build_info_include_file)
self.set_toplevel_definitions += config_definitions
self.configured = True
return {
'merged_config_include': self.config_include_file,
'merged_config_json': self.config_json_file,
'build_info_include': self.build_info_include_file
}
|
def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):
''' generate top-level CMakeLists for this component and its
dependencies: the CMakeLists are all generated in self.buildroot,
which MUST be out-of-source
!!! NOTE: experimenting with a slightly different way of doing
things here, this function is a generator that yields any errors
produced, so the correct use is:
for error in gen.generateRecursive(...):
print(error)
'''
assert(self.configured)
if builddir is None:
builddir = self.buildroot
if modbuilddir is None:
modbuilddir = os.path.join(builddir, 'ym')
if processed_components is None:
processed_components = dict()
if not self.target:
yield 'Target "%s" is not a valid build target' % self.target
toplevel = not len(processed_components)
logger.debug('generate build files: %s (target=%s)' % (component, self.target))
# because of the way c-family language includes work we need to put the
# public header directories of all components that this component
# depends on (directly OR indirectly) into the search path, which means
# we need to first enumerate all the direct and indirect dependencies
recursive_deps = component.getDependenciesRecursive(
available_components = all_components,
target = self.target,
available_only = True,
test = True
)
dependencies = component.getDependencies(
all_components,
target = self.target,
available_only = True,
test = True
)
for name, dep in dependencies.items():
# if dep is a test dependency, then it might not be required (if
# we're not building tests). We don't actually know at this point
if not dep:
if dep.isTestDependency():
logger.debug('Test dependency "%s" of "%s" is not installed.' % (name, component))
else:
yield 'Required dependency "%s" of "%s" is not installed.' % (name, component)
# ensure this component is assumed to have been installed before we
# check for its dependencies, in case it has a circular dependency on
# itself
processed_components[component.getName()] = component
new_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])
self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)
logger.debug('recursive deps of %s:' % component)
for d in recursive_deps.values():
logger.debug(' %s' % d)
processed_components.update(new_dependencies)
for name, c in new_dependencies.items():
for error in self.generateRecursive(
c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application
):
yield error
|
def _validateListedSubdirsExist(self, component):
''' Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
'''
lib_subdirs = component.getLibs(explicit_only=True)
bin_subdirs = component.getBinaries()
ok = True
for d in lib_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"lib directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
for d in bin_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"bin directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
return ok
|
def _listSubDirectories(self, component, toplevel):
''' return: {
manual: [list of subdirectories with manual CMakeLists],
auto: [list of pairs: (subdirectories name to autogenerate, a list of source files in that dir)],
bin: {dictionary of subdirectory name to binary name},
lib: {dictionary of subdirectory name to binary name},
test: [list of directories that build tests],
resource: [list of directories that contain resources]
}
'''
manual_subdirs = []
auto_subdirs = []
header_subdirs = []
lib_subdirs = component.getLibs()
bin_subdirs = component.getBinaries()
test_subdirs = []
resource_subdirs = []
# if the application or library is set to get the sources from top level ("."),
# they'll be accumulated into a single array (top_sources below).
top_sources = []
start_on_top = "." in [os.path.normpath(x) for x in list(lib_subdirs.keys()) + list(bin_subdirs.keys())]
for f in sorted(os.listdir(component.path)):
if f in Ignore_Subdirs or f.startswith('.') or f.startswith('_'):
continue
check_cmakefile_path = os.path.join(f, 'CMakeLists.txt')
if os.path.isfile(os.path.join(component.path, check_cmakefile_path)) and not \
component.ignores(check_cmakefile_path):
self.checkStandardSourceDir(f, component)
# if the subdirectory has a CMakeLists.txt in it (and it isn't
# ignored), then delegate to that:
manual_subdirs.append(f)
# tests only supported in the `test` directory for now
if f in ('test',):
test_subdirs.append(f)
else:
if os.path.isfile(os.path.join(component.path, f)):
# top level source: check if it should be included
if not component.ignores(f) and start_on_top:
sf = self.createSourceFile(f, os.path.join(component.path, f), ".")
if sf is not None:
top_sources.append(sf)
else:
# otherwise, if the directory has source files, and is listed
# as a source/test directory, generate a CMakeLists in the
# corresponding temporary directory, and add that.
sources = self.containsSourceFiles(os.path.join(component.path, f), component)
if sources:
if f in ('test',):
auto_subdirs.append((f, sources))
test_subdirs.append(f)
elif start_on_top:
# include the sources in this directory only if it's not
# a potential test directory
from yotta.lib import validate
if not validate.isPotentialTestDir(f):
top_sources.extend(sources)
if f == component.getName():
header_subdirs.append((f, sources))
elif os.path.normpath(f) in [fsutils.fullySplitPath(x)[0] for x in lib_subdirs] or \
os.path.normpath(f) in [fsutils.fullySplitPath(x)[0] for x in bin_subdirs]:
for full_subpath in list(lib_subdirs.keys()) + list(bin_subdirs.keys()):
if fsutils.fullySplitPath(full_subpath)[0] == os.path.normpath(f):
# this might be a sub-sub directory, in which
# case we need to re-calculate the sources just
# for the part we care about:
sources = self.containsSourceFiles(os.path.join(component.path, full_subpath), component)
auto_subdirs.append((full_subpath, sources))
elif f == component.getName():
header_subdirs.append((f, sources))
elif toplevel and \
((f in ('test',)) or \
(os.path.normpath(f) in lib_subdirs or start_on_top) or \
(os.path.normpath(f) in bin_subdirs or start_on_top) and not \
component.ignores(f)):
# (if there aren't any source files then do nothing)
# !!! FIXME: ensure this warning is covered in tests
logger.warning("subdirectory \"%s\" of %s was ignored because it doesn't appear to contain any source files", f, component)
# 'resource' directory also has special meaning, but there's no
# pattern for the files which might be in here:
if f in ('resource',):
resource_subdirs.append(os.path.join(component.path, f))
# issue a warning if a differently cased or common misspelling of a
# standard directory name was encountered:
check_directory_name_cases = list(lib_subdirs.keys()) + list(bin_subdirs.keys()) + ['test', 'resource']
if f.lower() in check_directory_name_cases + ['src'] and not \
f in check_directory_name_cases and not \
component.ignores(f):
self.checkStandardSourceDir(f, component)
if top_sources:
# all the top level sources are grouped into a single cmake-generated directory
# which is given the same name as the component
auto_subdirs.append((component.getName(), top_sources))
return {
"manual": manual_subdirs,
"auto": auto_subdirs,
"headers": header_subdirs,
"bin": {component.getName(): component.getName()} if (start_on_top and component.isApplication()) else bin_subdirs,
"lib": {component.getName(): component.getName()} if (start_on_top and not component.isApplication()) else lib_subdirs,
"test": test_subdirs,
"resource": resource_subdirs
}
|
def _getConfigData(self, all_dependencies, component, builddir, build_info_header_path):
''' returns (path_to_config_header, cmake_set_definitions) '''
# ordered_json, , read/write ordered json, internal
from yotta.lib import ordered_json
add_defs_header = ''
set_definitions = ''
# !!! backwards-compatible "TARGET_LIKE" definitions for the top-level
# of the config. NB: THESE WILL GO AWAY
definitions = []
definitions.append(('TARGET', sanitizePreprocessorSymbol(self.target.getName())))
definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(self.target.getName()),None))
# make the path to the build-info header available both to CMake and
# in the preprocessor:
full_build_info_header_path = replaceBackslashes(os.path.abspath(build_info_header_path))
logger.debug('build info header include path: "%s"', full_build_info_header_path)
definitions.append(('YOTTA_BUILD_INFO_HEADER', '"'+full_build_info_header_path+'"'))
for target in self.target.getSimilarTo_Deprecated():
if '*' not in target:
definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(target),None))
merged_config = self.target.getMergedConfig()
logger.debug('target configuration data: %s', merged_config)
definitions += self._definitionsForConfig(merged_config, ['YOTTA', 'CFG'])
add_defs_header += '// yotta config data (including backwards-compatible definitions)\n'
for k, v in definitions:
if v is not None:
add_defs_header += '#define %s %s\n' % (k, v)
set_definitions += 'set(%s %s)\n' % (k, v)
else:
add_defs_header += '#define %s\n' % k
set_definitions += 'set(%s TRUE)\n' % k
add_defs_header += '\n// version definitions\n'
for dep in list(all_dependencies.values()) + [component]:
add_defs_header += "#define YOTTA_%s_VERSION_STRING \"%s\"\n" % (sanitizePreprocessorSymbol(dep.getName()), str(dep.getVersion()))
add_defs_header += "#define YOTTA_%s_VERSION_MAJOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().major())
add_defs_header += "#define YOTTA_%s_VERSION_MINOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().minor())
add_defs_header += "#define YOTTA_%s_VERSION_PATCH %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().patch())
# add the component's definitions
defines = component.getDefines()
if defines:
add_defs_header += "\n// direct definitions (defines.json)\n"
for name, value in defines.items():
add_defs_header += "#define %s %s\n" % (name, value)
add_defs_header += '\n'
# use -include <definitions header> instead of lots of separate
# defines... this is compiler specific, but currently testing it
# out for gcc-compatible compilers only:
config_include_file = os.path.join(builddir, 'yotta_config.h')
config_json_file = os.path.join(builddir, 'yotta_config.json')
set_definitions += 'set(YOTTA_CONFIG_MERGED_JSON_FILE \"%s\")\n' % replaceBackslashes(os.path.abspath(config_json_file))
self._writeFile(
config_include_file,
'#ifndef __YOTTA_CONFIG_H__\n'+
'#define __YOTTA_CONFIG_H__\n'+
add_defs_header+
'#endif // ndef __YOTTA_CONFIG_H__\n'
)
self._writeFile(
config_json_file,
ordered_json.dumps(merged_config)
)
return (config_include_file, set_definitions, config_json_file)
|
def getBuildInfo(self, sourcedir, builddir):
''' Write the build info header file, and return (path_to_written_header, set_cmake_definitions) '''
cmake_defs = ''
preproc_defs = '// yotta build info, #include YOTTA_BUILD_INFO_HEADER to access\n'
# standard library modules
import datetime
# vcs, , represent version controlled directories, internal
from yotta.lib import vcs
now = datetime.datetime.utcnow()
vcs_instance = vcs.getVCS(sourcedir)
if self.build_uuid is None:
import uuid
self.build_uuid = uuid.uuid4()
definitions = [
('YOTTA_BUILD_YEAR', now.year, 'UTC year'),
('YOTTA_BUILD_MONTH', now.month, 'UTC month 1-12'),
('YOTTA_BUILD_DAY', now.day, 'UTC day 1-31'),
('YOTTA_BUILD_HOUR', now.hour, 'UTC hour 0-24'),
('YOTTA_BUILD_MINUTE', now.minute, 'UTC minute 0-59'),
('YOTTA_BUILD_SECOND', now.second, 'UTC second 0-61'),
('YOTTA_BUILD_UUID', self.build_uuid, 'unique random UUID for each build'),
]
if vcs_instance is not None:
commit_id = None
repotype = vcs_instance.__class__.__name__
try:
commit_id = vcs_instance.getCommitId()
except vcs.VCSNotInstalled as e:
logger.warning('%s is not installed, VCS status build info is not available', repotype)
commit_id = None
except vcs.VCSError as e:
logger.debug('%s', e)
logger.warning(
'error detecting build info: "%s", build info is not available to the build. Please check that this is a valid %s repository!',
str(e).split('\n')[0],
repotype
)
if commit_id is not None:
clean_state = int(vcs_instance.isClean())
description = vcs_instance.getDescription()
definitions += [
('YOTTA_BUILD_VCS_ID', commit_id, 'git or mercurial hash'),
('YOTTA_BUILD_VCS_CLEAN', clean_state, 'evaluates true if the version control system was clean, otherwise false'),
('YOTTA_BUILD_VCS_DESCRIPTION', description, 'git describe or mercurial equivalent')
]
for d in definitions:
preproc_defs += '#define %s %s // %s\n' % d
cmake_defs += 'set(%s "%s") # %s\n' % d
buildinfo_include_file = os.path.join(builddir, 'yotta_build_info.h')
self._writeFile(
buildinfo_include_file,
'#ifndef __YOTTA_BUILD_INFO_H__\n'+
'#define __YOTTA_BUILD_INFO_H__\n'+
preproc_defs+
'#endif // ndef __YOTTA_BUILD_INFO_H__\n'
)
return (buildinfo_include_file, cmake_defs)
|
def generate(
self, builddir, modbuilddir, component, active_dependencies, immediate_dependencies, all_dependencies, application, toplevel
):
''' active_dependencies is the dictionary of components that need to be
built for this component, but will not already have been built for
another component.
'''
include_root_dirs = ''
if application is not None and component is not application:
include_root_dirs += 'include_directories("%s")\n' % replaceBackslashes(application.path)
include_sys_dirs = ''
include_other_dirs = ''
for name, c in itertools.chain(((component.getName(), component),), all_dependencies.items()):
if c is not component and c.isTestDependency():
continue
include_root_dirs += 'include_directories("%s")\n' % replaceBackslashes(c.path)
dep_sys_include_dirs = c.getExtraSysIncludes()
for d in dep_sys_include_dirs:
include_sys_dirs += 'include_directories(SYSTEM "%s")\n' % replaceBackslashes(os.path.join(c.path, d))
dep_extra_include_dirs = c.getExtraIncludes()
for d in dep_extra_include_dirs:
include_other_dirs += 'include_directories("%s")\n' % replaceBackslashes(os.path.join(c.path, d))
add_depend_subdirs = ''
for name, c in active_dependencies.items():
depend_subdir = replaceBackslashes(os.path.join(modbuilddir, name))
relpath = replaceBackslashes(os.path.relpath(depend_subdir, self.buildroot))
add_depend_subdirs += \
'add_subdirectory(\n' \
' "%s"\n' \
' "${CMAKE_BINARY_DIR}/%s"\n' \
')\n' \
% (depend_subdir, relpath)
delegate_to_existing = None
delegate_build_dir = None
module_is_empty = False
if os.path.isfile(os.path.join(component.path, 'CMakeLists.txt')) and not component.ignores('CMakeLists.txt'):
# adding custom CMake is a promise to generate a library: so the
# module is never empty in this case.
delegate_to_existing = component.path
add_own_subdirs = []
logger.debug("delegate to build dir: %s", builddir)
delegate_build_dir = os.path.join(builddir, 'existing')
else:
# !!! TODO: if they don't exist, that should possibly be a fatal
# error, not just a warning
self._validateListedSubdirsExist(component)
subdirs = self._listSubDirectories(component, toplevel)
manual_subdirs = subdirs['manual']
autogen_subdirs = subdirs['auto']
binary_subdirs = subdirs['bin']
lib_subdirs = subdirs['lib']
test_subdirs = subdirs['test']
resource_subdirs = subdirs['resource']
header_subdirs = subdirs['headers']
logger.debug("%s lib subdirs: %s, bin subdirs: %s", component, lib_subdirs, binary_subdirs)
add_own_subdirs = []
for f in manual_subdirs:
if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):
# if this module is a test dependency, then don't recurse
# to building its own tests.
if f in test_subdirs and component.isTestDependency():
continue
add_own_subdirs.append(
(os.path.join(component.path, f), f)
)
# names of all directories at this level with stuff in: used to figure
# out what to link automatically
all_subdirs = manual_subdirs + [x[0] for x in autogen_subdirs]
# first check if this module is empty:
if component.isTestDependency():
if len(autogen_subdirs) + len(add_own_subdirs) == 0:
module_is_empty = True
else:
if len(autogen_subdirs) + len(add_own_subdirs) <= len(test_subdirs):
module_is_empty = True
# autogenerate CMakeLists for subdirectories as appropriate:
for f, source_files in autogen_subdirs:
if f in test_subdirs:
# if this module is a test dependency, then don't recurse
# to building its own tests.
if component.isTestDependency():
continue
self.generateTestDirList(
builddir, f, source_files, component, immediate_dependencies, toplevel=toplevel, module_is_empty=module_is_empty
)
else:
if f in binary_subdirs:
is_executable = True
object_name = binary_subdirs[f]
else:
# not a test subdir or binary subdir: it must be a lib
# subdir
assert(f in lib_subdirs)
object_name = lib_subdirs[f]
for header_dir, header_files in header_subdirs:
source_files.extend(header_files)
self.generateSubDirList(
builddir = builddir,
dirname = f,
source_files = source_files,
component = component,
all_subdirs = all_subdirs,
immediate_dependencies = immediate_dependencies,
object_name = object_name,
resource_subdirs = resource_subdirs,
is_executable = (f in binary_subdirs)
)
add_own_subdirs.append(
(os.path.join(builddir, f), f)
)
# from now on, completely forget that this component had any tests
# if it is itself a test dependency:
if component.isTestDependency():
test_subdirs = []
# if we're not building anything other than tests, and this is a
# library module (not a binary) then we need to generate a dummy
# library so that this component can still be linked against
if module_is_empty:
if len(binary_subdirs):
logger.warning('nothing to build!')
else:
add_own_subdirs.append(self.createDummyLib(
component, builddir, [x[0] for x in immediate_dependencies.items() if not x[1].isTestDependency()]
))
toolchain_file_path = os.path.join(builddir, 'toolchain.cmake')
if toplevel:
# generate the top-level toolchain file:
template = jinja_environment.get_template('toolchain.cmake')
file_contents = template.render({ #pylint: disable=no-member
# toolchain files are provided in hierarchy
# order, but the template needs them in reverse
# order (base-first):
"toolchain_files": self.target.getToolchainFiles()
})
self._writeFile(toolchain_file_path, file_contents)
# generate the top-level CMakeLists.txt
template = jinja_environment.get_template('base_CMakeLists.txt')
relpath = os.path.relpath(builddir, self.buildroot)
file_contents = template.render({ #pylint: disable=no-member
"toplevel": toplevel,
"target_name": self.target.getName(),
"set_definitions": self.set_toplevel_definitions,
"toolchain_file": toolchain_file_path,
"component": component,
"relpath": relpath,
"include_root_dirs": include_root_dirs,
"include_sys_dirs": include_sys_dirs,
"include_other_dirs": include_other_dirs,
"add_depend_subdirs": add_depend_subdirs,
"add_own_subdirs": add_own_subdirs,
"config_include_file": self.config_include_file,
"delegate_to": delegate_to_existing,
"delegate_build_dir": delegate_build_dir,
"active_dependencies": active_dependencies,
"module_is_empty": module_is_empty,
"cmake_includes": self.target.getAdditionalIncludes()
})
self._writeFile(os.path.join(builddir, 'CMakeLists.txt'), file_contents)
|
def _handleAuth(fn):
''' Decorator to re-try API calls after asking the user for authentication. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# if yotta is being run noninteractively, then we never retry, but we
# do call auth.authorizeUser, so that a login URL can be displayed:
interactive = globalconf.get('interactive')
def retryWithAuthOrRaise(original_exception):
# in all cases ask for auth, so that in non-interactive mode a
# login URL is displayed
auth.authorizeUser(provider='github', interactive=interactive)
if not interactive:
raise original_exception
else:
logger.debug('trying with authtoken: %s', settings.getProperty('github', 'authtoken'))
return fn(*args, **kwargs)
# authorised requests have a higher rate limit, but display a warning
# message in this case, as the user might not expect the requirement to
# auth:
def handleRateLimitExceeded(original_exception):
if not _userAuthedWithGithub():
logger.warning('github rate limit for anonymous requests exceeded: you must log in')
return retryWithAuthOrRaise(original_exception)
else:
raise original_exception
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 403:
# 403 = rate limit exceeded
return handleRateLimitExceeded(e)
if e.response.status_code == 401:
# 401 = unauthorised
return retryWithAuthOrRaise(e)
raise
except github.BadCredentialsException as e:
logger.debug("github: bad credentials")
return retryWithAuthOrRaise(e)
except github.UnknownObjectException as e:
logger.debug("github: unknown object")
# some endpoints return 404 if the user doesn't have access, maybe
# it would be better to prompt for another username and password,
# and store multiple tokens that we can try for each request....
# but for now we assume that if the user is logged in then a 404
# really is a 404
if not _userAuthedWithGithub():
logger.info('failed to fetch Github object, re-trying with authentication...')
return retryWithAuthOrRaise(e)
raise
except github.RateLimitExceededException as e:
return handleRateLimitExceeded(e)
except github.GithubException as e:
if e.status == 403:
# 403 = rate limit exceeded
return handleRateLimitExceeded(e)
raise
return wrapped
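# Hypothetical usage sketch (not from the yotta sources): _handleAuth wraps any
# call into the GitHub API so that 401/403 responses and PyGithub credential
# errors trigger an authentication prompt and a single retry. 'Github' and
# 'settings' are the module-level imports already used elsewhere in this file;
# the wrapped function itself is made up for illustration.
@_handleAuth
def _getRepoDescription(repo_name):
    g = Github(settings.getProperty('github', 'authtoken'))
    return g.get_repo(repo_name).description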
|
def _getTags(repo):
''' return a dictionary of {tag: tarball_url}'''
logger.debug('get tags for %s', repo)
g = Github(settings.getProperty('github', 'authtoken'))
repo = g.get_repo(repo)
tags = repo.get_tags()
logger.debug('tags for %s: %s', repo, [t.name for t in tags])
return {t.name: _ensureDomainPrefixed(t.tarball_url) for t in tags}
|
def _getTipArchiveURL(repo):
''' return a string containing a tarball url '''
g = Github(settings.getProperty('github', 'authtoken'))
repo = g.get_repo(repo)
return repo.get_archive_link('tarball')
|
def _getCommitArchiveURL(repo, commit):
''' return a string containing a tarball url '''
g = Github(settings.getProperty('github', 'authtoken'))
repo = g.get_repo(repo)
return repo.get_archive_link('tarball', commit)
|
def _getTarball(url, into_directory, cache_key, origin_info=None):
'''unpack the specified tarball url into the specified directory'''
try:
access_common.unpackFromCache(cache_key, into_directory)
except KeyError as e:
tok = settings.getProperty('github', 'authtoken')
headers = {}
if tok is not None:
headers['Authorization'] = 'token ' + str(tok)
logger.debug('GET %s', url)
response = requests.get(url, allow_redirects=True, stream=True, headers=headers)
response.raise_for_status()
        logger.debug('getting file: %s', url)
        logger.debug('headers: %s', response.headers)
        # github doesn't expose hashes of the archives being downloaded, as far
        # as I can tell :(
access_common.unpackTarballStream(
stream = response,
into_directory = into_directory,
hash = {},
cache_key = cache_key,
origin_info = origin_info
)
|
def createFromSource(cls, vs, name=None):
    ''' returns a github component for any github url (including
        git+ssh://, git+http://, etc.), or None if this is not a Github URL.
For all of these we use the github api to grab a tarball, because
that's faster.
Normally version will be empty, unless the original url was of the
form: 'owner/repo @version' or 'url://...#version', which can be used
to grab a particular tagged version.
(Note that for github components we ignore the component name - it
doesn't have to match the github module name)
'''
return GithubComponent(vs.location, vs.spec, vs.semantic_spec, name)
|
def availableVersions(self):
''' return a list of Version objects, each with a tarball URL set '''
r = []
for t in self._getTags():
logger.debug("available version tag: %s", t)
# ignore empty tags:
if not len(t[0].strip()):
continue
try:
r.append(GithubComponentVersion(t[0], t[0], url=t[1], name=self.name, cache_key=None))
except ValueError:
logger.debug('invalid version tag: %s', t)
return r
|
def availableTags(self):
''' return a list of GithubComponentVersion objects for all tags
'''
return [
GithubComponentVersion(
'', t[0], t[1], self.name, cache_key=_createCacheKey('tag', t[0], t[1], self.name)
) for t in self._getTags()
]
|
def availableBranches(self):
''' return a list of GithubComponentVersion objects for the tip of each branch
'''
return [
GithubComponentVersion(
'', b[0], b[1], self.name, cache_key=None
) for b in _getBranchHeads(self.repo).items()
]
|
def commitVersion(self):
''' return a GithubComponentVersion object for a specific commit if valid
'''
import re
commit_match = re.match('^[a-f0-9]{7,40}$', self.tagOrBranchSpec(), re.I)
if commit_match:
return GithubComponentVersion(
'', '', _getCommitArchiveURL(self.repo, self.tagOrBranchSpec()), self.name, cache_key=None
)
return None
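# Quick illustration of the commit-spec regex used above: it accepts an
# abbreviated (>= 7 hex characters) or full 40-character sha, case-insensitively,
# and rejects branch-like names. Standalone sketch, not part of yotta.
import re

_commit_re = re.compile('^[a-f0-9]{7,40}$', re.I)
assert _commit_re.match('a1b2c3d')                                     # short sha
assert _commit_re.match('0123456789abcdef0123456789abcdef01234567')    # full sha
assert not _commit_re.match('master')                                  # branch name
assert not _commit_re.match('a1b2c')                                   # too short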
|
def createFromSource(cls, vs, name=None):
''' returns a hg component for any hg:// url, or None if this is not
a hg component.
Normally version will be empty, unless the original url was of the
form 'hg+ssh://...#version', which can be used to grab a particular
tagged version.
'''
    # strip the hg+ prefix from the url scheme:
if vs.location.startswith('hg+'):
location = vs.location[3:]
else:
location = vs.location
return HGComponent(location, vs.spec)
|
def dropRootPrivs(fn):
    ''' decorator to drop su/sudo privileges before running a function on
        unix/linux.
        The *real* uid is modified, so privileges are permanently dropped for
        the process (i.e. make sure you don't need to do anything else with
        elevated privileges afterwards).
        If there is a SUDO_UID environment variable, then we drop to that,
        otherwise we drop to nobody.
'''
def wrapped_fn(*args, **kwargs):
q = multiprocessing.Queue()
p = multiprocessing.Process(target=_dropPrivsReturnViaQueue, args=(q, fn, args, kwargs))
p.start()
r = None
e = None
while True:
msg = q.get()
if msg[0] == 'return':
r = msg[1]
if msg[0] == 'exception':
e = msg[1](msg[2])
if msg[0] == 'finish':
# if the command raised an exception, propagate this:
if e is not None:
raise e #pylint: disable=raising-bad-type
return r
return wrapped_fn
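# The helper used above is not shown in this file; the following is a plausible
# sketch (an assumption, not the real yotta implementation) of the message
# protocol it relies on: the child process drops privileges, runs the function,
# then reports 'return' or 'exception' followed by 'finish' on the queue, which
# is exactly what the loop in wrapped_fn consumes.
import os

def _dropPrivsReturnViaQueue_sketch(q, fn, args, kwargs):
    # drop to SUDO_UID/SUDO_GID if set, otherwise to 'nobody' (uid 65534 on many systems)
    drop_gid = int(os.environ.get('SUDO_GID', 65534))
    drop_uid = int(os.environ.get('SUDO_UID', 65534))
    os.setgid(drop_gid)
    os.setuid(drop_uid)
    try:
        q.put(('return', fn(*args, **kwargs)))
    except Exception as e:
        # send the exception class and message so the parent can re-raise it
        q.put(('exception', type(e), str(e)))
    q.put(('finish',))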
|
def installAndBuild(args, following_args):
''' Perform the build command, but provide detailed error information.
Returns {status:0, build_status:0, generate_status:0, install_status:0} on success.
If status: is nonzero there was some sort of error. Other properties
are optional, and may not be set if that step was not attempted.
'''
build_status = generate_status = install_status = 0
if not hasattr(args, 'build_targets'):
vars(args)['build_targets'] = []
if 'test' in args.build_targets:
logging.error('Cannot build "test". Use "yotta test" to run tests.')
return {'status':1}
cwd = os.getcwd()
c = validate.currentDirectoryModule()
if not c:
return {'status':1}
try:
target, errors = c.satisfyTarget(args.target, additional_config=args.config)
except access_common.AccessException as e:
logging.error(e)
return {'status':1}
if errors:
for error in errors:
logging.error(error)
return {'status':1}
# run the install command before building, we need to add some options the
# install command expects to be present to do this:
vars(args)['component'] = None
vars(args)['act_globally'] = False
if not hasattr(args, 'install_test_deps'):
if 'all_tests' in args.build_targets:
vars(args)['install_test_deps'] = 'all'
elif not len(args.build_targets):
vars(args)['install_test_deps'] = 'own'
else:
# If the named build targets include tests from other modules, we
# need to install the deps for those modules. To do this we need to
# be able to tell which module a library belongs to, which is not
# straightforward (especially if there is custom cmake involved).
# That's why this is 'all', and not 'none'.
vars(args)['install_test_deps'] = 'all'
# install may exit non-zero for non-fatal errors (such as incompatible
# version specs), which it will display
install_status = install.execCommand(args, [])
builddir = os.path.join(cwd, 'build', target.getName())
all_deps = c.getDependenciesRecursive(
target = target,
available_components = [(c.getName(), c)],
test = True
)
# if a dependency is missing the build will almost certainly fail, so don't try
missing = 0
for d in all_deps.values():
if not d and not (d.isTestDependency() and args.install_test_deps != 'all'):
logging.error('%s not available' % os.path.split(d.path)[1])
missing += 1
if missing:
logging.error('Missing dependencies prevent build. Use `yotta ls` to list them.')
return {'status': 1, 'install_status':install_status, 'missing_status':missing}
generator = cmakegen.CMakeGen(builddir, target)
    # only pass available dependencies to the generator's configure step:
config = generator.configure(c, all_deps)
logging.debug("config done, merged config: %s", config['merged_config_json'])
script_environment = {
'YOTTA_MERGED_CONFIG_FILE': config['merged_config_json']
}
# run pre-generate scripts for all components:
runScriptWithModules(c, all_deps.values(), 'preGenerate', script_environment)
app = c if len(c.getBinaries()) else None
for error in generator.generateRecursive(c, all_deps, builddir, application=app):
logging.error(error)
generate_status = 1
logging.debug("generate done.")
# run pre-build scripts for all components:
runScriptWithModules(c, all_deps.values(), 'preBuild', script_environment)
if (not hasattr(args, 'generate_only')) or (not args.generate_only):
error = target.build(
builddir, c, args, release_build=args.release_build,
build_args=following_args, targets=args.build_targets,
release_no_debug_info_build=args.release_no_debug_info_build
)
if error:
logging.error(error)
build_status = 1
else:
# post-build scripts only get run if we were successful:
runScriptWithModules(c, all_deps.values(), 'postBuild', script_environment)
if install_status:
logging.warning(
"There were also errors installing and resolving dependencies, "+
"which may have caused the build failure: see above, or run "+
"`yotta install` for details."
)
return {
'status': build_status or generate_status or install_status,
'missing_status': missing,
'build_status': build_status,
'generate_status': generate_status,
'install_status': install_status
}
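# Hedged usage sketch: how a caller might drive installAndBuild and turn its
# status dictionary into a process exit code. 'args' is whatever argparse
# namespace the yotta CLI would normally pass; the wrapper name here is an
# assumption for illustration only.
def execCommand_sketch(args, following_args):
    status = installAndBuild(args, following_args)
    # 'status' is nonzero if any of install/generate/build failed:
    return status['status']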
|
def _returnRequestError(fn):
''' Decorator that captures requests.exceptions.RequestException errors
        and returns them as an error message. If no error occurs the return
value of the wrapped function is returned (normally None). '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
return "server returned status %s: %s" % (e.response.status_code, e.message)
return wrapped
|
def _handleAuth(fn):
''' Decorator to re-try API calls after asking the user for authentication. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# auth, , authenticate users, internal
from yotta.lib import auth
# if yotta is being run noninteractively, then we never retry, but we
# do call auth.authorizeUser, so that a login URL can be displayed:
interactive = globalconf.get('interactive')
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.debug('%s unauthorised', fn)
# any provider is sufficient for registry auth
auth.authorizeUser(provider=None, interactive=interactive)
if interactive:
logger.debug('retrying after authentication...')
return fn(*args, **kwargs)
raise
return wrapped
|
def _friendlyAuthError(fn):
''' Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.error('insufficient permission')
elif e.response.status_code == requests.codes.bad and 'jwt has expired' in e.response.text.lower(): #pylint: disable=no-member
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
logger.error('Check that your system clock is set accurately!')
else:
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
raise
return wrapped
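# Illustration of the stacking order described in the docstring above:
# _friendlyAuthError goes *outside* _handleAuth, so the friendly message is
# only printed after the user has already had a chance to log in and the retry
# has failed. The wrapped function below is hypothetical; requests,
# _headersForRegistry and Registry_Base_URL are the names already used in this
# module.
@_friendlyAuthError
@_handleAuth
def _postExample(url, **kwargs):
    response = requests.post(url, headers=_headersForRegistry(Registry_Base_URL), **kwargs)
    response.raise_for_status()
    return response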
|
def _raiseUnavailableFor401(message):
''' Returns a decorator to swallow a requests exception for modules that
are not accessible without logging in, and turn it into an Unavailable
exception.
'''
def __raiseUnavailableFor401(fn):
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized:
raise access_common.Unavailable(message)
else:
raise
return wrapped
return __raiseUnavailableFor401
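# Usage sketch for the decorator factory above: because _raiseUnavailableFor401
# takes the message as an argument, it is applied with a call. The wrapped
# function below is a hypothetical example, not part of the original module.
@_raiseUnavailableFor401("this module is not accessible without logging in")
def _getExampleVersions(namespace, name, registry=None):
    registry = registry or Registry_Base_URL
    url = '%s/%s/%s/versions' % (registry, namespace, name)
    response = requests.get(url, headers=_headersForRegistry(registry))
    response.raise_for_status()
    return ordered_json.loads(response.text)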
|
def publish(namespace, name, version, description_file, tar_file, readme_file,
readme_file_ext, registry=None):
    ''' Publish a tarblob to the registry. If the request fails, an exception
        is raised, which either triggers re-authentication or is turned into a
        return value by the decorators. (If successful, the decorated function
        returns None.)
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
if readme_file_ext == '.md':
readme_section_name = 'readme.md'
elif readme_file_ext == '':
readme_section_name = 'readme'
else:
raise ValueError('unsupported readme type: "%s"' % readme_file_ext)
# description file is in place as text (so read it), tar file is a file
body = OrderedDict([('metadata', (None, description_file.read(),'application/json')),
('tarball',('tarball', tar_file)),
(readme_section_name, (readme_section_name, readme_file))])
headers = _headersForRegistry(registry)
response = requests.put(url, headers=headers, files=body)
response.raise_for_status()
return None
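# Hedged usage sketch showing how publish() expects its file arguments: the
# description is an open text file (its contents are read into the multipart
# body), while the tarball and readme are open binary file objects. All paths,
# the namespace and the module name here are hypothetical.
with open('module.json', 'r') as description_file, \
     open('example-module-0.1.0.tar.gz', 'rb') as tar_file, \
     open('README.md', 'rb') as readme_file:
    error = publish(
        namespace = 'modules',
        name = 'example-module',
        version = '0.1.0',
        description_file = description_file,
        tar_file = tar_file,
        readme_file = readme_file,
        readme_file_ext = '.md'
    )
    # publish returns None on success; failures either raise (triggering the
    # auth decorators) or are converted to an error string by them.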
|
def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
headers = _headersForRegistry(registry)
response = requests.delete(url, headers=headers)
response.raise_for_status()
return None
|
def listOwners(namespace, name, registry=None):
''' List the owners of a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners' % (
registry,
namespace,
name
)
request_headers = _headersForRegistry(registry)
response = requests.get(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return None
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
return ordered_json.loads(response.text)
|
def removeOwner(namespace, name, owner, registry=None):
''' Remove an owner for a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners/%s' % (
registry,
namespace,
name,
owner
)
request_headers = _headersForRegistry(registry)
response = requests.delete(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
return True
|
def search(query='', keywords=[], registry=None):
''' generator of objects returned by the search endpoint (both modules and
targets).
Query is a full-text search (description, name, keywords), keywords
search only the module/target description keywords lists.
If both parameters are specified the search is the intersection of the
two queries.
'''
registry = registry or Registry_Base_URL
url = '%s/search' % registry
headers = _headersForRegistry(registry)
params = {
'skip': 0,
'limit': 50
}
if len(query):
params['query'] = query
if len(keywords):
params['keywords[]'] = keywords
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
objects = ordered_json.loads(response.text)
if len(objects):
for o in objects:
yield o
params['skip'] += params['limit']
else:
break
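# Consumption sketch: search() is a generator that transparently pages through
# results 50 at a time, so callers typically bound the iteration themselves.
# The query, keyword and result field names below are assumptions for
# illustration only.
import itertools

for result in itertools.islice(search(query='mbed', keywords=['gpio']), 10):
    print(result.get('name'), result.get('version'))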
|
def setAPIKey(registry, api_key):
''' Set the api key for accessing a registry. This is only necessary for
development/test registries.
'''
if (registry is None) or (registry == Registry_Base_URL):
return
sources = _getSources()
source = None
for s in sources:
if _sourceMatches(s, registry):
source = s
if source is None:
source = {
'type':'registry',
'url':registry,
}
sources.append(source)
source['apikey'] = api_key
settings.set('sources', sources)
|
def getPublicKey(registry=None):
''' Return the user's public key (generating and saving a new key pair if necessary) '''
registry = registry or Registry_Base_URL
pubkey_pem = None
if _isPublicRegistry(registry):
pubkey_pem = settings.getProperty('keys', 'public')
else:
for s in _getSources():
if _sourceMatches(s, registry):
if 'keys' in s and s['keys'] and 'public' in s['keys']:
pubkey_pem = s['keys']['public']
break
if not pubkey_pem:
pubkey_pem, privatekey_pem = _generateAndSaveKeys()
else:
        # settings are unicode, we should be able to safely encode to ascii for
# the key though, as it will either be hex or PEM encoded:
pubkey_pem = pubkey_pem.encode('ascii')
    # if the key doesn't look like PEM, it might be hex-encoded DER (which we
    # used historically), so try loading that:
if b'-----BEGIN PUBLIC KEY-----' in pubkey_pem:
pubkey = serialization.load_pem_public_key(pubkey_pem, default_backend())
else:
pubkey_der = binascii.unhexlify(pubkey_pem)
pubkey = serialization.load_der_public_key(pubkey_der, default_backend())
return _pubkeyWireFormat(pubkey)
|
def getAuthData(registry=None):
''' Poll the registry to get the result of a completed authentication
(which, depending on the authentication the user chose or was directed
to, will include a github or other access token)
'''
registry = registry or Registry_Base_URL
url = '%s/tokens' % (
registry
)
request_headers = _headersForRegistry(registry)
logger.debug('poll for tokens... %s', request_headers)
try:
response = requests.get(url, headers=request_headers)
except requests.RequestException as e:
logger.debug(str(e))
return None
if response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.debug('Unauthorised')
return None
elif response.status_code == requests.codes.not_found: #pylint: disable=no-member
logger.debug('Not Found')
return None
body = response.text
    logger.debug('auth data response: %s', body)
r = {}
parsed_response = ordered_json.loads(body)
if 'error' in parsed_response:
raise AuthError(parsed_response['error'])
for token in parsed_response:
if 'provider' in token and token['provider'] and 'accessToken' in token:
r[token['provider']] = token['accessToken']
break
    logger.debug('parsed auth tokens %s', r)
return r
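# A hedged illustration (made-up values) of the response shape getAuthData()
# expects: a JSON list of token records with 'provider' and 'accessToken'
# keys, which the loop above reduces to a {provider: token} mapping.
example_body = '[{"provider": "github", "accessToken": "abc123"}]'
parsed = ordered_json.loads(example_body)
tokens = {t['provider']: t['accessToken'] for t in parsed if t.get('provider') and 'accessToken' in t}
assert tokens == {'github': 'abc123'}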
|