sentence1 (stringlengths 52-3.87M) | sentence2 (stringlengths 1-47.2k) | label (stringclasses 1 value)
---|---|---|
def confidence_interval_arr(data, conf=0.95):
r""" Computes element-wise confidence intervals from a sample of ndarrays
Given a sample of arbitrarily shaped ndarrays, computes element-wise confidence intervals
Parameters
----------
data : ndarray (K, (shape))
ndarray of ndarrays, the first index is a sample index, the remaining indexes are specific to the
array of interest
conf : float, optional, default = 0.95
confidence level for the interval
Returns
-------
lower : ndarray(shape)
element-wise lower bounds
upper : ndarray(shape)
element-wise upper bounds
"""
if conf < 0 or conf > 1:
raise ValueError('Not a meaningful confidence level: '+str(conf))
# list or 1D-array? then fuse it
if types.is_list(data) or (isinstance(data, np.ndarray) and np.ndim(data) == 1):
newshape = tuple([len(data)] + list(data[0].shape))
newdata = np.zeros(newshape)
for i in range(len(data)):
newdata[i, :] = data[i]
data = newdata
# do we have an array now? if yes go, if no fail
if types.is_float_array(data):
I = _indexes(data[0])
lower = np.zeros(data[0].shape)
upper = np.zeros(data[0].shape)
for i in I:
col = _column(data, i)
m, lower[i], upper[i] = confidence_interval(col, conf)
# return
return lower, upper
else:
raise TypeError('data cannot be converted to an ndarray') | r""" Computes element-wise confidence intervals from a sample of ndarrays
Given a sample of arbitrarily shaped ndarrays, computes element-wise confidence intervals
Parameters
----------
data : ndarray (K, (shape))
ndarray of ndarrays, the first index is a sample index, the remaining indexes are specific to the
array of interest
conf : float, optional, default = 0.95
confidence level for the interval
Returns
-------
lower : ndarray(shape)
element-wise lower bounds
upper : ndarray(shape)
element-wise upper bounds | entailment |
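As a hedged aside (not part of the library above), here is a minimal self-contained sketch of the same element-wise idea using numpy and scipy's Student-t interval; the helper name and the normal-theory assumption are illustrative only:

import numpy as np
from scipy import stats

def elementwise_ci(sample, conf=0.95):
    # sample: ndarray of shape (K, ...); returns (lower, upper) with the trailing shape
    sample = np.asarray(sample, dtype=float)
    mean = sample.mean(axis=0)
    sem = stats.sem(sample, axis=0)                      # element-wise standard error of the mean
    half = sem * stats.t.ppf((1.0 + conf) / 2.0, sample.shape[0] - 1)
    return mean - half, mean + half

lower, upper = elementwise_ci(np.random.randn(100, 2, 3))  # 100 samples of a (2, 3) array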
def status(self, remote=False):
"""
Return the connection status, both locally and remotely.
The local connection status is a dictionary that gives:
* the count of multiple queries sent to the server.
* the count of single queries sent to the server.
* the count of actions sent to the server.
* the count of actions executed successfully by the server.
* the count of actions queued to go to the server.
The remote connection status includes whether the server is live,
as well as data about version and build. The server data is
cached, unless the remote flag is specified.
:param remote: whether to query the server for its latest status
:return: tuple of status dicts: (local, server).
"""
if remote:
components = urlparse.urlparse(self.endpoint)
try:
result = self.session.get(components[0] + "://" + components[1] + "/status", timeout=self.timeout)
except Exception as e:
if self.logger: self.logger.debug("Failed to connect to server for status: %s", e)
result = None
if result and result.status_code == 200:
self.server_status = result.json()
self.server_status["endpoint"] = self.endpoint
elif result:
if self.logger: self.logger.debug("Server status response not understandable: Status: %d, Body: %s",
result.status_code, result.text)
self.server_status = {"endpoint": self.endpoint,
"status": ("Unexpected HTTP status " + str(result.status_code) + " at: " +
strftime("%d %b %Y %H:%M:%S +0000", gmtime()))}
else:
self.server_status = {"endpoint": self.endpoint,
"status": "Unreachable at: " + strftime("%d %b %Y %H:%M:%S +0000", gmtime())}
return self.local_status, self.server_status | Return the connection status, both locally and remotely.
The local connection status is a dictionary that gives:
* the count of multiple queries sent to the server.
* the count of single queries sent to the server.
* the count of actions sent to the server.
* the count of actions executed successfully by the server.
* the count of actions queued to go to the server.
The remote connection status includes whether the server is live,
as well as data about version and build. The server data is
cached, unless the remote flag is specified.
:param remote: whether to query the server for its latest status
:return: tuple of status dicts: (local, server). | entailment |
def query_single(self, object_type, url_params, query_params=None):
# type: (str, list, dict) -> dict
"""
Query for a single object.
:param object_type: string query type (e.g., "users" or "groups")
:param url_params: required list of strings to provide as additional URL components
:param query_params: optional dictionary of query options
:return: the found object (a dictionary), which is empty if none were found
"""
# Server API convention (v2) is that the pluralized object type goes into the endpoint
# but the object type is the key in the response dictionary for the returned object.
self.local_status["single-query-count"] += 1
query_type = object_type + "s" # poor man's plural
query_path = "/organizations/{}/{}".format(self.org_id, query_type)
for component in url_params if url_params else []:
query_path += "/" + urlparse.quote(component, safe='/@')
if query_params: query_path += "?" + urlparse.urlencode(query_params)
try:
result = self.make_call(query_path)
body = result.json()
except RequestError as re:
if re.result.status_code == 404:
if self.logger: self.logger.debug("Ran %s query: %s %s (0 found)",
object_type, url_params, query_params)
return {}
else:
raise re
if body.get("result") == "success":
value = body.get(object_type, {})
if self.logger: self.logger.debug("Ran %s query: %s %s (1 found)", object_type, url_params, query_params)
return value
else:
raise ClientError("OK status but no 'success' result", result) | Query for a single object.
:param object_type: string query type (e.g., "users" or "groups")
:param url_params: required list of strings to provide as additional URL components
:param query_params: optional dictionary of query options
:return: the found object (a dictionary), which is empty if none were found | entailment |
def query_multiple(self, object_type, page=0, url_params=None, query_params=None):
# type: (str, int, list, dict) -> tuple
"""
Query for a page of objects. Defaults to the (0-based) first page.
Sadly, the sort order is undetermined.
:param object_type: string constant query type ("user", "group", or "user-group")
:param page: numeric page (0-based) of results to get (up to 200 in a page)
:param url_params: optional list of strings to provide as additional URL components
:param query_params: optional dictionary of query options
:return: tuple (list of returned dictionaries (one for each query result), bool for whether this is last page)
"""
# As of 2017-10-01, we are moving to different URLs for user and user-group queries,
# and these endpoints have different conventions for pagination. For the time being,
# we are also preserving the more general "group" query capability.
self.local_status["multiple-query-count"] += 1
if object_type in ("user", "group"):
query_path = "/{}s/{}/{:d}".format(object_type, self.org_id, page)
if url_params: query_path += "/" + "/".join([urlparse.quote(c) for c in url_params])
if query_params: query_path += "?" + urlparse.urlencode(query_params)
elif object_type == "user-group":
query_path = "/{}/user-groups".format(self.org_id)
if url_params: query_path += "/" + "/".join([urlparse.quote(c) for c in url_params])
query_path += "?page={:d}".format(page+1)
if query_params: query_path += "&" + urlparse.urlencode(query_params)
else:
raise ArgumentError("Unknown query object type ({}): must be 'user' or 'group'".format(object_type))
try:
result = self.make_call(query_path)
body = result.json()
except RequestError as re:
if re.result.status_code == 404:
if self.logger: self.logger.debug("Ran %s query: %s %s (0 found)",
object_type, url_params, query_params)
return [], True
else:
raise re
if object_type in ("user", "group"):
if body.get("result") == "success":
values = body.get(object_type + "s", [])
last_page = body.get("lastPage", False)
if self.logger: self.logger.debug("Ran multi-%s query: %s %s (page %d: %d found)",
object_type, url_params, query_params, page, len(values))
return values, last_page
else:
raise ClientError("OK status but no 'success' result", result)
elif object_type == "user-group":
page_number = result.headers.get("X-Current-Page", "1")
page_count = result.headers.get("X-Page-Count", "1")
if self.logger: self.logger.debug("Ran multi-group query: %s %s (page %d: %d found)",
url_params, query_params, page, len(body))
return body, int(page_number) >= int(page_count)
else:
# this would actually be caught above, but we use a parallel construction in both places
# to make it easy to add query object types
raise ArgumentError("Unknown query object type ({}): must be 'user' or 'group'".format(object_type)) | Query for a page of objects. Defaults to the (0-based) first page.
Sadly, the sort order is undetermined.
:param object_type: string constant query type ("user", "group", or "user-group")
:param page: numeric page (0-based) of results to get (up to 200 in a page)
:param url_params: optional list of strings to provide as additional URL components
:param query_params: optional dictionary of query options
:return: tuple (list of returned dictionaries (one for each query result), bool for whether this is last page) | entailment |
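Hypothetical usage sketch for the method above; `conn` is assumed to be an already-constructed Connection with valid credentials, which is not shown here:

page = 0
all_users = []
while True:
    users, last_page = conn.query_multiple("user", page)  # one page of up to 200 users
    all_users.extend(users)
    if last_page:
        break
    page += 1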
def execute_single(self, action, immediate=False):
"""
Execute a single action (containing commands on a single object).
Normally, actions are batched so as to be most efficient about execution,
but if you want this command sent immediately (and all prior queued commands
sent earlier in this command's batch), specify a True value for the immediate flag.
Since any command can fill the current batch, your command may be submitted
even if you don't specify the immediate flag. So don't think of this as always
being a queue call when immediate=False.
Returns the number of actions in the queue, that got sent, and that executed successfully.
:param action: the Action to be executed
:param immediate: whether the Action should be executed immediately
:return: the number of actions in the queue, that got sent, and that executed successfully.
"""
return self.execute_multiple([action], immediate=immediate) | Execute a single action (containing commands on a single object).
Normally, actions are batched so as to be most efficient about execution,
but if you want this command sent immediately (and all prior queued commands
sent earlier in this command's batch), specify a True value for the immediate flag.
Since any command can fill the current batch, your command may be submitted
even if you don't specify the immediate flag. So don't think of this as always
being a queue call when immediate=False.
Returns the number of actions in the queue, that got sent, and that executed successfully.
:param action: the Action to be executed
:param immediate: whether the Action should be executed immediately
:return: the number of actions in the queue, that got sent, and that executed successfully. | entailment |
def execute_multiple(self, actions, immediate=True):
"""
Execute multiple Actions (each containing commands on a single object).
Normally, the actions are sent for execution immediately (possibly preceded
by earlier queued commands), but if you are going for maximum efficiency
you can set immediate=False, which will cause the connection to wait
and batch as many actions as allowed in each server call.
Since any command can fill the current batch, one or more of your commands may be submitted
even if you don't specify the immediate flag. So don't think of this call as always
being a queue call when immediate=False.
Returns the number of actions left in the queue, that got sent, and that executed successfully.
NOTE: This is where we throttle the number of commands per action. So the number
of actions we were given may not be the same as the number we queue or send to the server.
NOTE: If the server gives us a response we don't understand, we note that and continue
processing as usual. Then, at the end of the batch, we throw in order to warn the client
that we had a problem understanding the server.
:param actions: the list of Action objects to be executed
:param immediate: whether to immediately send them to the server
:return: tuple: the number of actions in the queue, that got sent, and that executed successfully.
"""
# throttling part 1: split up each action into smaller actions, as needed
# optionally split large lists of groups in add/remove commands (if action supports it)
split_actions = []
exceptions = []
for a in actions:
if len(a.commands) == 0:
if self.logger: self.logger.warning("Sending action with no commands: %s", a.frame)
# maybe_split_groups is a UserAction attribute, so the call may throw an AttributeError
try:
if a.maybe_split_groups(self.throttle_groups):
if self.logger: self.logger.debug(
"Throttling actions %s to have a maximum of %d entries in group lists.",
a.frame, self.throttle_groups)
except AttributeError:
pass
if len(a.commands) > self.throttle_commands:
if self.logger: self.logger.debug("Throttling action %s to have a maximum of %d commands.",
a.frame, self.throttle_commands)
split_actions += a.split(self.throttle_commands)
else:
split_actions.append(a)
actions = self.action_queue + split_actions
# throttling part 2: execute the action list in batches, as needed
sent = completed = 0
batch_size = self.throttle_actions
min_size = 1 if immediate else batch_size
while len(actions) >= min_size:
batch, actions = actions[0:batch_size], actions[batch_size:]
if self.logger: self.logger.debug("Executing %d actions (%d remaining).", len(batch), len(actions))
sent += len(batch)
try:
completed += self._execute_batch(batch)
except Exception as e:
exceptions.append(e)
self.action_queue = actions
self.local_status["actions-queued"] = queued = len(actions)
self.local_status["actions-sent"] += sent
self.local_status["actions-completed"] += completed
if exceptions:
raise BatchError(exceptions, queued, sent, completed)
return queued, sent, completed | Execute multiple Actions (each containing commands on a single object).
Normally, the actions are sent for execution immediately (possibly preceded
by earlier queued commands), but if you are going for maximum efficiency
you can set immediate=False, which will cause the connection to wait
and batch as many actions as allowed in each server call.
Since any command can fill the current batch, one or more of your commands may be submitted
even if you don't specify the immediate flag. So don't think of this call as always
being a queue call when immediate=False.
Returns the number of actions left in the queue, that got sent, and that executed successfully.
NOTE: This is where we throttle the number of commands per action. So the number
of actions we were given may not be the same as the number we queue or send to the server.
NOTE: If the server gives us a response we don't understand, we note that and continue
processing as usual. Then, at the end of the batch, we throw in order to warn the client
that we had a problem understanding the server.
:param actions: the list of Action objects to be executed
:param immediate: whether to immediately send them to the server
:return: tuple: the number of actions in the queue, that got sent, and that executed successfully. | entailment |
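Hypothetical usage sketch of the queueing behaviour described above; `conn` is assumed to be a Connection and `a1`, `a2`, `a3` prebuilt Action objects:

# Queue two actions without forcing a server call...
queued, sent, completed = conn.execute_multiple([a1, a2], immediate=False)
# ...then flush the queue along with a third action in one immediate call.
queued, sent, completed = conn.execute_single(a3, immediate=True)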
def _execute_batch(self, actions):
"""
Execute a single batch of Actions.
For each action that has a problem, we annotate the action with the
error information for that action, and we return the number of
successful actions in the batch.
:param actions: the list of Action objects to be executed
:return: count of successful actions
"""
wire_form = [a.wire_dict() for a in actions]
if self.test_mode:
result = self.make_call("/action/%s?testOnly=true" % self.org_id, wire_form)
else:
result = self.make_call("/action/%s" % self.org_id, wire_form)
body = result.json()
if body.get("errors", None) is None:
if body.get("result") != "success":
if self.logger: self.logger.warning("Server action result: no errors, but no success:\n%s", body)
return len(actions)
try:
if body.get("result") == "success":
if self.logger: self.logger.warning("Server action result: errors, but success report:\n%s", body)
for error in body["errors"]:
actions[error["index"]].report_command_error(error)
except:
raise ClientError(str(body), result)
return body.get("completed", 0) | Execute a single batch of Actions.
For each action that has a problem, we annotate the action with the
error information for that action, and we return the number of
successful actions in the batch.
:param actions: the list of Action objects to be executed
:return: count of successful actions | entailment |
def make_call(self, path, body=None, delete=False):
"""
Make a single UMAPI call with error handling and retry on temporary failure.
:param path: the string endpoint path for the call
:param body: (optional) list of dictionaries to be serialized into the request body
:param delete: (optional) if True and no body is given, make a DELETE request instead of a GET
:return: the requests.result object (on success status), raise error otherwise
"""
if body:
request_body = json.dumps(body)
def call():
return self.session.post(self.endpoint + path, auth=self.auth, data=request_body, timeout=self.timeout)
else:
if not delete:
def call():
return self.session.get(self.endpoint + path, auth=self.auth, timeout=self.timeout)
else:
def call():
return self.session.delete(self.endpoint + path, auth=self.auth, timeout=self.timeout)
start_time = time()
result = None
for num_attempts in range(1, self.retry_max_attempts + 1):
try:
result = call()
if result.status_code in [200,201,204]:
return result
elif result.status_code in [429, 502, 503, 504]:
if self.logger: self.logger.warning("UMAPI timeout...service unavailable (code %d on try %d)",
result.status_code, num_attempts)
retry_wait = 0
if "Retry-After" in result.headers:
advice = result.headers["Retry-After"]
advised_time = parsedate_tz(advice)
if advised_time is not None:
# header contains date
retry_wait = int(mktime_tz(advised_time) - time())
else:
# header contains delta seconds
retry_wait = int(advice)
if retry_wait <= 0:
# use exponential back-off with random delay
delay = randint(0, self.retry_random_delay)
retry_wait = (int(pow(2, num_attempts - 1)) * self.retry_first_delay) + delay
elif 201 <= result.status_code < 400:
raise ClientError("Unexpected HTTP Status {:d}: {}".format(result.status_code, result.text), result)
elif 400 <= result.status_code < 500:
raise RequestError(result)
else:
raise ServerError(result)
except requests.Timeout:
if self.logger: self.logger.warning("UMAPI connection timeout...(%d seconds on try %d)",
self.timeout, num_attempts)
retry_wait = 0
result = None
if num_attempts < self.retry_max_attempts:
if retry_wait > 0:
if self.logger: self.logger.warning("Next retry in %d seconds...", retry_wait)
sleep(retry_wait)
else:
if self.logger: self.logger.warning("Immediate retry...")
total_time = int(time() - start_time)
if self.logger: self.logger.error("UMAPI timeout...giving up after %d attempts (%d seconds).",
self.retry_max_attempts, total_time)
raise UnavailableError(self.retry_max_attempts, total_time, result) | Make a single UMAPI call with error handling and retry on temporary failure.
:param path: the string endpoint path for the call
:param body: (optional) list of dictionaries to be serialized into the request body
:param delete: (optional) if True and no body is given, make a DELETE request instead of a GET
:return: the requests.result object (on success status), raise error otherwise | entailment |
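A standalone sketch of the retry-wait computation used above (the delay constants are illustrative assumptions, not the class defaults):

from random import randint

def retry_wait_seconds(attempt, retry_after=None, first_delay=3, random_delay=5):
    if retry_after is not None and retry_after > 0:
        return retry_after                                # honor the server's Retry-After advice
    # otherwise exponential back-off with a small random jitter
    return (2 ** (attempt - 1)) * first_delay + randint(0, random_delay)

print([retry_wait_seconds(n) for n in range(1, 5)])       # roughly [3, 6, 12, 24] plus jitter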
def paginate(query, org_id, max_pages=maxsize, max_records=maxsize):
"""
Paginate through all results of a UMAPI query
:param query: a query method from a UMAPI instance (callable as a function)
:param org_id: the organization being queried
:param max_pages: the max number of pages to collect before returning (default all)
:param max_records: the max number of records to collect before returning (default all)
:return: the queried records
"""
page_count = 0
record_count = 0
records = []
while page_count < max_pages and record_count < max_records:
res = make_call(query, org_id, page_count)
page_count += 1
# the following incredibly ugly piece of code is very fragile.
# the problem is that we are a "dumb helper" that doesn't understand
# the semantics of the UMAPI or know which query we were given.
if "groups" in res:
records += res["groups"]
elif "users" in res:
records += res["users"]
record_count = len(records)
if res.get("lastPage"):
break
return records | Paginate through all results of a UMAPI query
:param query: a query method from a UMAPI instance (callable as a function)
:param org_id: the organization being queried
:param max_pages: the max number of pages to collect before returning (default all)
:param max_records: the max number of records to collect before returning (default all)
:return: the queried records | entailment |
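Hypothetical usage sketch; `api` is assumed to be a legacy UMAPI instance whose `users` and `groups` methods are the query callables, and `org_id` an organization id string:

first_five_pages = paginate(api.users, org_id, max_pages=5)
some_groups = paginate(api.groups, org_id, max_records=1000)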
def make_call(query, org_id, page):
"""
Make a single UMAPI call with error handling and server-controlled throttling.
(Adapted from sample code at https://www.adobe.io/products/usermanagement/docs/samples#retry)
:param query: a query method from a UMAPI instance (callable as a function)
:param org_id: the organization being queried
:param page: the page number of the desired result set
:return: the json (dictionary) received from the server (if any)
"""
wait_time = 0
num_attempts = 0
while num_attempts < retry_max_attempts:
if wait_time > 0:
sleep(wait_time)
wait_time = 0
try:
num_attempts += 1
return query(org_id, page)
except UMAPIRetryError as e:
logger.warning("UMAPI service temporarily unavailable (attempt %d) -- %s", num_attempts, e.res.status_code)
if "Retry-After" in e.res.headers:
advice = e.res.headers["Retry-After"]
advised_time = parsedate_tz(advice)
if advised_time is not None:
# header contains date
wait_time = int(mktime_tz(advised_time) - time())
else:
# header contains delta seconds
wait_time = int(advice)
if wait_time <= 0:
# use exponential back-off with random delay
delay = randint(0, retry_random_delay_max)
wait_time = (int(pow(2, num_attempts)) * retry_exponential_backoff_factor) + delay
logger.warning("Next retry in %d seconds...", wait_time)
continue
except UMAPIRequestError as e:
logger.warning("UMAPI error processing request -- %s", e.code)
return {}
except UMAPIError as e:
logger.warning("HTTP error processing request -- %s: %s", e.res.status_code, e.res.text)
return {}
logger.error("UMAPI timeout...giving up on results page %d after %d attempts.", page, retry_max_attempts)
return {} | Make a single UMAPI call with error handling and server-controlled throttling.
(Adapted from sample code at https://www.adobe.io/products/usermanagement/docs/samples#retry)
:param query: a query method from a UMAPI instance (callable as a function)
:param org_id: the organization being queried
:param page: the page number of the desired result set
:return: the json (dictionary) received from the server (if any) | entailment |
def do(self, **kwargs):
"""
Here for compatibility with legacy clients only - DO NOT USE!!!
This is sort of a mix of "append" and "insert": it puts commands in the list,
with some half smarts about which commands go at the front or back.
If you add multiple commands to the back in one call, they will get added sorted by command name.
:param kwargs: the commands in key=val format
:return: the Action, so you can do Action(...).do(...).do(...)
"""
# add "create" / "add" / "removeFrom" first
for k, v in list(six.iteritems(kwargs)):
if k.startswith("create") or k.startswith("addAdobe") or k.startswith("removeFrom"):
self.commands.append({k: v})
del kwargs[k]
# now do the other actions, in a canonical order (to avoid py2/py3 variations)
for k, v in sorted(six.iteritems(kwargs)):
if k in ['add', 'remove']:
self.commands.append({k: {"product": v}})
else:
self.commands.append({k: v})
return self | Here for compatibility with legacy clients only - DO NOT USE!!!
This is sort of a mix of "append" and "insert": it puts commands in the list,
with some half smarts about which commands go at the front or back.
If you add multiple commands to the back in one call, they will get added sorted by command name.
:param kwargs: the commands in key=val format
:return: the Action, so you can do Action(...).do(...).do(...) | entailment |
def split(self, max_commands):
"""
Split this action into an equivalent list of actions, each of which has at most max_commands commands.
:param max_commands: max number of commands allowed in any action
:return: the list of actions created from this one
"""
a_prior = Action(**self.frame)
a_prior.commands = list(self.commands)
self.split_actions = [a_prior]
while len(a_prior.commands) > max_commands:
a_next = Action(**self.frame)
a_prior.commands, a_next.commands = a_prior.commands[0:max_commands], a_prior.commands[max_commands:]
self.split_actions.append(a_next)
a_prior = a_next
return self.split_actions | Split this action into an equivalent list of actions, each of which have at most max_commands commands.
:param max_commands: max number of commands allowed in any action
:return: the list of commands created from this one | entailment |
def append(self, **kwargs):
"""
Add commands at the end of the sequence.
Be careful: because this runs in Python 2.x, the order of the kwargs dict may not match
the order in which the args were specified. Thus, if you care about specific ordering,
you must make multiple calls to append in that order. Luckily, append returns
the Action so you can compose easily: Action(...).append(...).append(...).
See also insert, below.
:param kwargs: the key/value pairs to add
:return: the action
"""
for k, v in six.iteritems(kwargs):
self.commands.append({k: v})
return self | Add commands at the end of the sequence.
Be careful: because this runs in Python 2.x, the order of the kwargs dict may not match
the order in which the args were specified. Thus, if you care about specific ordering,
you must make multiple calls to append in that order. Luckily, append returns
the Action so you can compose easily: Action(...).append(...).append(...).
See also insert, below.
:param kwargs: the key/value pairs to add
:return: the action | entailment |
def insert(self, **kwargs):
"""
Insert commands at the beginning of the sequence.
This is provided because certain commands
have to come first (such as user creation), but may need to be added
after other commands have already been specified.
Later calls to insert put their commands before those in the earlier calls.
Also, since the order of iterated kwargs is not guaranteed (in Python 2.x),
you should really only call insert with one keyword at a time. See the doc of append
for more details.
:param kwargs: the key/value pair to insert first
:return: the action, so you can chain Action(...).insert(...).append(...)
"""
for k, v in six.iteritems(kwargs):
self.commands.insert(0, {k: v})
return self | Insert commands at the beginning of the sequence.
This is provided because certain commands
have to come first (such as user creation), but may need to be added
after other commands have already been specified.
Later calls to insert put their commands before those in the earlier calls.
Also, since the order of iterated kwargs is not guaranteed (in Python 2.x),
you should really only call insert with one keyword at a time. See the doc of append
for more details.
:param kwargs: the key/value pair to insert first
:return: the action, so you can chain Action(...).insert(...).append(...) | entailment |
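Hypothetical usage sketch of insert versus append, assuming an Action constructed with frame keywords; it shows creation landing at the front of the command list regardless of call order:

a = Action(user="jdoe@example.com")
a.append(add={"product": ["Default Group"]})                                 # goes to the back
a.insert(createEnterpriseID={"email": "jdoe@example.com", "country": "US"})  # goes to the front
# a.commands is now [{"createEnterpriseID": {...}}, {"add": {"product": ["Default Group"]}}]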
def report_command_error(self, error_dict):
"""
Report a server error executing a command.
We keep track of the command's position in the command list,
and we add annotation of what the command was, to the error.
:param error_dict: The server's error dict for the error encountered
"""
error = dict(error_dict)
error["command"] = self.commands[error_dict["step"]]
error["target"] = self.frame
del error["index"] # throttling can change which action this was in the batch
del error["step"] # throttling can change which step this was in the action
self.errors.append(error) | Report a server error executing a command.
We keep track of the command's position in the command list,
and we add annotation of what the command was, to the error.
:param error_dict: The server's error dict for the error encountered | entailment |
def execution_errors(self):
"""
Return a list of commands that encountered execution errors, with the error.
Each dictionary entry gives the command dictionary and the error dictionary
:return: list of commands that gave errors, with their error information
"""
if self.split_actions:
# throttling split this action, get errors from the split
return [dict(e) for s in self.split_actions for e in s.errors]
else:
return [dict(e) for e in self.errors] | Return a list of commands that encountered execution errors, with the error.
Each dictionary entry gives the command dictionary and the error dictionary
:return: list of commands that gave errors, with their error information | entailment |
def maybe_split_groups(self, max_groups):
"""
Check if group lists in add/remove directives should be split and split them if needed
:param max_groups: Max group list size
:return: True if at least one command was split, False if none were split
"""
split_commands = []
# return True if we split at least once
maybe_split = False
valid_step_keys = ['add', 'addRoles', 'remove']
for command in self.commands:
# commands are assumed to contain a single key
step_key, step_args = next(six.iteritems(command))
if step_key not in valid_step_keys or not isinstance(step_args, dict):
split_commands.append(command)
continue
new_commands = [command]
while True:
new_command = {step_key: {}}
for group_type, groups in six.iteritems(command[step_key]):
if len(groups) > max_groups:
command[step_key][group_type], new_command[step_key][group_type] = \
groups[0:max_groups], groups[max_groups:]
if new_command[step_key]:
new_commands.append(new_command)
command = new_command
maybe_split = True
else:
break
split_commands += new_commands
self.commands = split_commands
return maybe_split | Check if group lists in add/remove directives should be split and split them if needed
:param max_groups: Max group list size
:return: True if at least one command was split, False if none were split | entailment |
def reload(self):
"""
Rerun the query (lazily).
The results will contain any values on the server side that have changed since the last run.
:return: None
"""
self._results = []
self._next_item_index = 0
self._next_page_index = 0
self._last_page_seen = False | Rerun the query (lazily).
The results will contain any values on the server side that have changed since the last run.
:return: None | entailment |
def _next_page(self):
"""
Fetch the next page of the query.
"""
if self._last_page_seen:
raise StopIteration
new, self._last_page_seen = self.conn.query_multiple(self.object_type, self._next_page_index,
self.url_params, self.query_params)
self._next_page_index += 1
if len(new) == 0:
self._last_page_seen = True # don't bother with next page if nothing was returned
else:
self._results += new | Fetch the next page of the query. | entailment |
def all_results(self):
"""
Eagerly fetch all the results.
This can be called after already doing some amount of iteration, and it will return
all the previously-iterated results as well as any results that weren't yet iterated.
:return: a list of all the results.
"""
while not self._last_page_seen:
self._next_page()
self._next_item_index = len(self._results)
return list(self._results) | Eagerly fetch all the results.
This can be called after already doing some amount of iteration, and it will return
all the previously-iterated results as well as any results that weren't yet iterated.
:return: a list of all the results. | entailment |
def _fetch_result(self):
"""
Fetch the queried object.
"""
self._result = self.conn.query_single(self.object_type, self.url_params, self.query_params) | Fetch the queried object. | entailment |
def create(self, first_name=None, last_name=None, country=None, email=None,
on_conflict=IfAlreadyExistsOptions.ignoreIfAlreadyExists):
"""
Create the user on the Adobe back end.
See [Issue 32](https://github.com/adobe-apiplatform/umapi-client.py/issues/32): because
we retry create calls if they time out, the default conflict handling for creation is to ignore the
create call if the user already exists (possibly from an earlier call that timed out).
:param first_name: (optional) user first name
:param last_name: (optional) user last name
:param country: (optional except for Federated ID) user 2-letter ISO country code
:param email: user email, if not already specified at create time
:param on_conflict: IfAlreadyExistsOption (or string name thereof) controlling creation of existing users
:return: the User, so you can do User(...).create(...).add_to_groups(...)
"""
# first validate the params: email, on_conflict, first_name, last_name, country
create_params = {}
if email is None:
if not self.email:
raise ArgumentError("You must specify email when creating a user")
elif self.email is None:
self.email = email
elif self.email.lower() != email.lower():
raise ArgumentError("Specified email (%s) doesn't match user's email (%s)" % (email, self.email))
create_params["email"] = self.email
if on_conflict in IfAlreadyExistsOptions.__members__:
on_conflict = IfAlreadyExistsOptions[on_conflict]
if on_conflict not in IfAlreadyExistsOptions:
raise ArgumentError("on_conflict must be one of {}".format([o.name for o in IfAlreadyExistsOptions]))
if on_conflict != IfAlreadyExistsOptions.errorIfAlreadyExists:
create_params["option"] = on_conflict.name
if first_name: create_params["firstname"] = first_name # per issue #54 now allowed for all identity types
if last_name: create_params["lastname"] = last_name # per issue #54 now allowed for all identity types
if country: create_params["country"] = country # per issue #55 should not be defaulted
# each type is created using a different call
if self.id_type == IdentityTypes.adobeID:
return self.insert(addAdobeID=dict(**create_params))
elif self.id_type == IdentityTypes.enterpriseID:
return self.insert(createEnterpriseID=dict(**create_params))
else:
return self.insert(createFederatedID=dict(**create_params)) | Create the user on the Adobe back end.
See [Issue 32](https://github.com/adobe-apiplatform/umapi-client.py/issues/32): because
we retry create calls if they time out, the default conflict handling for creation is to ignore the
create call if the user already exists (possibly from an earlier call that timed out).
:param first_name: (optional) user first name
:param last_name: (optional) user last name
:param country: (optional except for Federated ID) user 2-letter ISO country code
:param email: user email, if not already specified at create time
:param on_conflict: IfAlreadyExistsOption (or string name thereof) controlling creation of existing users
:return: the User, so you can do User(...).create(...).add_to_groups(...) | entailment |
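Hypothetical usage sketch of the chaining described above, assuming UserAction is constructed with an identity type and email and then handed to a Connection for execution:

user = UserAction(id_type=IdentityTypes.federatedID, email="jdoe@example.com")
user.create(first_name="Jane", last_name="Doe", country="US") \
    .add_to_groups(groups=["Default Acrobat Pro DC configuration"])
conn.execute_single(user)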
def update(self, email=None, username=None, first_name=None, last_name=None, country=None):
"""
Update values on an existing user. See the API docs for what kinds of update are possible.
:param email: new email for this user
:param username: new username for this user
:param first_name: new first name for this user
:param last_name: new last name for this user
:param country: new country for this user
:return: the User, so you can do User(...).update(...).add_to_groups(...)
"""
if username and self.id_type != IdentityTypes.federatedID:
raise ArgumentError("You cannot set username except for a federated ID")
if username and '@' in username and not email:
raise ArgumentError("Cannot update email-type username when email is not specified")
if email and username and email.lower() == username.lower():
raise ArgumentError("Specify just email to set both email and username for a federated ID")
updates = {}
for k, v in six.iteritems(dict(email=email, username=username,
firstname=first_name, lastname=last_name,
country=country)):
if v:
updates[k] = v
return self.append(update=updates) | Update values on an existing user. See the API docs for what kinds of update are possible.
:param email: new email for this user
:param username: new username for this user
:param first_name: new first name for this user
:param last_name: new last name for this user
:param country: new country for this user
:return: the User, so you can do User(...).update(...).add_to_groups(...) | entailment |
def add_to_groups(self, groups=None, all_groups=False, group_type=None):
"""
Add user to some (typically PLC) groups. Note that, if you add to no groups, the effect
is simply to do an "add to organization Everybody group", so we let that be done.
:param groups: list of group names the user should be added to
:param all_groups: a boolean meaning add to all (don't specify groups or group_type in this case)
:param group_type: the type of group (defaults to "product")
:return: the User, so you can do User(...).add_to_groups(...).add_role(...)
"""
if all_groups:
if groups or group_type:
raise ArgumentError("When adding to all groups, do not specify specific groups or types")
glist = "all"
else:
if not groups:
groups = []
if not group_type:
group_type = GroupTypes.product
elif group_type in GroupTypes.__members__:
group_type = GroupTypes[group_type]
if group_type not in GroupTypes:
raise ArgumentError("You must specify a GroupType value for argument group_type")
glist = {group_type.name: [group for group in groups]}
return self.append(add=glist) | Add user to some (typically PLC) groups. Note that, if you add to no groups, the effect
is simply to do an "add to organization Everybody group", so we let that be done.
:param groups: list of group names the user should be added to
:param all_groups: a boolean meaning add to all (don't specify groups or group_type in this case)
:param group_type: the type of group (defaults to "product")
:return: the User, so you can do User(...).add_to_groups(...).add_role(...) | entailment |
def remove_from_groups(self, groups=None, all_groups=False, group_type=None):
"""
Remove user from some PLC groups, or all of them.
:param groups: list of group names the user should be removed from
:param all_groups: a boolean meaning remove from all (don't specify groups or group_type in this case)
:param group_type: the type of group (defaults to "product")
:return: the User, so you can do User(...).remove_from_groups(...).add_role(...)
"""
if all_groups:
if groups or group_type:
raise ArgumentError("When removing from all groups, do not specify specific groups or types")
glist = "all"
else:
if not groups:
raise ArgumentError("You must specify groups from which to remove the user")
if not group_type:
group_type = GroupTypes.product
elif group_type in GroupTypes.__members__:
group_type = GroupTypes[group_type]
if group_type not in GroupTypes:
raise ArgumentError("You must specify a GroupType value for argument group_type")
glist = {group_type.name: [group for group in groups]}
return self.append(remove=glist) | Remove user from some PLC groups, or all of them.
:param groups: list of group names the user should be removed from
:param all_groups: a boolean meaning remove from all (don't specify groups or group_type in this case)
:param group_type: the type of group (defaults to "product")
:return: the User, so you can do User(...).remove_from_groups(...).add_role(...) | entailment |
def add_role(self, groups=None, role_type=RoleTypes.admin):
"""
Make user have a role (typically PLC admin) with respect to some PLC groups.
:param groups: list of group names the user should have this role for
:param role_type: the role (defaults to "admin")
:return: the User, so you can do User(...).add_role(...).add_to_groups(...)
"""
if not groups:
raise ArgumentError("You must specify groups to which to add the role for this user")
if role_type in RoleTypes.__members__:
role_type = RoleTypes[role_type]
if role_type not in RoleTypes:
raise ArgumentError("You must specify a RoleType value for argument role_type")
glist = {role_type.name: [group for group in groups]}
return self.append(addRoles=glist) | Make user have a role (typically PLC admin) with respect to some PLC groups.
:param groups: list of group names the user should have this role for
:param role_type: the role (defaults to "admin")
:return: the User, so you can do User(...).add_role(...).add_to_groups(...) | entailment |
def remove_role(self, groups=None, role_type=RoleTypes.admin):
"""
Remove user from a role (typically admin) of some groups.
:param groups: list of group names the user should NOT have this role for
:param role_type: the type of role (defaults to "admin")
:return: the User, so you can do User(...).remove_role(...).remove_from_groups(...)
"""
if not groups:
raise ArgumentError("You must specify groups from which to remove the role for this user")
if role_type in RoleTypes.__members__:
role_type = RoleTypes[role_type]
if role_type not in RoleTypes:
raise ArgumentError("You must specify a RoleType value for argument role_type")
glist = {role_type.name: [group for group in groups]}
return self.append(removeRoles=glist) | Remove user from a role (typically admin) of some groups.
:param groups: list of group names the user should NOT have this role for
:param role_type: the type of role (defaults to "admin")
:return: the User, so you can do User(...).remove_role(...).remove_from_groups(...) | entailment |
def remove_from_organization(self, delete_account=False):
"""
Remove a user from the organization's list of visible users. Optionally also delete the account.
Deleting the account can only be done if the organization owns the account's domain.
:param delete_account: Whether to delete the account after removing from the organization (default false)
:return: None, because you cannot follow this command with another.
"""
self.append(removeFromOrg={"deleteAccount": True if delete_account else False})
return None | Remove a user from the organization's list of visible users. Optionally also delete the account.
Deleting the account can only be done if the organization owns the account's domain.
:param delete_account: Whether to delete the account after removing from the organization (default false)
:return: None, because you cannot follow this command with another. | entailment |
def delete_account(self):
"""
Delete a user's account.
Deleting the user's account can only be done if the user's domain is controlled by the authorized organization,
and removing the account will also remove the user from all organizations with access to that domain.
:return: None, because you cannot follow this command with another.
"""
if self.id_type == IdentityTypes.adobeID:
raise ArgumentError("You cannot delete an Adobe ID account.")
self.append(removeFromDomain={})
return None | Delete a user's account.
Deleting the user's account can only be done if the user's domain is controlled by the authorized organization,
and removing the account will also remove the user from all organizations with access to that domain.
:return: None, because you cannot follow this command with another. | entailment |
def _validate(cls, group_name):
"""
Validates the group name
Input values must be strings (standard or unicode). Throws ArgumentError if any input is invalid
:param group_name: name of group
"""
if group_name and not cls._group_name_regex.match(group_name):
raise ArgumentError("'%s': Illegal group name" % (group_name,))
if group_name and len(group_name) > 255:
raise ArgumentError("'%s': Group name is too long" % (group_name,)) | Validates the group name
Input values must be strings (standard or unicode). Throws ArgumentError if any input is invalid
:param group_name: name of group | entailment |
def add_to_products(self, products=None, all_products=False):
"""
Add user group to some product license configuration groups (PLCs), or all of them.
:param products: list of product names the user should be added to
:param all_products: a boolean meaning add to all (don't specify products in this case)
:return: the Group, so you can do Group(...).add_to_products(...).add_users(...)
"""
if all_products:
if products:
raise ArgumentError("When adding to all products, do not specify specific products")
plist = "all"
else:
if not products:
raise ArgumentError("You must specify products to which to add the user group")
plist = {GroupTypes.productConfiguration.name: [product for product in products]}
return self.append(add=plist) | Add user group to some product license configuration groups (PLCs), or all of them.
:param products: list of product names the user should be added to
:param all_products: a boolean meaning add to all (don't specify products in this case)
:return: the Group, so you can do Group(...).add_to_products(...).add_users(...) | entailment |
def remove_from_products(self, products=None, all_products=False):
"""
Remove user group from some product license configuration groups (PLCs), or all of them.
:param products: list of product names the user group should be removed from
:param all_products: a boolean meaning remove from all (don't specify products in this case)
:return: the Group, so you can do Group(...).remove_from_products(...).add_users(...)
"""
if all_products:
if products:
raise ArgumentError("When removing from all products, do not specify specific products")
plist = "all"
else:
if not products:
raise ArgumentError("You must specify products from which to remove the user group")
plist = {GroupTypes.productConfiguration.name: [product for product in products]}
return self.append(remove=plist) | Remove user group from some product license configuration groups (PLCs), or all of them.
:param products: list of product names the user group should be removed from
:param all_products: a boolean meaning remove from all (don't specify products in this case)
:return: the Group, so you can do Group(...).remove_from_products(...).add_users(...) | entailment |
def add_users(self, users=None):
"""
Add users (specified by email address) to this user group.
In case of ambiguity (two users with same email address), the non-AdobeID user is preferred.
:param users: list of emails for users to add to the group.
:return: the Group, so you can do Group(...).add_users(...).add_to_products(...)
"""
if not users:
raise ArgumentError("You must specify emails for users to add to the user group")
ulist = {"user": [user for user in users]}
return self.append(add=ulist) | Add users (specified by email address) to this user group.
In case of ambiguity (two users with same email address), the non-AdobeID user is preferred.
:param users: list of emails for users to add to the group.
:return: the Group, so you can do Group(...).add_users(...).add_to_products(...) | entailment |
def remove_users(self, users=None):
"""
Remove users (specified by email address) from this user group.
In case of ambiguity (two users with same email address), the non-AdobeID user is preferred.
:param users: list of emails for users to remove from the group.
:return: the Group, so you can do Group(...).remove_users(...).add_to_products(...)
"""
if not users:
raise ArgumentError("You must specify emails for users to remove from the user group")
ulist = {"user": [user for user in users]}
return self.append(remove=ulist) | Remove users (specified by email address) from this user group.
In case of ambiguity (two users with same email address), the non-AdobeID user is preferred.
:param users: list of emails for users to remove from the group.
:return: the Group, so you can do Group(...).remove_users(...).add_to_products(...) | entailment |
def scale2x(self, surface):
"""
Scales using the AdvanceMAME Scale2X algorithm which does a
'jaggie-less' scale of bitmap graphics.
"""
assert(self._scale == 2)
return self._pygame.transform.scale2x(surface) | Scales using the AdvanceMAME Scale2X algorithm which does a
'jaggie-less' scale of bitmap graphics. | entailment |
def smoothscale(self, surface):
"""
Smooth scaling using MMX or SSE extensions if available
"""
return self._pygame.transform.smoothscale(surface, self._output_size) | Smooth scaling using MMX or SSE extensions if available | entailment |
def identity(self, surface):
"""
Fast scale operation that does not sample the results
"""
return self._pygame.transform.scale(surface, self._output_size) | Fast scale operation that does not sample the results | entailment |
def led_matrix(self, surface):
"""
Transforms the input surface into an LED matrix (1 pixel = 1 LED)
"""
scale = self._led_on.get_width()
w, h = self._input_size
pix = self._pygame.PixelArray(surface)
img = self._pygame.Surface((w * scale, h * scale))
for y in range(h):
for x in range(w):
led = self._led_on if pix[x, y] & 0xFFFFFF > 0 else self._led_off
img.blit(led, (x * scale, y * scale))
return img | Transforms the input surface into an LED matrix (1 pixel = 1 LED) | entailment |
def rgb2short(r, g, b):
"""
Converts RGB values to the nearest equivalent xterm-256 color.
"""
# Using list of snap points, convert RGB value to cube indexes
r, g, b = [len(tuple(s for s in snaps if s < x)) for x in (r, g, b)]
# Simple colorcube transform
return (r * 36) + (g * 6) + b + 16 | Converts RGB values to the nearest equivalent xterm-256 color. | entailment |
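Worked example of the colour-cube arithmetic above. The real `snaps` list lives at module level; the values below assume the usual midpoints between the xterm cube levels (0, 95, 135, 175, 215, 255) purely for illustration:

snaps = [47, 115, 155, 195, 235]   # assumed snap points, darkest to brightest

def rgb2short_demo(r, g, b):
    # count how many snap points each channel exceeds -> cube index 0..5 per channel
    r, g, b = [len([s for s in snaps if s < x]) for x in (r, g, b)]
    return (r * 36) + (g * 6) + b + 16

print(rgb2short_demo(255, 0, 0))   # 196 -> bright red in the 256-colour cube
print(rgb2short_demo(0, 0, 255))   # 21  -> bright blue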
def to_surface(self, image, alpha=1.0):
"""
Converts a :py:mod:`PIL.Image` into a :class:`pygame.Surface`,
transforming it according to the ``transform`` and ``scale``
constructor arguments.
"""
assert(0.0 <= alpha <= 1.0)
if alpha < 1.0:
im = image.convert("RGBA")
black = Image.new(im.mode, im.size, "black")
im = Image.blend(black, im, alpha)
else:
im = image.convert("RGB")
mode = im.mode
size = im.size
data = im.tobytes()
del im
surface = self._pygame.image.fromstring(data, size, mode)
return self._transform(surface) | Converts a :py:mod:`PIL.Image` into a :class:`pygame.Surface`,
transforming it according to the ``transform`` and ``scale``
constructor arguments. | entailment |
def display(self, image):
"""
Takes a :py:mod:`PIL.Image` and dumps it to a numbered PNG file.
"""
assert(image.size == self.size)
self._last_image = image
self._count += 1
filename = self._file_template.format(self._count)
image = self.preprocess(image)
surface = self.to_surface(image, alpha=self._contrast)
logger.debug("Writing: {0}".format(filename))
self._pygame.image.save(surface, filename) | Takes a :py:mod:`PIL.Image` and dumps it to a numbered PNG file. | entailment |
def display(self, image):
"""
Takes an image, scales it according to the nominated transform, and
stores it for later building into an animated GIF.
"""
assert(image.size == self.size)
self._last_image = image
image = self.preprocess(image)
surface = self.to_surface(image, alpha=self._contrast)
rawbytes = self._pygame.image.tostring(surface, "RGB", False)
im = Image.frombytes("RGB", surface.get_size(), rawbytes)
self._images.append(im)
self._count += 1
logger.debug("Recording frame: {0}".format(self._count))
if self._max_frames and self._count >= self._max_frames:
sys.exit(0) | Takes an image, scales it according to the nominated transform, and
stores it for later building into an animated GIF. | entailment |
def display(self, image):
"""
Takes a :py:mod:`PIL.Image` and renders it to a pygame display surface.
"""
assert(image.size == self.size)
self._last_image = image
image = self.preprocess(image)
self._clock.tick(self._fps)
self._pygame.event.pump()
if self._abort():
self._pygame.quit()
sys.exit()
surface = self.to_surface(image, alpha=self._contrast)
if self._screen is None:
self._screen = self._pygame.display.set_mode(surface.get_size())
self._screen.blit(surface, (0, 0))
self._pygame.display.flip() | Takes a :py:mod:`PIL.Image` and renders it to a pygame display surface. | entailment |
def _char_density(self, c, font=ImageFont.load_default()):
"""
Count the number of black pixels in a rendered character.
"""
image = Image.new('1', font.getsize(c), color=255)
draw = ImageDraw.Draw(image)
draw.text((0, 0), c, fill="white", font=font)
return collections.Counter(image.getdata())[0] | Count the number of black pixels in a rendered character. | entailment |
def _generate_art(self, image, width, height):
"""
Return an iterator that produces the ascii art.
"""
# Characters aren't square, so scale the output by the aspect ratio of a character
height = int(height * self._char_width / float(self._char_height))
image = image.resize((width, height), Image.ANTIALIAS).convert("RGB")
for (r, g, b) in image.getdata():
greyscale = int(0.299 * r + 0.587 * g + 0.114 * b)
ch = self._chars[int(greyscale / 255. * (len(self._chars) - 1) + 0.5)]
yield (ch, rgb2short(r, g, b)) | Return an iterator that produces the ascii art. | entailment |
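Worked example of the luminance mapping above (ITU-R BT.601 weights); the 10-step character ramp is an assumption, not the class's actual `_chars` attribute:

chars = " .:-=+*#%@"                                    # assumed ramp, darkest first
r, g, b = 0, 200, 0                                     # a mid-bright green pixel
greyscale = int(0.299 * r + 0.587 * g + 0.114 * b)      # 117
index = int(greyscale / 255. * (len(chars) - 1) + 0.5)  # 4
print(greyscale, repr(chars[index]))                    # 117 '='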
def display(self, image):
"""
Takes a :py:mod:`PIL.Image` and renders it to the current terminal as
ASCII-art.
"""
assert(image.size == self.size)
self._last_image = image
surface = self.to_surface(self.preprocess(image), alpha=self._contrast)
rawbytes = self._pygame.image.tostring(surface, "RGB", False)
image = Image.frombytes("RGB", surface.get_size(), rawbytes)
scr_width = self._stdscr.getmaxyx()[1]
scale = float(scr_width) / image.width
self._stdscr.erase()
self._stdscr.move(0, 0)
try:
for (ch, color) in self._generate_art(image, int(image.width * scale), int(image.height * scale)):
self._stdscr.addstr(ch, curses.color_pair(color))
except curses.error:
# End of screen reached
pass
self._stdscr.refresh() | Takes a :py:mod:`PIL.Image` and renders it to the current terminal as
ASCII-art. | entailment |
def _generate_art(self, image, width, height):
"""
Return an iterator that produces the ascii art.
"""
image = image.resize((width, height), Image.ANTIALIAS).convert("RGB")
pixels = list(image.getdata())
for y in range(0, height - 1, 2):
for x in range(width):
i = y * width + x
bg = rgb2short(*(pixels[i]))
fg = rgb2short(*(pixels[i + width]))
yield (fg, bg) | Return an iterator that produces the ascii art. | entailment |
def _CSI(self, cmd):
"""
Control sequence introducer
"""
sys.stdout.write('\x1b[')
sys.stdout.write(cmd) | Control sequence introducer | entailment |
def display(self, image):
"""
Takes a :py:mod:`PIL.Image` and renders it to the current terminal as
ASCII-blocks.
"""
assert(image.size == self.size)
self._last_image = image
surface = self.to_surface(self.preprocess(image), alpha=self._contrast)
rawbytes = self._pygame.image.tostring(surface, "RGB", False)
image = Image.frombytes("RGB", surface.get_size(), rawbytes)
scr_width = self._terminal_size()[1]
scale = float(scr_width) / image.width
self._CSI('1;1H') # Move to top/left
for (fg, bg) in self._generate_art(image, int(image.width * scale), int(image.height * scale)):
self._CSI('38;5;{0};48;5;{1}m'.format(fg, bg))
sys.stdout.write('▄')
self._CSI('0m')
sys.stdout.flush() | Takes a :py:mod:`PIL.Image` and renders it to the current terminal as
ASCII-blocks. | entailment |
def chunk_sequence(sequence, chunk_length):
"""Yield successive n-sized chunks from l."""
for index in range(0, len(sequence), chunk_length):
yield sequence[index:index + chunk_length] | Yield successive chunk_length-sized chunks from sequence. | entailment |
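Quick usage demonstration, assuming chunk_sequence above is in scope:

print(list(chunk_sequence("abcdefg", 3)))         # ['abc', 'def', 'g']
print(list(chunk_sequence([1, 2, 3, 4, 5], 2)))   # [[1, 2], [3, 4], [5]]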
def _filter_invalid_routes(routes, board, railroad):
"""
Given a collection of routes, returns a new set containing only valid routes. Invalid routes removed:
- contain less than 2 cities, or
- go through Chicago using an impassable exit
- only contain Chicago as a station, but don't use the correct exit path
This filtering after the fact keeps the path finding algorithm simpler. It allows groups of 3 cells to be considered
(important for the Chicago checks), which would otherwise be tricky, since the algorithm operates on pairs of cells (at the
time of writing).
"""
chicago_space = board.get_space(CHICAGO_CELL)
chicago_neighbor_cells = [cell for cell in CHICAGO_CELL.neighbors.values() if cell != CHICAGO_CONNECTIONS_CELL]
stations = board.stations(railroad.name)
# A sieve style filter. If a condition isn't met, iteration continues to the next item. Items meeting all conditions
# are added to valid_routes at the end of the loop iteration.
valid_routes = set()
for route in routes:
# A route must connect at least 2 cities.
if len(route.cities) < 2:
continue
# A route cannot run from east to east
if isinstance(route.cities[0], EastTerminalCity) and isinstance(route.cities[-1], EastTerminalCity):
continue
# If the route goes through Chicago and isn't [C5, D6], ensure the path it took either contains its station or is unblocked
if route.contains_cell(CHICAGO_CONNECTIONS_CELL) and len(route.cities) != 2:
# Finds the subroute which starts at Chicago and is 3 tiles long. That is, it will go [C5, D6, chicago exit]
all_chicago_subroutes = [subroute for subroute in route.subroutes(CHICAGO_CONNECTIONS_CELL) if len(subroute) == 3]
chicago_subroute = all_chicago_subroutes[0] if all_chicago_subroutes else None
for cell in chicago_neighbor_cells:
chicago_exit = chicago_subroute and chicago_subroute.contains_cell(cell)
if chicago_exit and chicago_space.passable(cell, railroad):
break
else:
continue
# Each route must contain at least 1 station
stations_on_route = [station for station in stations if route.contains_cell(station.cell)]
if not stations_on_route:
continue
# If the only station is Chicago, the path must be [D6, C5], or exit through the appropriate side.
elif [CHICAGO_CELL] == [station.cell for station in stations_on_route]:
exit_cell = board.get_space(CHICAGO_CELL).get_station_exit_cell(stations_on_route[0])
chicago_exit_route = Route.create([chicago_space, board.get_space(exit_cell)])
if not (len(route) == 2 and route.contains_cell(CHICAGO_CONNECTIONS_CELL)) and not route.overlap(chicago_exit_route):
continue
valid_routes.add(route)
return valid_routes | Given a collection of routes, returns a new set containing only valid routes. Invalid routes removed:
- contain less than 2 cities, or
- go through Chicago using an impassable exit
- only contain Chicago as a station, but don't use the correct exit path
This filtering after the fact keeps the path finding algorithm simpler. It allows groups of 3 cells to be considered
(important for the Chicago checks), which would be tricky, since the algorithm operates on pairs of cells (at the
time of writing). | entailment |
def find(path,
level=None,
message=None,
time_lower=None, time_upper=None,
case_sensitive=False): # pragma: no cover
"""
Filter log message.
Select the matching log lines by level name, by keywords contained in the message, and by the log time interval.
"""
if level:
level = level.upper() # level name has to be capitalized.
if message and not case_sensitive:
message = message.lower()
with open(path, "r") as f:
result = Result(path=path,
level=level, message=message,
time_lower=time_lower, time_upper=time_upper,
case_sensitive=case_sensitive,
)
for line in f:
try:
_time, _level, _message = [i.strip() for i in line.split(";")]
if level:
if _level != level:
continue
if time_lower:
if _time < time_lower:
continue
if time_upper:
if _time > time_upper:
continue
if message:
if not case_sensitive:
_message = _message.lower()
if message not in _message:
continue
result.lines.append(line)
except Exception as e:
print(e)
return result | Filter log message.
Select the matching log lines by level name, by keywords contained in the message, and by the log time interval. | entailment |
def get_logger_by_name(name=None, rand_name=False, charset=Charset.HEX):
"""
Get a logger by name.
:param name: None / str, logger name.
:param rand_name: if True, ``name`` will be ignored, a random name will be used.
"""
if rand_name:
name = rand_str(charset)
logger = logging.getLogger(name)
return logger | Get a logger by name.
:param name: None / str, logger name.
:param rand_name: if True, ``name`` will be ignored, a random name will be used. | entailment |
def debug(self, msg, indent=0, **kwargs):
"""invoke ``self.logger.debug``"""
return self.logger.debug(self._indent(msg, indent), **kwargs) | invoke ``self.logger.debug`` | entailment |
def info(self, msg, indent=0, **kwargs):
"""invoke ``self.info.debug``"""
return self.logger.info(self._indent(msg, indent), **kwargs) | invoke ``self.logger.info`` | entailment |
def warning(self, msg, indent=0, **kwargs):
"""invoke ``self.logger.warning``"""
return self.logger.warning(self._indent(msg, indent), **kwargs) | invoke ``self.logger.warning`` | entailment |
def error(self, msg, indent=0, **kwargs):
"""invoke ``self.logger.error``"""
return self.logger.error(self._indent(msg, indent), **kwargs) | invoke ``self.logger.error`` | entailment |
def critical(self, msg, indent=0, **kwargs):
"""invoke ``self.logger.critical``"""
return self.logger.critical(self._indent(msg, indent), **kwargs) | invoke ``self.logger.critical`` | entailment |
def show(self, msg, indent=0, style="", **kwargs):
"""
Print message to console, indent format may apply.
"""
if self.enable_verbose:
new_msg = self.MessageTemplate.with_style.format(
indent=self.tab * indent,
style=style,
msg=msg,
)
print(new_msg, **kwargs) | Print message to console, indent format may apply. | entailment |
def remove_all_handler(self):
"""
Unlink the file handler association.
"""
for handler in self.logger.handlers[:]:
self.logger.removeHandler(handler)
self._handler_cache.append(handler) | Unlink the file handler association. | entailment |
def recover_all_handler(self):
"""
Relink the file handler association you just removed.
"""
for handler in self._handler_cache:
self.logger.addHandler(handler)
self._handler_cache = list() | Relink the file handler association you just removed. | entailment |
def from_protobuf(cls, proto: LinkItemProto) -> LinkItem:
"""
Constructor from protobuf.
:param proto: protobuf structure
:type proto: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto
:return: the LinkItem
:rtype: ~unidown.plugin.link_item.LinkItem
:raises ValueError: name of LinkItem does not exist inside the protobuf or is empty
"""
if proto.name == '':
raise ValueError("name of LinkItem does not exist or is empty inside the protobuf.")
return cls(proto.name, Timestamp.ToDatetime(proto.time)) | Constructor from protobuf.
:param proto: protobuf structure
:type proto: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto
:return: the LinkItem
:rtype: ~unidown.plugin.link_item.LinkItem
:raises ValueError: name of LinkItem does not exist inside the protobuf or is empty | entailment |
def to_protobuf(self) -> LinkItemProto:
"""
Create protobuf item.
:return: protobuf structure
:rtype: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto
"""
result = LinkItemProto()
result.name = self._name
result.time.CopyFrom(datetime_to_timestamp(self._time))
return result | Create protobuf item.
:return: protobuf structure
:rtype: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto | entailment |
def update(self, fname):
"""
Adds a handler to save to a file. Includes debug stuff.
"""
ltfh = FileHandler(fname)
self._log.addHandler(ltfh) | Adds a handler to save to a file. Includes debug stuff. | entailment |
def delete_dir_rec(path: Path):
"""
Delete a folder recursively.
:param path: folder to be deleted
:type path: ~pathlib.Path
"""
if not path.exists() or not path.is_dir():
return
for sub in path.iterdir():
if sub.is_dir():
delete_dir_rec(sub)
else:
sub.unlink()
path.rmdir() | Delete a folder recursively.
:param path: folder to be deleted
:type path: ~pathlib.Path | entailment |
def create_dir_rec(path: Path):
"""
Create a folder recursively.
:param path: path
:type path: ~pathlib.Path
"""
if not path.exists():
Path.mkdir(path, parents=True, exist_ok=True) | Create a folder recursively.
:param path: path
:type path: ~pathlib.Path | entailment |
def datetime_to_timestamp(time: datetime) -> Timestamp:
"""
Convert datetime to protobuf.timestamp.
:param time: time
:type time: ~datetime.datetime
:return: protobuf.timestamp
:rtype: ~google.protobuf.timestamp_pb2.Timestamp
"""
protime = Timestamp()
protime.FromDatetime(time)
return protime | Convert datetime to protobuf.timestamp.
:param time: time
:type time: ~datetime.datetime
:return: protobuf.timestamp
:rtype: ~google.protobuf.timestamp_pb2.Timestamp | entailment |
def print_plugin_list(plugins: Dict[str, pkg_resources.EntryPoint]):
"""
Prints all registered plugins and checks if they can be loaded or not.
:param plugins: plugins
:type plugins: Dict[str, ~pkg_resources.EntryPoint]
"""
for trigger, entry_point in plugins.items():
try:
plugin_class = entry_point.load()
version = str(plugin_class._info.version)
print(
f"{trigger} (ok)\n"
f" {version}"
)
except Exception:
print(
f"{trigger} (failed)"
) | Prints all registered plugins and checks if they can be loaded or not.
:param plugins: plugins
:type plugins: Dict[str, ~pkg_resources.EntryPoint] | entailment |
def overlap(xl1, yl1, nx1, ny1, xl2, yl2, nx2, ny2):
"""
Determines whether two windows overlap
"""
return (xl2 < xl1+nx1 and xl2+nx2 > xl1 and
yl2 < yl1+ny1 and yl2+ny2 > yl1) | Determines whether two windows overlap | entailment |
def saveJSON(g, data, backup=False):
"""
Saves the current setup to disk.
g : hcam_drivers.globals.Container
Container with globals
data : dict
The current setup in JSON compatible dictionary format.
backup : bool
If we are saving a backup on close, don't prompt for filename
"""
if not backup:
fname = filedialog.asksaveasfilename(
defaultextension='.json',
filetypes=[('json files', '.json'), ],
initialdir=g.cpars['app_directory']
)
else:
fname = os.path.join(os.path.expanduser('~/.hdriver'), 'app.json')
if not fname:
g.clog.warn('Aborted save to disk')
return False
with open(fname, 'w') as of:
of.write(
json.dumps(data, sort_keys=True, indent=4,
separators=(',', ': '))
)
g.clog.info('Saved setup to ' + fname)
return True | Saves the current setup to disk.
g : hcam_drivers.globals.Container
Container with globals
data : dict
The current setup in JSON compatible dictionary format.
backup : bool
If we are saving a backup on close, don't prompt for filename | entailment |
def postJSON(g, data):
"""
Posts the current setup to the camera and data servers.
g : hcam_drivers.globals.Container
Container with globals
data : dict
The current setup in JSON compatible dictionary format.
"""
g.clog.debug('Entering postJSON')
# encode data as json
json_data = json.dumps(data).encode('utf-8')
# Send the xml to the server
url = urllib.parse.urljoin(g.cpars['hipercam_server'], g.SERVER_POST_PATH)
g.clog.debug('Server URL = ' + url)
opener = urllib.request.build_opener()
g.clog.debug('content length = ' + str(len(json_data)))
req = urllib.request.Request(url, data=json_data, headers={'Content-type': 'application/json'})
response = opener.open(req, timeout=15).read()
g.rlog.debug('Server response: ' + response.decode())
csr = ReadServer(response, status_msg=False)
if not csr.ok:
g.clog.warn('Server response was not OK')
g.rlog.warn('postJSON response: ' + response.decode())
g.clog.warn('Server error = ' + csr.err)
return False
# now try to setup nodding server if appropriate
if g.cpars['telins_name'] == 'GTC':
url = urllib.parse.urljoin(g.cpars['gtc_offset_server'], 'setup')
g.clog.debug('Offset Server URL = ' + url)
opener = urllib.request.build_opener()
try:
req = urllib.request.Request(url, data=json_data, headers={'Content-type': 'application/json'})
response = opener.open(req, timeout=5).read().decode()
except Exception as err:
g.clog.warn('Could not communicate with GTC offsetter')
g.clog.warn(str(err))
return False
g.rlog.info('Offset Server Response: ' + response)
if not json.loads(response)['status'] == 'OK':
g.clog.warn('Offset Server response was not OK')
return False
g.clog.debug('Leaving postJSON')
return True | Posts the current setup to the camera and data servers.
g : hcam_drivers.globals.Container
Container with globals
data : dict
The current setup in JSON compatible dictionary format. | entailment |
def createJSON(g, full=True):
"""
Create JSON compatible dictionary from current settings
Parameters
----------
g : hcam_drivers.globals.Container
Container with globals
"""
data = dict()
if 'gps_attached' not in g.cpars:
data['gps_attached'] = 1
else:
data['gps_attached'] = 1 if g.cpars['gps_attached'] else 0
data['appdata'] = g.ipars.dumpJSON()
data['user'] = g.rpars.dumpJSON()
if full:
data['hardware'] = g.ccd_hw.dumpJSON()
data['tcs'] = g.info.dumpJSON()
if g.cpars['telins_name'].lower() == 'gtc' and has_corba:
try:
s = get_telescope_server()
data['gtc_headers'] = dict(
create_header_from_telpars(s.getTelescopeParams())
)
except:
g.clog.warn('cannot get GTC headers from telescope server')
return data | Create JSON compatible dictionary from current settings
Parameters
----------
g : hcam_drivers.globals.Container
Container with globals | entailment |
def insertFITSHDU(g):
"""
Uploads a table of TCS data to the servers, which is appended onto a run.
Arguments
---------
g : hcam_drivers.globals.Container
the Container object of application globals
"""
if not g.cpars['hcam_server_on']:
g.clog.warn('insertFITSHDU: servers are not active')
return False
run_number = getRunNumber(g)
tcs_table = g.info.tcs_table
g.clog.info('Adding TCS table data to run{:04d}.fits'.format(run_number))
url = g.cpars['hipercam_server'] + 'addhdu'
try:
fd = StringIO()
ascii.write(tcs_table, format='ecsv', output=fd)
files = {'file': fd.getvalue()}
r = requests.post(url, data={'run': 'run{:04d}.fits'.format(run_number)},
files=files)
fd.close()
rs = ReadServer(r.content, status_msg=False)
if rs.ok:
g.clog.info('Response from server was OK')
return True
else:
g.clog.warn('Response from server was not OK')
g.clog.warn('Reason: ' + rs.err)
return False
except Exception as err:
g.clog.warn('insertFITSHDU failed')
g.clog.warn(str(err)) | Uploads a table of TCS data to the servers, which is appended onto a run.
Arguments
---------
g : hcam_drivers.globals.Container
the Container object of application globals | entailment |
def execCommand(g, command, timeout=10):
"""
Executes a command by sending it to the rack server
Arguments:
g : hcam_drivers.globals.Container
the Container object of application globals
command : (string)
the command (see below)
Possible commands are:
start : starts a run
stop : stops a run
abort : aborts a run
online : bring ESO control server online and power up hardware
off : put ESO control server in idle state and power down
standby : server can communicate, but child processes disabled
reset : resets the NGC controller front end
Returns True/False according to whether the command
succeeded or not.
"""
if not g.cpars['hcam_server_on']:
g.clog.warn('execCommand: servers are not active')
return False
try:
url = g.cpars['hipercam_server'] + command
g.clog.info('execCommand, command = "' + command + '"')
response = urllib.request.urlopen(url, timeout=timeout)
rs = ReadServer(response.read(), status_msg=False)
g.rlog.info('Server response =\n' + rs.resp())
if rs.ok:
g.clog.info('Response from server was OK')
return True
else:
g.clog.warn('Response from server was not OK')
g.clog.warn('Reason: ' + rs.err)
return False
except urllib.error.URLError as err:
g.clog.warn('execCommand failed')
g.clog.warn(str(err))
return False | Executes a command by sending it to the rack server
Arguments:
g : hcam_drivers.globals.Container
the Container object of application globals
command : (string)
the command (see below)
Possible commands are:
start : starts a run
stop : stops a run
abort : aborts a run
online : bring ESO control server online and power up hardware
off : put ESO control server in idle state and power down
standby : server can communicate, but child processes disabled
reset : resets the NGC controller front end
Returns True/False according to whether the command
succeeded or not. | entailment |
def isRunActive(g):
"""
Polls the data server to see if a run is active
"""
if g.cpars['hcam_server_on']:
url = g.cpars['hipercam_server'] + 'summary'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=True)
if not rs.ok:
raise DriverError('isRunActive error: ' + str(rs.err))
if rs.state == 'idle':
return False
elif rs.state == 'active':
return True
else:
raise DriverError('isRunActive error, state = ' + rs.state)
else:
raise DriverError('isRunActive error: servers are not active') | Polls the data server to see if a run is active | entailment |
def getFrameNumber(g):
"""
Polls the data server to find the current frame number.
Throws an exception if it cannot determine it.
"""
if not g.cpars['hcam_server_on']:
raise DriverError('getRunNumber error: servers are not active')
url = g.cpars['hipercam_server'] + 'status/DET.FRAM2.NO'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=False)
try:
msg = rs.msg
except:
raise DriverError('getFrameNumber error: no message found')
try:
frame_no = int(msg.split()[1])
except:
raise DriverError('getFrameNumber error: invalid msg ' + msg)
return frame_no | Polls the data server to find the current frame number.
Throws an exception if it cannot determine it. | entailment |
def getRunNumber(g):
"""
Polls the data server to find the current run number. Throws
exceptions if it can't determine it.
"""
if not g.cpars['hcam_server_on']:
raise DriverError('getRunNumber error: servers are not active')
url = g.cpars['hipercam_server'] + 'summary'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=True)
if rs.ok:
return rs.run
else:
raise DriverError('getRunNumber error: ' + str(rs.err)) | Polls the data server to find the current run number. Throws
exceptions if it can't determine it. | entailment |
def checkSimbad(g, target, maxobj=5, timeout=5):
"""
Sends off a request to Simbad to check whether a target is recognised.
Returns with a list of results, or raises an exception if it times out
"""
url = 'http://simbad.u-strasbg.fr/simbad/sim-script'
q = 'set limit ' + str(maxobj) + \
'\nformat object form1 "Target: %IDLIST(1) | %COO(A D;ICRS)"\nquery ' \
+ target
query = urllib.parse.urlencode({'submit': 'submit script', 'script': q})
resp = urllib.request.urlopen(url, query.encode(), timeout)
data = False
error = False
results = []
for line in resp:
line = line.decode()
if line.startswith('::data::'):
data = True
if line.startswith('::error::'):
error = True
if data and line.startswith('Target:'):
name, coords = line[7:].split(' | ')
results.append(
{'Name': name.strip(), 'Position': coords.strip(),
'Frame': 'ICRS'})
resp.close()
if error and len(results):
g.clog.warn('drivers.check: Simbad: there appear to be some ' +
'results but an error was unexpectedly raised.')
return results | Sends off a request to Simbad to check whether a target is recognised.
Returns with a list of results, or raises an exception if it times out | entailment |
def run(self):
"""
Version of run that traps Exceptions and stores
them in the fifo
"""
try:
threading.Thread.run(self)
except Exception:
t, v, tb = sys.exc_info()
error = traceback.format_exception_only(t, v)[0][:-1]
tback = (self.name + ' Traceback (most recent call last):\n' +
''.join(traceback.format_tb(tb)))
self.fifo.put((self.name, error, tback)) | Version of run that traps Exceptions and stores
them in the fifo | entailment |
def rand_str(charset, length=32):
"""
Generate random string.
"""
return "".join([random.choice(charset) for _ in range(length)]) | Generate random string. | entailment |
def get_root(w):
"""
Simple method to access root for a widget
"""
next_level = w
while next_level.master:
next_level = next_level.master
return next_level | Simple method to access root for a widget | entailment |
def addStyle(w):
"""
Styles the GUI: global fonts and colours.
Parameters
----------
w : tkinter.tk
widget element to style
"""
# access global container in root widget
root = get_root(w)
g = root.globals
fsize = g.cpars['font_size']
family = g.cpars['font_family']
# Default font
g.DEFAULT_FONT = font.nametofont("TkDefaultFont")
g.DEFAULT_FONT.configure(size=fsize, weight='bold', family=family)
w.option_add('*Font', g.DEFAULT_FONT)
# Menu font
g.MENU_FONT = font.nametofont("TkMenuFont")
g.MENU_FONT.configure(family=family)
w.option_add('*Menu.Font', g.MENU_FONT)
# Entry font
g.ENTRY_FONT = font.nametofont("TkTextFont")
g.ENTRY_FONT.configure(size=fsize, family=family)
w.option_add('*Entry.Font', g.ENTRY_FONT)
# position and size
# root.geometry("320x240+325+200")
# Default colours. Note there is a difference between
# specifying 'background' with a capital B or lowercase b
w.option_add('*background', g.COL['main'])
w.option_add('*HighlightBackground', g.COL['main'])
w.config(background=g.COL['main']) | Styles the GUI: global fonts and colours.
Parameters
----------
w : tkinter.tk
widget element to style | entailment |
def init(main_dir: Path, logfile_path: Path, log_level: str):
"""
Initialize the _downloader. TODO.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfile_path: logfile path
:type logfile_path: ~pathlib.Path
:param log_level: logging level
:type log_level: str
"""
dynamic_data.reset()
dynamic_data.init_dirs(main_dir, logfile_path)
dynamic_data.check_dirs()
tools.create_dir_rec(dynamic_data.MAIN_DIR)
tools.create_dir_rec(dynamic_data.TEMP_DIR)
tools.create_dir_rec(dynamic_data.DOWNLOAD_DIR)
tools.create_dir_rec(dynamic_data.SAVESTAT_DIR)
tools.create_dir_rec(Path.resolve(dynamic_data.LOGFILE_PATH).parent)
dynamic_data.LOG_LEVEL = log_level
logging.basicConfig(filename=dynamic_data.LOGFILE_PATH, filemode='a', level=dynamic_data.LOG_LEVEL,
format='%(asctime)s.%(msecs)03d | %(levelname)s - %(name)s | %(module)s.%(funcName)s: %('
'message)s',
datefmt='%Y.%m.%d %H:%M:%S')
logging.captureWarnings(True)
cores = multiprocessing.cpu_count()
dynamic_data.USING_CORES = min(4, max(1, cores - 1))
info = f"{static_data.NAME} {static_data.VERSION}\n\n" \
f"System: {platform.system()} - {platform.version()} - {platform.machine()} - {cores} cores\n" \
f"Python: {platform.python_version()} - {' - '.join(platform.python_build())}\n" \
f"Arguments: main={main_dir.resolve()} | logfile={logfile_path.resolve()} | loglevel={log_level}\n" \
f"Using cores: {dynamic_data.USING_CORES}\n\n"
with dynamic_data.LOGFILE_PATH.open(mode='w', encoding="utf8") as writer:
writer.write(info)
dynamic_data.AVAIL_PLUGINS = APlugin.get_plugins() | Initialize the _downloader. TODO.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfile_path: logfile path
:type logfile_path: ~pathlib.Path
:param log_level: logging level
:type log_level: str | entailment |
def download_from_plugin(plugin: APlugin):
"""
Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin
"""
# get last update date
plugin.log.info('Get last update')
plugin.update_last_update()
# load old save state
save_state = plugin.load_save_state()
if plugin.last_update <= save_state.last_update:
plugin.log.info('No update. Nothing to do.')
return
# get download links
plugin.log.info('Get download links')
plugin.update_download_links()
# compare with save state
down_link_item_dict = plugin.get_updated_data(save_state.link_item_dict)
plugin.log.info('Compared with save state: ' + str(len(plugin.download_data)))
if not down_link_item_dict:
plugin.log.info('No new data. Nothing to do.')
return
# download new/updated data
plugin.log.info(f"Download new {plugin.unit}s: {len(down_link_item_dict)}")
plugin.download(down_link_item_dict, plugin.download_path, 'Download new ' + plugin.unit + 's', plugin.unit)
# check which downloads are succeeded
succeed_link_item_dict, lost_link_item_dict = plugin.check_download(down_link_item_dict, plugin.download_path)
plugin.log.info(f"Downloaded: {len(succeed_link_item_dict)}/{len(down_link_item_dict)}")
# update savestate link_item_dict with succeeded downloads dict
plugin.log.info('Update savestate')
plugin.update_dict(save_state.link_item_dict, succeed_link_item_dict)
# write new savestate
plugin.log.info('Write savestate')
plugin.save_save_state(save_state.link_item_dict) | Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin | entailment |
def run(plugin_name: str, options: List[str] = None) -> PluginState:
"""
Run a plugin so use the download routine and clean up after.
:param plugin_name: name of plugin
:type plugin_name: str
:param options: parameters which will be send to the plugin initialization
:type options: List[str]
:return: success
:rtype: ~unidown.plugin.plugin_state.PluginState
"""
if options is None:
options = []
if plugin_name not in dynamic_data.AVAIL_PLUGINS:
msg = 'Plugin ' + plugin_name + ' was not found.'
logging.error(msg)
print(msg)
return PluginState.NOT_FOUND
try:
plugin_class = dynamic_data.AVAIL_PLUGINS[plugin_name].load()
plugin = plugin_class(options)
except Exception:
msg = 'Plugin ' + plugin_name + ' crashed while loading.'
logging.exception(msg)
print(msg + ' Check log for more information.')
return PluginState.LOAD_CRASH
else:
logging.info('Loaded plugin: ' + plugin_name)
try:
download_from_plugin(plugin)
plugin.clean_up()
except PluginException as ex:
msg = f"Plugin {plugin.name} stopped working. Reason: {'unknown' if (ex.msg == '') else ex.msg}"
logging.error(msg)
print(msg)
return PluginState.RUN_FAIL
except Exception:
msg = 'Plugin ' + plugin.name + ' crashed.'
logging.exception(msg)
print(msg + ' Check log for more information.')
return PluginState.RUN_CRASH
else:
logging.info(plugin.name + ' ends without errors.')
return PluginState.END_SUCCESS | Run a plugin so use the download routine and clean up after.
:param plugin_name: name of plugin
:type plugin_name: str
:param options: parameters which will be send to the plugin initialization
:type options: List[str]
:return: success
:rtype: ~unidown.plugin.plugin_state.PluginState | entailment |
def check_update():
"""
Check for app updates and print/log them.
"""
logging.info('Check for app updates.')
try:
update = updater.check_for_app_updates()
except Exception:
logging.exception('Check for updates failed.')
return
if update:
print("!!! UPDATE AVAILABLE !!!\n"
"" + static_data.PROJECT_URL + "\n\n")
logging.info("Update available: " + static_data.PROJECT_URL)
else:
logging.info("No update available.") | Check for app updates and print/log them. | entailment |
def get_newest_app_version() -> Version:
"""
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
"""
with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode('utf-8')
releases = json.loads(pypi_json).get('releases', [])
online_version = Version('0.0.0')
for release in releases:
cur_version = Version(release)
if not cur_version.is_prerelease:
online_version = max(online_version, cur_version)
return online_version | Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version | entailment |
def setupNodding(self):
"""
Setup Nodding for GTC
"""
g = get_root(self).globals
if not self.nod():
# re-enable clear mode box if not drift
if not self.isDrift():
self.clear.enable()
# clear existing nod pattern
self.nodPattern = {}
self.check()
return
# Do nothing if we're not at the GTC
if g.cpars['telins_name'] != 'GTC':
messagebox.showerror('Error', 'Cannot dither WHT')
self.nod.set(False)
self.nodPattern = {}
return
# check for drift mode and bomb out
if self.isDrift():
messagebox.showerror('Error', 'Cannot dither telescope in drift mode')
self.nod.set(False)
self.nodPattern = {}
return
# check for clear not enabled and warn
if not self.clear():
if not messagebox.askokcancel('Warning',
'Dithering telescope will enable clear mode. Continue?'):
self.nod.set(False)
self.nodPattern = {}
return
# Ask for nod pattern
try:
home = expanduser('~')
fname = filedialog.askopenfilename(
title='Open offsets text file',
defaultextension='.txt',
filetypes=[('text files', '.txt')],
initialdir=home)
if not fname:
g.clog.warn('Aborted load from disk')
raise ValueError
ra, dec = np.loadtxt(fname).T
if len(ra) != len(dec):
g.clog.warn('Mismatched lengths of RA and Dec offsets')
raise ValueError
data = dict(
ra=ra.tolist(),
dec=dec.tolist()
)
except:
g.clog.warn('Setting dither pattern failed. Disabling dithering')
self.nod.set(False)
self.nodPattern = {}
return
# store nodding on ipars object
self.nodPattern = data
# enable clear mode
self.clear.set(True)
# update
self.check() | Setup Nodding for GTC | entailment |
def dumpJSON(self):
"""
Encodes current parameters to JSON compatible dictionary
"""
numexp = self.number.get()
expTime, _, _, _, _ = self.timing()
if numexp == 0:
numexp = -1
data = dict(
numexp=numexp,
app=self.app.value(),
led_flsh=self.led(),
dummy_out=self.dummy(),
fast_clks=self.fastClk(),
readout=self.readSpeed(),
dwell=self.expose.value(),
exptime=expTime,
oscan=self.oscan(),
oscany=self.oscany(),
xbin=self.wframe.xbin.value(),
ybin=self.wframe.ybin.value(),
multipliers=self.nmult.getall(),
clear=self.clear()
)
# only allow nodding in clear mode, even if GUI has got confused
if data['clear'] and self.nodPattern:
data['nodpattern'] = self.nodPattern
# no mixing clear and multipliers, no matter what GUI says
if data['clear']:
data['multipliers'] = [1 for i in self.nmult.getall()]
# add window mode
if not self.isFF():
if self.isDrift():
# no clear, multipliers or oscan in drift
for setting in ('clear', 'oscan', 'oscany'):
data[setting] = 0
data['multipliers'] = [1 for i in self.nmult.getall()]
for iw, (xsl, xsr, ys, nx, ny) in enumerate(self.wframe):
data['x{}start_left'.format(iw+1)] = xsl
data['x{}start_right'.format(iw+1)] = xsr
data['y{}start'.format(iw+1)] = ys
data['y{}size'.format(iw+1)] = ny
data['x{}size'.format(iw+1)] = nx
else:
# no oscany in window mode
data['oscany'] = 0
for iw, (xsll, xsul, xslr, xsur, ys, nx, ny) in enumerate(self.wframe):
data['x{}start_upperleft'.format(iw+1)] = xsul
data['x{}start_lowerleft'.format(iw+1)] = xsll
data['x{}start_upperright'.format(iw+1)] = xsur
data['x{}start_lowerright'.format(iw+1)] = xslr
data['y{}start'.format(iw+1)] = ys
data['x{}size'.format(iw+1)] = nx
data['y{}size'.format(iw+1)] = ny
return data | Encodes current parameters to JSON compatible dictionary | entailment |
def loadJSON(self, json_string):
"""
Loads in an application saved in JSON format.
"""
g = get_root(self).globals
data = json.loads(json_string)['appdata']
# first set the parameters which change regardless of mode
# number of exposures
numexp = data.get('numexp', 0)
if numexp == -1:
numexp = 0
self.number.set(numexp)
# Overscan (x, y)
if 'oscan' in data:
self.oscan.set(data['oscan'])
if 'oscany' in data:
self.oscany.set(data['oscany'])
# LED setting
self.led.set(data.get('led_flsh', 0))
# Dummy output enabled
self.dummy.set(data.get('dummy_out', 0))
# Fast clocking option?
self.fastClk.set(data.get('fast_clks', 0))
# readout speed
self.readSpeed.set(data.get('readout', 'Slow'))
# dwell
dwell = data.get('dwell', 0)
self.expose.set(str(float(dwell)))
# multipliers
mult_values = data.get('multipliers',
(1, 1, 1, 1, 1))
self.nmult.setall(mult_values)
# look for nodpattern in data
nodPattern = data.get('nodpattern', {})
if nodPattern and g.cpars['telins_name'] == 'GTC':
self.nodPattern = nodPattern
self.nod.set(True)
self.clear.set(True)
else:
self.nodPattern = {}
self.nod.set(False)
# binning
self.quad_frame.xbin.set(data.get('xbin', 1))
self.quad_frame.ybin.set(data.get('ybin', 1))
self.drift_frame.xbin.set(data.get('xbin', 1))
self.drift_frame.ybin.set(data.get('ybin', 1))
# now for the behaviour which depends on mode
if 'app' in data:
self.app.set(data['app'])
app = data['app']
if app == 'Drift':
# disable clear mode in drift
self.clear.set(0)
# only one pair allowed
self.wframe.npair.set(1)
# set the window pair values
labels = ('x1start_left', 'y1start',
'x1start_right', 'x1size',
'y1size')
if not all(label in data for label in labels):
raise DriverError('Drift mode application missing window params')
# now actually set them
self.wframe.xsl[0].set(data['x1start_left'])
self.wframe.xsr[0].set(data['x1start_right'])
self.wframe.ys[0].set(data['y1start'])
self.wframe.nx[0].set(data['x1size'])
self.wframe.ny[0].set(data['y1size'])
self.wframe.check()
elif app == 'FullFrame':
# enable clear mode if set
self.clear.set(data.get('clear', 0))
elif app == 'Windows':
# enable clear mode if set
self.clear.set(data.get('clear', 0))
nquad = 0
for nw in range(2):
labels = ('x{0}start_lowerleft y{0}start x{0}start_upperleft x{0}start_upperright ' +
'x{0}start_lowerright x{0}size y{0}size').format(nw+1).split()
if all(label in data for label in labels):
xsll = data[labels[0]]
xslr = data[labels[4]]
xsul = data[labels[2]]
xsur = data[labels[3]]
ys = data[labels[1]]
nx = data[labels[5]]
ny = data[labels[6]]
self.wframe.xsll[nw].set(xsll)
self.wframe.xslr[nw].set(xslr)
self.wframe.xsul[nw].set(xsul)
self.wframe.xsur[nw].set(xsur)
self.wframe.ys[nw].set(ys)
self.wframe.nx[nw].set(nx)
self.wframe.ny[nw].set(ny)
nquad += 1
else:
break
self.wframe.nquad.set(nquad)
self.wframe.check() | Loads in an application saved in JSON format. | entailment |
def check(self, *args):
"""
Callback to check validity of instrument parameters.
Performs the following tasks:
- spots and flags overlapping windows or null window parameters
- flags windows with invalid dimensions given the binning parameter
- sets the correct number of enabled windows
- disables or enables clear and nod buttons depending on drift mode or not
- checks for window synchronisation, enabling sync button if required
- enables or disables start button if settings are OK
Returns
-------
status : bool
True or False according to whether the settings are OK.
"""
status = True
g = get_root(self).globals
# clear errors on binning (may be set later if FF)
xbinw, ybinw = self.wframe.xbin, self.wframe.ybin
xbinw.config(bg=g.COL['main'])
ybinw.config(bg=g.COL['main'])
# keep binning factors of drift mode and windowed mode up to date
oframe, aframe = ((self.quad_frame, self.drift_frame) if self.drift_frame.winfo_ismapped()
else (self.drift_frame, self.quad_frame))
xbin, ybin = aframe.xbin.value(), aframe.ybin.value()
oframe.xbin.set(xbin)
oframe.ybin.set(ybin)
if not self.frozen:
if self.clear() or self.isDrift():
# disable nmult in clear or drift mode
self.nmult.disable()
else:
self.nmult.enable()
if self.isDrift():
self.clearLab.config(state='disable')
self.nodLab.config(state='disable')
if not self.drift_frame.winfo_ismapped():
self.quad_frame.grid_forget()
self.drift_frame.grid(row=10, column=0, columnspan=3,
sticky=tk.W+tk.N)
if not self.frozen:
self.oscany.config(state='disable')
self.oscan.config(state='disable')
self.clear.config(state='disable')
self.nod.config(state='disable')
self.wframe.enable()
status = self.wframe.check()
elif self.isFF():
# special case check of binning from window frame
if 1024 % xbin != 0:
status = False
xbinw.config(bg=g.COL['error'])
elif (1024 // xbin) % 4 != 0:
status = False
xbinw.config(bg=g.COL['error'])
if 512 % ybin != 0:
status = False
ybinw.config(bg=g.COL['error'])
if not self.quad_frame.winfo_ismapped():
self.drift_frame.grid_forget()
self.quad_frame.grid(row=10, column=0, columnspan=3,
sticky=tk.W+tk.N)
self.clearLab.config(state='normal')
if g.cpars['telins_name'] == 'GTC':
self.nodLab.config(state='normal')
else:
self.nodLab.config(state='disable')
if not self.frozen:
self.oscany.config(state='normal')
self.oscan.config(state='normal')
self.clear.config(state='normal')
if g.cpars['telins_name'] == 'GTC':
self.nod.config(state='normal')
else:
self.nod.config(state='disable')
self.wframe.disable()
else:
self.clearLab.config(state='normal')
if g.cpars['telins_name'] == 'GTC':
self.nodLab.config(state='normal')
else:
self.nodLab.config(state='disable')
if not self.quad_frame.winfo_ismapped():
self.drift_frame.grid_forget()
self.quad_frame.grid(row=10, column=0, columnspan=3,
sticky=tk.W+tk.N)
if not self.frozen:
self.oscany.config(state='disable')
self.oscan.config(state='normal')
self.clear.config(state='normal')
if g.cpars['telins_name'] == 'GTC':
self.nod.config(state='normal')
else:
self.nod.config(state='disable')
self.wframe.enable()
status = self.wframe.check()
# exposure delay
if self.expose.ok():
self.expose.config(bg=g.COL['main'])
else:
self.expose.config(bg=g.COL['warn'])
status = False
# don't allow binning other than 1, 2 in overscan or prescan mode
if self.oscan() or self.oscany():
if xbin not in (1, 2):
status = False
xbinw.config(bg=g.COL['error'])
if ybin not in (1, 2):
status = False
ybinw.config(bg=g.COL['error'])
# disable clear if nodding enabled. re-enable if not drift
if not self.frozen:
if self.nod() or self.nodPattern:
self.clear.config(state='disabled')
self.clearLab.config(state='disabled')
elif not self.isDrift():
self.clear.config(state='normal')
self.clearLab.config(state='normal')
# allow posting if parameters are OK. update count and SN estimates too
if status:
if (g.cpars['hcam_server_on'] and g.cpars['eso_server_online'] and
g.observe.start['state'] == 'disabled' and
not isRunActive(g)):
g.observe.start.enable()
g.count.update()
else:
g.observe.start.disable()
return status | Callback to check validity of instrument parameters.
Performs the following tasks:
- spots and flags overlapping windows or null window parameters
- flags windows with invalid dimensions given the binning parameter
- sets the correct number of enabled windows
- disables or enables clear and nod buttons depending on drift mode or not
- checks for window synchronisation, enabling sync button if required
- enables or disables start button if settings are OK
Returns
-------
status : bool
True or False according to whether the settings are OK. | entailment |
def freeze(self):
"""
Freeze all settings so they cannot be altered
"""
self.app.disable()
self.clear.disable()
self.nod.disable()
self.led.disable()
self.dummy.disable()
self.readSpeed.disable()
self.expose.disable()
self.number.disable()
self.wframe.disable(everything=True)
self.nmult.disable()
self.frozen = True | Freeze all settings so they cannot be altered | entailment |
def unfreeze(self):
"""
Reverse of freeze
"""
self.app.enable()
self.clear.enable()
self.nod.enable()
self.led.enable()
self.dummy.enable()
self.readSpeed.enable()
self.expose.enable()
self.number.enable()
self.wframe.enable()
self.nmult.enable()
self.frozen = False | Reverse of freeze | entailment |
def getRtplotWins(self):
""""
Returns a string suitable to sending off to rtplot when
it asks for window parameters. Returns null string '' if
the windows are not OK. This operates on the basis of
trying to send something back, even if it might not be
OK as a window setup. Note that we have to take care
here not to update any GUI components because this is
called outside of the main thread.
"""
try:
if self.isFF():
return 'fullframe\r\n'
elif self.isDrift():
xbin = self.wframe.xbin.value()
ybin = self.wframe.ybin.value()
nwin = 2*self.wframe.npair.value()
ret = str(xbin) + ' ' + str(ybin) + ' ' + str(nwin) + '\r\n'
for xsl, xsr, ys, nx, ny in self.wframe:
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsl, ys, nx, ny
)
ret += '{:d} {:d} {:d} {:d}'.format(
xsr, ys, nx, ny
)
return ret
else:
xbin = self.wframe.xbin.value()
ybin = self.wframe.ybin.value()
nwin = 4*self.wframe.nquad.value()
ret = str(xbin) + ' ' + str(ybin) + ' ' + str(nwin) + '\r\n'
for xsll, xsul, xslr, xsur, ys, nx, ny in self.wframe:
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsll, ys, nx, ny
)
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsul, 1025 - ys - ny, nx, ny
)
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xslr, ys, nx, ny
)
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsur, 1025 - ys - ny, nx, ny
)
return ret
except:
return '' | Returns a string suitable to sending off to rtplot when
it asks for window parameters. Returns null string '' if
the windows are not OK. This operates on the basis of
trying to send something back, even if it might not be
OK as a window setup. Note that we have to take care
here not to update any GUI components because this is
called outside of the main thread. | entailment |
def timing(self):
"""
Estimates timing information for the current setup. You should
run a check on the instrument parameters before calling this.
Returns: (expTime, deadTime, cycleTime, dutyCycle, frameRate)
expTime : exposure time per frame (seconds)
deadTime : dead time per frame (seconds)
cycleTime : sampling time (cadence), (seconds)
dutyCycle : percentage time exposing.
frameRate : number of frames per second
"""
# drift mode y/n?
isDriftMode = self.isDrift()
# FF y/n?
isFF = self.isFF()
# Set the readout speed
readSpeed = self.readSpeed()
if readSpeed == 'Fast' and self.dummy():
video = VIDEO_FAST
elif readSpeed == 'Slow' and self.dummy():
video = VIDEO_SLOW
elif not self.dummy():
video = VIDEO_SLOW_SE
else:
raise DriverError('InstPars.timing: readout speed = ' +
readSpeed + ' not recognised.')
if self.fastClk():
DUMP_TIME = DUMP_TIME_FAST
VCLOCK_FRAME = VCLOCK_FAST
VCLOCK_STORAGE = VCLOCK_FAST
HCLOCK = HCLOCK_FAST
else:
DUMP_TIME = DUMP_TIME_SLOW
VCLOCK_FRAME = VCLOCK_FRAME_SLOW
VCLOCK_STORAGE = VCLOCK_STORAGE_SLOW
HCLOCK = HCLOCK_SLOW
# clear chip on/off?
lclear = not isDriftMode and self.clear()
# overscan read or not
oscan = not isDriftMode and self.oscan()
oscany = not isDriftMode and self.oscany()
# get exposure delay
expose = self.expose.value()
# window parameters
xbin = self.wframe.xbin.value()
ybin = self.wframe.ybin.value()
if isDriftMode:
nwin = 1 # number of windows per output
dys = self.wframe.ys[0].value() - 1
dnx = self.wframe.nx[0].value()
dny = self.wframe.ny[0].value()
dxsl = self.wframe.xsl[0].value()
dxsr = self.wframe.xsr[0].value()
# differential shift needed to line both
# windows up with the edge of the chip
diffshift = abs(dxsl - 1 - (2*FFX - dxsr - dnx + 1))
elif isFF:
nwin = 1
ys, nx, ny = [0], [1024], [512]
else:
ys, nx, ny = [], [], []
xse, xsf, xsg, xsh = [], [], [], []
nwin = self.wframe.nquad.value()
for xsll, xsul, xslr, xsur, ysv, nxv, nyv in self.wframe:
xse.append(xsll - 1)
xsf.append(2049 - xslr - nxv)
xsg.append(2049 - xsur - nxv)
xsh.append(xsul - 1)
ys.append(ysv-1)
nx.append(nxv)
ny.append(nyv)
# convert timing parameters to seconds
expose_delay = expose
# clear chip by VCLOCK-ing the image and area and dumping storage area (x5)
if lclear:
clear_time = 5*(FFY*VCLOCK_FRAME + FFY*DUMP_TIME)
else:
clear_time = 0.0
if isDriftMode:
# for drift mode, we need the number of windows in the pipeline
# and the pipeshift
nrows = FFY # number of rows in storage area
pnwin = int(((nrows / dny) + 1)/2)
pshift = nrows - (2*pnwin-1)*dny
frame_transfer = (dny+dys)*VCLOCK_FRAME
yshift = [dys*VCLOCK_STORAGE]
# After placing the window adjacent to the serial register, the
# register must be cleared by clocking out the entire register,
# taking FFX hclocks.
line_clear = [0.]
if yshift[0] != 0:
line_clear[0] = DUMP_TIME
# to calculate number of HCLOCKS needed to read a line in
# drift mode we have to account for the diff shifts and dumping.
# first perform diff shifts
# for now we need this *2 (for quadrants E, H or F, G)
numhclocks = 2*diffshift
# now add the amount of clocks needed to get
# both windows to edge of chip
if dxsl - 1 > 2*FFX - dxsr - dnx + 1:
# it was the left window that got the diff shift,
# so the number of hclocks increases by the amount
# needed to get the RH window to the edge
numhclocks += 2*FFX - dxsr - dnx + 1
else:
# vice versa
numhclocks += dxsl - 1
# now we actually clock the windows themselves
numhclocks += dnx
# finally, we need to hclock the additional pre-scan pixels
numhclocks += 2*PRSCX
# here is the total time to read the whole line
line_read = [VCLOCK_STORAGE*ybin + numhclocks*HCLOCK +
video*dnx/xbin + DUMP_TIME + 2*SETUP_READ]
readout = [(dny/ybin) * line_read[0]]
elif isFF:
# move entire image into storage area
frame_transfer = FFY*VCLOCK_FRAME + DUMP_TIME
yshift = [0]
line_clear = [0]
numhclocks = FFX + PRSCX
line_read = [VCLOCK_STORAGE*ybin + numhclocks*HCLOCK +
video*nx[0]/xbin + SETUP_READ]
if oscan:
line_read[0] += video*PRSCX/xbin
nlines = ny[0]/ybin if not oscany else (ny[0] + 8/ybin)
readout = [nlines*line_read[0]]
else:
# windowed mode
# move entire image into storage area
frame_transfer = FFY*VCLOCK_FRAME + DUMP_TIME
# dump rows in storage area up to start of the window without changing the
# image area.
yshift = nwin*[0.]
yshift[0] = ys[0]*DUMP_TIME
for nw in range(1, nwin):
yshift[nw] = (ys[nw]-ys[nw-1]-ny[nw-1])*DUMP_TIME
line_clear = nwin*[0.]
# Naidu always dumps the serial register, in windowed mode
# regardless of whether we need to or not
for nw in range(nwin):
line_clear[nw] = DUMP_TIME
# calculate how long it takes to shift one row into the serial
# register shift along serial register and then read out the data.
# total number of hclocks needs to account for diff shifts of
# windows, carried out in serial
numhclocks = nwin*[0]
for nw in range(nwin):
common_shift = min(xse[nw], xsf[nw], xsg[nw], xsh[nw])
diffshifts = sum((xs-common_shift for xs in (xse[nw], xsf[nw], xsg[nw], xsh[nw])))
numhclocks[nw] = 2*PRSCX + common_shift + diffshifts + nx[nw]
line_read = nwin*[0.]
# line read includes vclocking a row, all the hclocks, digitising pixels and dumping serial register
# when windows are read out.
for nw in range(nwin):
line_read[nw] = (VCLOCK_STORAGE*ybin + numhclocks[nw]*HCLOCK +
video*nx[nw]/xbin + 2*SETUP_READ + DUMP_TIME)
if oscan:
line_read[nw] += video*PRSCX/xbin
# multiply time to shift one row into serial register by
# number of rows for total readout time
readout = nwin*[0.]
for nw in range(nwin):
nlines = ny[nw]/ybin if not oscany else (ny[nw] + 8/ybin)
readout[nw] = nlines * line_read[nw]
# now get the total time to read out one exposure.
cycleTime = expose_delay + clear_time + frame_transfer
if isDriftMode:
cycleTime += pshift*VCLOCK_STORAGE + yshift[0] + line_clear[0] + readout[0]
else:
for nw in range(nwin):
cycleTime += yshift[nw] + line_clear[nw] + readout[nw]
# use 5sec estimate for nod time
# TODO: replace with accurate estimate
if self.nod() and lclear:
cycleTime += 5
elif self.nod():
g = get_root(self).globals
g.clog.warn('ERR: dithering enabled with clear mode off')
frameRate = 1.0/cycleTime
expTime = expose_delay if lclear else cycleTime - frame_transfer
deadTime = cycleTime - expTime
dutyCycle = 100.0*expTime/cycleTime
return (expTime, deadTime, cycleTime, dutyCycle, frameRate) | Estimates timing information for the current setup. You should
run a check on the instrument parameters before calling this.
Returns: (expTime, deadTime, cycleTime, dutyCycle, frameRate)
expTime : exposure time per frame (seconds)
deadTime : dead time per frame (seconds)
cycleTime : sampling time (cadence), (seconds)
dutyCycle : percentage time exposing.
frameRate : number of frames per second | entailment |
def loadJSON(self, json_string):
"""
Sets the values of the run parameters given an JSON string
"""
g = get_root(self).globals
user = json.loads(json_string)['user']
def setField(widget, field):
val = user.get(field)
if val is not None:
widget.set(val)
setField(self.prog_ob.obid, 'OB')
setField(self.target, 'target')
setField(self.prog_ob.progid, 'ID')
setField(self.pi, 'PI')
setField(self.observers, 'Observers')
setField(self.comment, 'comment')
setField(self.filter, 'filters')
setField(g.observe.rtype, 'flags') | Sets the values of the run parameters given an JSON string | entailment |
def dumpJSON(self):
"""
Encodes current parameters to JSON compatible dictionary
"""
g = get_root(self).globals
dtype = g.observe.rtype()
if dtype == 'bias':
target = 'BIAS'
elif dtype == 'flat':
target = 'FLAT'
elif dtype == 'dark':
target = 'DARK'
else:
target = self.target.value()
return dict(
target=target,
ID=self.prog_ob.progid.value(),
PI=self.pi.value(),
OB='{:04d}'.format(self.prog_ob.obid.value()),
Observers=self.observers.value(),
comment=self.comment.value(),
flags=dtype,
filters=self.filter.value()
) | Encodes current parameters to JSON compatible dictionary | entailment |
def check(self, *args):
"""
Checks the validity of the run parameters. Returns
flag (True = OK), and a message which indicates the
nature of the problem if the flag is False.
"""
ok = True
msg = ''
g = get_root(self).globals
dtype = g.observe.rtype()
expert = g.cpars['expert_level'] > 0
if dtype == 'bias' or dtype == 'flat' or dtype == 'dark':
self.pi.configure(state='disable')
self.prog_ob.configure(state='disable')
self.target.disable()
else:
if expert:
self.pi.configure(state='normal')
self.prog_ob.configure(state='normal')
self.prog_ob.enable()
else:
self.prog_ob.configure(state='disable')
self.pi.configure(state='disable')
self.prog_ob.disable()
self.target.enable()
if g.cpars['require_run_params']:
if self.target.ok():
self.target.entry.config(bg=g.COL['main'])
else:
self.target.entry.config(bg=g.COL['error'])
ok = False
msg += 'Target name field cannot be blank\n'
if dtype == 'data caution' or \
dtype == 'data' or dtype == 'technical':
if self.prog_ob.ok():
self.prog_ob.config(bg=g.COL['main'])
else:
self.prog_ob.config(bg=g.COL['error'])
ok = False
msg += 'Programme or OB ID field cannot be blank\n'
if self.pi.ok():
self.pi.config(bg=g.COL['main'])
else:
self.pi.config(bg=g.COL['error'])
ok = False
msg += 'Principal Investigator field cannot be blank\n'
if self.observers.ok():
self.observers.config(bg=g.COL['main'])
else:
self.observers.config(bg=g.COL['error'])
ok = False
msg += 'Observers field cannot be blank'
return (ok, msg) | Checks the validity of the run parameters. Returns
flag (True = OK), and a message which indicates the
nature of the problem if the flag is False. | entailment |
def freeze(self):
"""
Freeze all settings so that they can't be altered
"""
self.target.disable()
self.filter.configure(state='disable')
self.prog_ob.configure(state='disable')
self.pi.configure(state='disable')
self.observers.configure(state='disable')
self.comment.configure(state='disable') | Freeze all settings so that they can't be altered | entailment |
def unfreeze(self):
"""
Unfreeze all settings so that they can be altered
"""
g = get_root(self).globals
self.filter.configure(state='normal')
dtype = g.observe.rtype()
if dtype == 'data caution' or dtype == 'data' or dtype == 'technical':
self.prog_ob.configure(state='normal')
self.pi.configure(state='normal')
self.target.enable()
self.observers.configure(state='normal')
self.comment.configure(state='normal') | Unfreeze all settings so that they can be altered | entailment |