def histogram_voltage(self, timestep=None, title=True, **kwargs):
"""
Plots histogram of voltages.
For more information see :func:`edisgo.tools.plots.histogram`.
Parameters
----------
timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional
Specifies the time step the histogram is plotted for. If timestep is None,
all time steps for which voltages were calculated are used. Default: None.
title : :obj:`str` or :obj:`bool`, optional
Title for plot. If True title is auto generated. If False plot has
no title. If :obj:`str`, the provided title is used. Default: True.
"""
data = self.network.results.v_res()
if title is True:
if timestep is not None:
title = "Voltage histogram for time step {}".format(timestep)
else:
title = "Voltage histogram \nfor time steps {} to {}".format(
data.index[0], data.index[-1])
elif title is False:
title = None
plots.histogram(data=data, title=title, timeindex=timestep, **kwargs)
def yaml_dump(data, stream=None):
# type: (YamlData, Optional[TextIO]) -> Text
""" Dump data to a YAML string/file.
Args:
data (YamlData):
The data to serialize as YAML.
stream (TextIO):
The file-like object to save to. If given, this function will write
the resulting YAML to that stream.
Returns:
str: The YAML string, or None if the data was written to `stream`.
"""
return yaml.dump(
data,
stream=stream,
Dumper=Dumper,
default_flow_style=False
)
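A minimal usage sketch for the function above; the data values are illustrative, and note that PyYAML returns None when a stream is supplied.
data = {"name": "example", "items": [1, 2, 3]}
text = yaml_dump(data)  # returns the YAML document as a string
with open("out.yaml", "w") as fh:
    yaml_dump(data, stream=fh)  # writes to the stream, returns None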
def _dict_increment(self, dictionary, key):
"""Increments the value of the dictionary at the specified key."""
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
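The same increment can be written with dict.get, or avoided entirely with collections.Counter; a minimal sketch of both equivalents:
from collections import Counter

counts = Counter()
counts["word"] += 1  # Counter defaults missing keys to 0

plain = {}
plain["word"] = plain.get("word", 0) + 1  # one-line equivalent on a plain dict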
def reply_message(self, reply_token, messages, timeout=None):
"""Call reply message API.
https://devdocs.line.me/en/#reply-message
Respond to events from users, groups, and rooms.
Webhooks are used to notify you when an event occurs.
For events that you can respond to, a replyToken is issued for replying to messages.
Because the replyToken becomes invalid after a certain period of time,
responses should be sent as soon as a message is received.
Reply tokens can only be used once.
:param str reply_token: replyToken received via webhook
:param messages: Messages.
Max: 5
:type messages: T <= :py:class:`linebot.models.send_messages.SendMessage` |
list[T <= :py:class:`linebot.models.send_messages.SendMessage`]
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
"""
if not isinstance(messages, (list, tuple)):
messages = [messages]
data = {
'replyToken': reply_token,
'messages': [message.as_json_dict() for message in messages]
}
self._post(
'/v2/bot/message/reply', data=json.dumps(data), timeout=timeout
)
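A hedged sketch of a typical call from a webhook handler, using the standard line-bot-sdk client and message classes; the token and handler name below are placeholders.
from linebot import LineBotApi
from linebot.models import TextSendMessage

line_bot_api = LineBotApi("YOUR_CHANNEL_ACCESS_TOKEN")  # placeholder token

def handle_message(event):
    # reply tokens arrive in the webhook payload, are single-use and
    # expire quickly, so reply as soon as the event is received
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text="Hello, world"),  # one message, or a list of up to 5
    )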
def get_membership_document(membership_type: str, current_block: dict, identity: Identity, salt: str,
password: str) -> Membership:
"""
Get a Membership document
:param membership_type: "IN" to ask for membership or "OUT" to cancel membership
:param current_block: Current block data
:param identity: Identity document
:param salt: Passphrase of the account
:param password: Password of the account
:rtype: Membership
"""
# get current block BlockStamp
timestamp = BlockUID(current_block['number'], current_block['hash'])
# create keys from credentials
key = SigningKey.from_credentials(salt, password)
# create membership document
membership = Membership(
version=10,
currency=current_block['currency'],
issuer=key.pubkey,
membership_ts=timestamp,
membership_type=membership_type,
uid=identity.uid,
identity_ts=identity.timestamp,
signature=None
)
# sign document
membership.sign([key])
return membership
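An illustrative call, assuming current_block was fetched from a node beforehand and identity is a previously built Identity document; all values below are placeholders.
current_block = {
    "number": 367000,
    "hash": "0000AB7C...",  # placeholder block hash
    "currency": "g1",
}
membership = get_membership_document(
    "IN", current_block, identity, salt="my passphrase", password="my password"
)
# the returned document is already signed and ready to be submitted to a node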
def define(cls, name, parent=None, interleave=False):
"""Create define node."""
node = cls("define", parent, interleave=interleave)
node.occur = 0
node.attr["name"] = name
return node
def _member_def(self, member):
"""
Return an individual member definition formatted as an RST glossary
entry, wrapped to fit within 78 columns.
"""
member_docstring = textwrap.dedent(member.docstring).strip()
member_docstring = textwrap.fill(
member_docstring, width=78, initial_indent=' '*4,
subsequent_indent=' '*4
)
return '%s\n%s\n' % (member.name, member_docstring)
def make_application_error(name, tag):
"""
Create and return a **class** inheriting from :class:`.xso.XSO`. The
:attr:`.xso.XSO.TAG` is set to `tag` and the class’ name will be `name`.
In addition, the class is automatically registered with
:attr:`.Error.application_condition` using
:meth:`~.Error.as_application_condition`.
Keep in mind that if you subclass the class returned by this function, the
subclass is not registered with :class:`.Error`. In addition, if you do not
override the :attr:`~.xso.XSO.TAG`, you will not be able to register
the subclass as application defined condition as it has the same tag as the
class returned by this function, which has already been registered as
application condition.
"""
cls = type(xso.XSO)(name, (xso.XSO,), {
"TAG": tag,
})
Error.as_application_condition(cls)
return cls
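A short sketch of a call; the namespace and local name in the tag tuple are made up for illustration.
MyAppError = make_application_error(
    "MyAppError",
    ("urn:example:errors", "my-app-error"),
)
# MyAppError is an XSO subclass whose TAG is the tuple above, already
# registered as an application-defined condition on Error payloads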
def authorization_url(self, **kwargs):
"""
Get authorization URL to redirect the resource owner to.
https://tools.ietf.org/html/rfc6749#section-4.1.1
:param str redirect_uri: (optional) Absolute URL of the client where
the user-agent will be redirected to.
:param str scope: (optional) Space delimited list of strings.
:param str state: (optional) An opaque value used by the client to
maintain state between the request and callback
:return: URL to redirect the resource owner to
:rtype: str
"""
payload = {'response_type': 'code', 'client_id': self._client_id}
payload.update(kwargs)
# Sort items for a deterministic URL (also keeps unit tests stable).
payload = sorted(payload.items(), key=lambda val: val[0])
params = urlencode(payload)
url = self.get_url('authorization_endpoint')
return '{}?{}'.format(url, params)
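A minimal sketch, assuming client is an instance of the surrounding class configured with a client_id and an authorization endpoint URL; all argument values are placeholders.
url = client.authorization_url(
    redirect_uri="https://example.com/callback",
    scope="openid profile",
    state="opaque-csrf-token",
)
# -> <authorization_endpoint>?client_id=...&redirect_uri=...
#    &response_type=code&scope=...&state=...  (parameters sorted by name)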
def cartesian_to_index(ranges, maxima=None):
"""
Inverts tuples from a cartesian product to a numeric index, i.e. the index this
tuple would have in a cartesian product.
Each column gets multiplied with a place value according to the preceding columns' maxima
and all columns are summed up.
This function works in the same direction as utils.cartesian, i.e. the first column has the largest place value.
"""
if maxima is None:
return reduce(lambda y,x: (x*y[1] + y[0],(np.max(x)+1)*y[1]), ranges[:,::-1].transpose(), (np.array([0]*ranges.shape[0]),1))[0]
else:
maxima_prod = np.concatenate([np.cumprod(np.array(maxima)[::-1])[1::-1],[1]])
return np.sum(ranges * maxima_prod, 1)
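A worked example of the maxima branch: with per-column maxima (4, 5, 6) the place values are (5*6, 6, 1) = (30, 6, 1), so the tuple (1, 2, 3) maps to 1*30 + 2*6 + 3 = 45.
import numpy as np

ranges = np.array([[1, 2, 3]])
print(cartesian_to_index(ranges, maxima=[4, 5, 6]))  # -> [45]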
def readFromProto(cls, proto):
"""
Read state from proto object.
:param proto: SDRClassifierRegionProto capnproto object
"""
instance = cls()
instance.implementation = proto.implementation
instance.steps = proto.steps
instance.stepsList = [int(i) for i in proto.steps.split(",")]
instance.alpha = proto.alpha
instance.verbosity = proto.verbosity
instance.maxCategoryCount = proto.maxCategoryCount
instance._sdrClassifier = SDRClassifierFactory.read(proto)
instance.learningMode = proto.learningMode
instance.inferenceMode = proto.inferenceMode
instance.recordNum = proto.recordNum
return instance
def _get_operator_param_name_and_values(operator_class_name, task_details):
""" Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab
to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to
be made.
Similarly, the parameter value could come from the notebook's context. All of that happens
here.
Returns:
Dict containing _only_ the keys and values that are required in the Airflow operator definition.
This requires substituting existing keys in the dictionary with their Airflow equivalents
(i.e. adding new keys, and removing the existing ones).
"""
# We make a clone and then remove 'type' and 'up_stream' since these aren't needed for
# the operator's parameters.
operator_task_details = task_details.copy()
if 'type' in operator_task_details.keys():
del operator_task_details['type']
if 'up_stream' in operator_task_details.keys():
del operator_task_details['up_stream']
# We special-case certain operators if we do some translation of the parameter names. This is
# usually the case when we use syntactic sugar to expose the functionality.
# TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via
# getattr() or equivalent. Avoid hard-coding these class-names here.
if (operator_class_name == 'BigQueryOperator'):
return PipelineGenerator._get_bq_execute_params(operator_task_details)
if (operator_class_name == 'BigQueryToCloudStorageOperator'):
return PipelineGenerator._get_bq_extract_params(operator_task_details)
if (operator_class_name == 'GoogleCloudStorageToBigQueryOperator'):
return PipelineGenerator._get_bq_load_params(operator_task_details)
return operator_task_details
def atFontFace(self, declarations):
"""
Embed fonts declared via CSS @font-face rules.
"""
result = self.ruleset([self.selector('*')], declarations)
data = list(result[0].values())[0]
if "src" not in data:
# invalid - source is required, ignore this specification
return {}, {}
names = data["font-family"]
# Font weight
fweight = str(data.get("font-weight", "normal")).lower()
bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
if not bold and fweight != "normal":
log.warn(
self.c.warning("@font-face, unknown value font-weight '%s'", fweight))
# Font style
italic = str(
data.get("font-style", "")).lower() in ("italic", "oblique")
# The "src" attribute can be a CSS group but in that case
# ignore everything except the font URI
uri = data['src']
if not isinstance(data['src'], str):
for part in uri:
if isinstance(part, str):
uri = part
break
src = self.c.getFile(uri, relative=self.c.cssParser.rootPath)
self.c.loadFont(
names,
src,
bold=bold,
italic=italic)
return {}, {}
def set_bank_1(self, bits):
"""
Sets gpios 0-31 if the corresponding bit in bits is set.
bits:= a 32 bit mask with 1 set if the corresponding gpio is
to be set.
A returned status of PI_SOME_PERMITTED indicates that the user
is not allowed to write to one or more of the gpios.
...
pi.set_bank_1(int("111110010000",2))
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_BS1, bits, 0)
return _u2i(res)
def make_form(fields=None, layout=None, layout_class=None, base_class=None,
get_form_field=None, name=None, rules=None, **kwargs):
"""
Make a form according to dict data:
{'fields':[
{'name':'name', 'type':'str', 'label':'label',
'rules':{
'required':
'email'
'required:back|front' #back means server side, front means front side
}
...},
...
],
#layout_class should be defined in settings.ini, just like
#[FORM_LAYOUT_CLASSES]
#bs3 = '#{appname}.form_help.Bootstrap3Layout'
#is also can be a Layout Class
#default is BootstrapLayout
'layout_class':'bs3',
'layout':{
'rows':[
'-- legend title --',
'field_name',
['group_fieldname', 'group_fieldname'],
{'name':'name', 'colspan':3}
],
}
'base_class':'base form class; if not given, Form is used'
}
get_form_field is a callback function, used to define a customized field class
if a name is given, it'll be cached
"""
from uliweb.utils.sorteddict import SortedDict
get_form_field = get_form_field or (lambda name, f:None)
#make fields
props = SortedDict({})
for f in fields or []:
if isinstance(f, BaseField):
props[f.name] = get_form_field(f.name, f) or f
else:
props[f['name']] = get_form_field(f['name'], f) or make_field(**f)
#set other props
if layout:
props['layout'] = layout
if layout_class:
props['layout_class'] = layout_class
if rules:
props['rules'] = rules
layout_class_args = kwargs.pop('layout_class_args', None)
if layout_class_args:
props['layout_class_args'] = layout_class_args
cls = type(name or 'MakeForm_', (base_class or Form,), props)
return cls
def acquire_lock(self):
"""
Acquire the lock. Blocks indefinitely until lock is available
unless `lock_timeout` was supplied. If the lock_timeout elapses,
raises LockTimeout.
"""
# first ensure that a record exists for this session id
try:
self.collection.insert_one(dict(_id=self.id))
except pymongo.errors.DuplicateKeyError:
pass
unlocked_spec = dict(_id=self.id, locked=None)
lock_timer = (
timers.Timer.after(self.lock_timeout)
if self.lock_timeout
else timers.NeverExpires()
)
while not lock_timer.expired():
locked_spec = {'$set': dict(locked=datetime.datetime.utcnow())}
res = self.collection.update_one(unlocked_spec, locked_spec)
if res.raw_result['updatedExisting']:
# we have the lock
break
time.sleep(0.1)
else:
raise LockTimeout(f"Timeout acquiring lock for {self.id}")
self.locked = True
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values::
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
it = iter(seq)
window = deque([], n)
append = window.append
# Initial deque fill
for _ in range(n):
append(next(it, fillvalue))
yield tuple(window)
# Appending new items to the right causes old items to fall off the left
i = 0
for item in it:
append(item)
i = (i + 1) % step
if i % step == 0:
yield tuple(window)
# If there are items from the iterable in the window, pad with the given
# value and emit them.
if (i % step) and (step - i < n):
for _ in range(step - i):
append(fillvalue)
yield tuple(window)
def add_JSsource(self, new_src):
"""add additional js script source(s)"""
if isinstance(new_src, list):
for h in new_src:
self.JSsource.append(h)
elif isinstance(new_src, basestring):
self.JSsource.append(new_src)
else:
raise OptionTypeError("Option: %s Not Allowed For JS Source" % type(new_src))
def oauth_client_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/oauth_clients#create-client"
api_path = "/api/v2/oauth/clients.json"
return self.call(api_path, method="POST", data=data, **kwargs)
def autodiff(func,
wrt=(0,),
optimized=True,
motion='joint',
mode='reverse',
preserve_result=False,
check_dims=True,
input_derivative=INPUT_DERIVATIVE.Required,
verbose=0):
"""Build the vector-Jacobian or Jacobian-vector product of a function `func`.
For a vector-Jacobian product (reverse-mode autodiff):
This function proceeds by finding the primals and adjoints of all the
functions in the call tree.
For a Jacobian-vector product (forward-mode autodiff):
We first find the primals and tangents of all functions in the call tree.
It then wraps the top level function (i.e. the
one passed as `func`) in a slightly more user-friendly interface. It then
compiles the function and attaches to it the global namespace it needs to
run.
Args:
func: The function to take the gradient of.
wrt: A tuple of argument indices to differentiate with respect to. By
default the derivative is taken with respect to the first argument.
optimized: Whether to optimize the gradient function (`True` by default).
motion: Either 'split' (separate functions for forward and backward pass)
or 'joint' motion (a single combined function). Joint mode is the
default.
mode: Either 'forward' or 'reverse' mode. Forward mode is more efficient
when the input dimensionality is lower than the output dimensionality,
whereas it is the opposite for reverse mode.
input_derivative: An enum indicating whether the user must supply an input
derivative, and if not, what the default value is. See the
possible values of INPUT_DERIVATIVE in this file.
preserve_result: A boolean indicating whether or not the generated gradient
function should also return the output of the original function.
If False, the return signature of the input and output functions will be
> val = func(*args)
> df = grad(func,preserve_result=False)
> gradval = df(*args)
If True,
> val = func(*args)
> df = grad(func,preserve_result=True)
> gradval, val = df(*args)
Note that if taking gradients with respect to multiple arguments,
the primal value will be appended to the return signature. Ex:
> val = func(x,y)
> df = grad(func,wrt=(0,1),preserve_result=True)
> dx,dy,val = df(x,y)
verbose: If 1 the source code of the generated functions will be
output to stdout at various stages of the process for debugging
purposes. If > 1, all intermediate code generation steps will print.
Returns:
df: A function that calculates a derivative (see file-level documentation
above
for the kinds of derivatives available) with respect to arguments
specified in `wrt`, using forward or reverse mode according to `mode`.
If using reverse mode, the gradient is calculated in either split
or joint motion according to the value passed in `motion`. If
`preserve_result` is True, the function will also return the original
result of `func`.
"""
# If the function had the with insert_grad_of statements removed, retrieve them
func = getattr(func, 'tangent', func)
# Generate the derivative
node, namespace = autodiff_tree(func, wrt, motion, mode, preserve_result,
check_dims, verbose)
if mode == 'reverse' and motion == 'joint':
# Pull the stack definition and initial gradient into the function body
# TODO: Use first FunctionDef instead of first element
node.body[0] = _create_joint(node.body[0], func, wrt, input_derivative)
if verbose >= 2:
print('INLINED')
print(quoting.to_source(node))
if mode == 'forward':
node = _create_forward(node)
if optimized:
# Optimize the resulting functions
node = optimization.optimize(node)
node = comments.remove_repeated_comments(node)
if verbose >= 1:
print(quoting.to_source(node))
# Compile and return
module = compile_.compile_file(node, namespace)
if mode == 'forward' or motion == 'joint':
return getattr(module, node.body[0].name)
else:
# Compiling the top-level function in split mode makes no sense, but we use
# it for testing; hence we don't care about the source being readable
forward = getattr(module, node.body[0].name)
backward = getattr(module, node.body[1].name)
# Imported here to avoid circular imports
import tangent
def df(*args, **kwargs):
_stack = tangent.Stack()
init_grad = kwargs.pop('init_grad', 1.0)
forward(_stack, *args, **kwargs)
dx = backward(_stack, init_grad, *args, **kwargs)
if len(dx) == 1:
dx, = dx
return dx
return df
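A hedged sketch of calling the function above; the function to differentiate must be a plain def whose source is available for transformation, and with input_derivative left at Required the generated df expects a seed gradient (the argument order shown is illustrative).
def f(x):
    return x * x

df = autodiff(f)     # reverse mode, joint motion by default
print(df(3.0, 1.0))  # seed gradient 1.0 -> d(x*x)/dx at x=3 is 6.0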
def stop(logfile, time_format):
"stop tracking for the active project"
def save_and_output(records):
records = server.stop(records)
write(records, logfile, time_format)
def output(r):
print "worked on %s" % colored(r[0], attrs=['bold'])
print " from %s" % colored(
server.date_to_txt(r[1][0], time_format), 'green')
print " to now, %s" % colored(
server.date_to_txt(r[1][1], time_format), 'green')
print " => %s elapsed" % colored(
time_elapsed(r[1][0], r[1][1]), 'red')
output(records[-1])
save_and_output(read(logfile, time_format))
def knx_to_time(knxdata):
"""Converts a KNX time to a tuple of a time object and the day of week"""
if len(knxdata) != 3:
raise KNXException("Can only convert a 3 Byte object to time")
dow = knxdata[0] >> 5
res = time(knxdata[0] & 0x1f, knxdata[1], knxdata[2])
return [res, dow]
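A worked example: the first octet packs the day of week into the top three bits and the hour into the low five (per KNX DPT 10.001), so 0x4A decodes to day 2, hour 10.
res, dow = knx_to_time([0x4A, 30, 15])  # 0x4A >> 5 == 2, 0x4A & 0x1F == 10
print(res, dow)  # 10:30:15 2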
def ram_dp_rf(clka, clkb, wea, web, addra, addrb, dia, dib, doa, dob):
''' RAM: Dual-Port, Read-First '''
memL = [Signal(intbv(0)[len(dia):]) for _ in range(2**len(addra))]
@always(clka.posedge)
def writea():
if wea:
memL[int(addra)].next = dia
doa.next = memL[int(addra)]
@always(clkb.posedge)
def writeb():
if web:
memL[int(addrb)].next = dib
dob.next = memL[int(addrb)]
return writea, writeb
def get_dependency_graph_for_set(self, id, **kwargs):
"""
Gets dependency graph for a Build Group Record (running and completed).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dependency_graph_for_set(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build record set id. (required)
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_dependency_graph_for_set_with_http_info(id, **kwargs)
else:
(data) = self.get_dependency_graph_for_set_with_http_info(id, **kwargs)
return data
def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:
"""Recursively match `expr` with the given `expr_or_pattern`
Args:
expr_or_pattern: either a direct expression (equal to `expr` for a
successful match), or an instance of :class:`Pattern`.
expr: the expression to be matched
"""
try: # first try expr_or_pattern as a Pattern
return expr_or_pattern.match(expr)
except AttributeError: # expr_or_pattern is an expr, not a Pattern
if expr_or_pattern == expr:
return MatchDict() # success
else:
res = MatchDict()
res.success = False
res.reason = "Expressions '%s' and '%s' are not the same" % (
repr(expr_or_pattern), repr(expr))
return res
def dict2dzn(
objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None
):
"""Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written on a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects.
"""
log = logging.getLogger(__name__)
vals = []
enums = set()
for key, val in objs.items():
if _is_enum(val) and declare_enums:
enum_type = type(val)
enum_name = enum_type.__name__
if enum_name not in enums:
enum_stmt = stmt2enum(
enum_type, declare=declare, assign=assign, wrap=wrap
)
vals.append(enum_stmt)
enums.add(enum_name)
stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
vals.append(stmt)
if fout:
log.debug('Writing file: {}'.format(fout))
with open(fout, 'w') as f:
for val in vals:
f.write('{}\n\n'.format(val))
return vals
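A small sketch of the expected output shape; the exact spelling of the generated statements (spacing, index sets) may differ.
stmts = dict2dzn({'n': 3, 'cap': {1: 10, 2: 20}}, fout='data.dzn')
# stmts is a list of strings along the lines of:
#   'n = 3;'
#   'cap = array1d(1..2, [10, 20]);'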
def add_node(self, payload):
"""
Returns
-------
int
Identifier for the inserted node.
"""
self.nodes.append(Node(len(self.nodes), payload))
return len(self.nodes) - 1
def logging_syslog_facility_local(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
syslog_facility = ET.SubElement(logging, "syslog-facility")
local = ET.SubElement(syslog_facility, "local")
local.text = kwargs.pop('local')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def load_default_moderator():
"""
Find a moderator object
"""
if appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'default':
# Perform spam checks
return moderation.FluentCommentsModerator(None)
elif appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'deny':
# Deny all comments not from known registered models.
return moderation.AlwaysDeny(None)
elif str(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR).lower() == 'none':
# Disables default moderator
return moderation.NullModerator(None)
elif '.' in appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR:
return import_string(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR)(None)
else:
raise ImproperlyConfigured(
"Bad FLUENT_COMMENTS_DEFAULT_MODERATOR value. Provide default/deny/none or a dotted path"
)
def find_or_create_by_name(self, item_name, items_list, item_type):
"""
See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type.
"""
item = self.find_by_name(item_name, items_list)
if not item:
item = self.data_lists[item_type][2](item_name, None)
return item
def _ensure_set_contains(test_object, required_object, test_set_name=None):
"""
Ensure that the required entries (set or keys of a dict) are present in the test set or keys
of the test dict.
:param set|dict test_object: The test set or dict
:param set|dict required_object: The entries that need to be present in the test set (keys of
input dict if input is dict)
:param str test_set_name: Optional name for the set
:raises ParameterError: If a required entry doesn't exist
"""
assert isinstance(test_object, (set, dict)), '%s,%s' % (test_object, test_set_name)
assert isinstance(required_object, (set, dict)), '%s,%s' % (required_object, test_set_name)
# set(dict) = set of keys of the dict
test_set = set(test_object)
required_set = set(required_object)
set_name = ' ' if test_set_name is None else ' entry "%s" of ' % test_set_name
missing_opts = required_set.difference(test_set)
if len(missing_opts) > 0:
raise ParameterError('The following entries are missing in%sthe config file:\n%s'
% (set_name, '\n'.join(missing_opts)))
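A hypothetical usage: verifying that a config dict carries all required keys.
config = {'input_dir': '/data', 'output_dir': '/out'}
_ensure_set_contains(config, {'input_dir', 'output_dir', 'reference'},
                     test_set_name='paths')
# raises ParameterError listing the missing 'reference' entry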
def tvdb_login(api_key):
""" Logs into TVDb using the provided api key
Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister
Online docs: api.thetvdb.com/swagger#!/Authentication/post_login
"""
url = "https://api.thetvdb.com/login"
body = {"apikey": api_key}
status, content = _request_json(url, body=body, cache=False)
if status == 401:
raise MapiProviderException("invalid api key")
elif status != 200 or not content.get("token"):
raise MapiNetworkException("TVDb down or unavailable?")
return content["token"] | Logs into TVDb using the provided api key
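Illustrative only; the key is a placeholder, and the returned token is a JWT sent as a Bearer token on subsequent TVDb v2 requests.
token = tvdb_login("YOUR_TVDB_API_KEY")
headers = {"Authorization": "Bearer %s" % token}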
def complex_validates(validate_rule):
"""Quickly setup attributes validation by one-time, based on `sqlalchemy.orm.validates`.
Don't like `sqlalchemy.orm.validates`, you don't need create many model method,
as long as pass formatted validate rule.
(Cause of SQLAlchemy's validate mechanism, you need assignment this funciton's return value
to a model property.)
For simplicity, complex_validates don't support `include_removes` and `include_backrefs` parameters
that in `sqlalchemy.orm.validates`.
And we don't recommend you use this function multiple times in one model.
Because this will bring many problems, like:
1. Multiple complex_validates's execute order was decide by it's model property name, and by reversed order.
eg. predicates in `validator1 = complex_validates(...)`
will be executed **AFTER** predicates in `validator2 = complex_validates(...)`
2. If you try to validate the same attribute in two (or more) complex_validates, only one of complex_validates
will be execute. (May be this is a bug of SQLAlchemy?)
`complex_validates` was currently based on `sqlalchemy.orm.validates`, so it is difficult to solve these problems.
May be we can try to use `AttributeEvents` directly in further, to provide more reliable function.
Rule Format
-----------
{
column_name: predicate # basic format
(column_name2, column_name3): predicate # you can specify multiple column_names to given predicates
column_name4: (predicate, predicate2) # you can specify multiple predicates to given column_names
column_name5: [(predicate, arg1, ... argN)] # and you can specify what arguments should pass to predicate
# when it doing validate
(column_name6, column_name7): [(predicate, arg1, ... argN), predicate2] # another example
}
Notice: If you want to pass arguments to a predicate, you must wrap the whole command in another list or tuple.
Otherwise, the argument will be treated as another predicate.
So, this is wrong: { column_name: (predicate, arg) }
this is right: { column_name: [(predicate, arg)] }
Predicate
---------
There are some `predefined_predicates`; you can just reference them by name in the validate rule.
{column_name: ['trans_upper']}
Or you can pass your own predicate function to the rule, like this:
def custom_predicate(value):
return value_is_legal # return True or False for valid or invalid value
{column_name: [custom_predicate]}
If you want to change the value during validation, return a `dict(value=new_value)` instead of a boolean
{column_name: lambda value: dict(value = value * 2)} # And you see, we can use lambda as a predicate.
A predicate can also receive extra arguments, passed in the rule:
def multiple(value, target_multiple):
return dict(value= value * target_multiple)
{column_name: (multiple, 10)}
Complete Example
----------------
class People(db.Model):
name = Column(String(100))
age = Column(Integer)
IQ = Column(Integer)
has_lover = Column(Boolean)
validator = complex_validates({
'name': [('min_length', 1), ('max_length', 100)],
('age', 'IQ'): [('min', 0)],
'has_lover': lambda value: not value # hate you!
})"""
ref_dict = {
# column_name: (
# (predicate, arg1, ... argN),
# ...
# )
}
for column_names, predicate_refs in validate_rule.items():
for column_name in _to_tuple(column_names):
ref_dict[column_name] = \
ref_dict.get(column_name, tuple()) + _normalize_predicate_refs(predicate_refs)
return validates(*ref_dict.keys())(
lambda self, name, value: _validate_handler(name, value, ref_dict[name]))
def get_members(self, api=None):
"""Retrieve dataset members
:param api: Api instance
:return: Collection object
"""
api = api or self._API
response = api.get(url=self._URL['members'].format(id=self.id))
data = response.json()
total = response.headers['x-total-matching-query']
members = [Member(api=api, **member) for member in data['items']]
links = [Link(**link) for link in data['links']]
href = data['href']
return Collection(
resource=Member,
href=href,
total=total,
items=members,
links=links,
api=api
)
def K(self, parm):
""" Returns the Gram Matrix
Parameters
----------
parm : np.ndarray
Parameters for the Gram Matrix
Returns
----------
- Gram Matrix (np.ndarray)
"""
# add a small diagonal jitter for numerical stability
return RQ_K_matrix(self.X, parm) + np.identity(self.X.shape[0])*(10**-10)
def delete(self, key, **kwargs):
"""DeleteRange deletes the given range from the key-value store.
A delete request increments the revision of the key-value store and
generates a delete event in the event history for every deleted key.
:param key:
:param kwargs:
:return:
"""
payload = {
"key": _encode(key),
}
payload.update(kwargs)
result = self.post(self.get_url("/kv/deleterange"),
json=payload)
return 'deleted' in result
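A minimal sketch, assuming client is an instance of this gateway class; the key is arbitrary.
if client.delete('foo'):
    print('key removed')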
def load_app_resource(**kwargs):
'''
:param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project"
:raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_RESOURCES_ID" or "DX_PROJECT_CONTEXT_ID" is not found in the environment variables
:returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object
Searches for a data object in the app resources container matching the given keyword arguments. If found, the
object will be cloned into the running job's workspace container, and the handler for it will be returned. If the
app resources container ID is not found in DX_RESOURCES_ID, falls back to looking in the current project.
Example::
@dxpy.entry_point('main')
def main(*args, **kwargs):
x = load_app_resource(name="Indexed genome", classname='file')
dxpy.download_dxfile(x)
'''
if 'project' in kwargs:
raise DXError('Unexpected kwarg: "project"')
if dxpy.JOB_ID is None:
raise DXError('Not called by a job')
if 'DX_RESOURCES_ID' not in os.environ and 'DX_PROJECT_CONTEXT_ID' not in os.environ:
raise DXError('App resources container ID could not be found')
kwargs['project'] = os.environ.get('DX_RESOURCES_ID', os.environ.get('DX_PROJECT_CONTEXT_ID'))
kwargs['return_handler'] = True
return find_one_data_object(**kwargs)
def heatmap(dm, partition=None, cmap=CM.Blues, fontsize=10):
""" heatmap(dm, partition=None, cmap=CM.Blues, fontsize=10)
Produce a 2D plot of the distance matrix, with values encoded by
coloured cells.
Args:
partition: treeCl.Partition object - if supplied, will reorder
rows and columns of the distance matrix to reflect
the groups defined by the partition
cmap: matplotlib colourmap object - the colour palette to use
fontsize: int or None - sets the size of the locus labels
Returns:
matplotlib plottable object
"""
assert isinstance(dm, DistanceMatrix)
datamax = float(np.abs(dm.values).max())
length = dm.shape[0]
if partition:
sorting = np.array(flatten_list(partition.get_membership()))
new_dm = dm.reorder(dm.df.columns[sorting])
else:
new_dm = dm
fig = plt.figure()
ax = fig.add_subplot(111)
ax.xaxis.tick_top()
ax.grid(False)
tick_positions = np.array(list(range(length))) + 0.5
if fontsize is not None:
ax.set_yticks(tick_positions)
ax.set_xticks(tick_positions)
ax.set_xticklabels(new_dm.df.columns, rotation=90, fontsize=fontsize, ha='center')
ax.set_yticklabels(new_dm.df.index, fontsize=fontsize, va='center')
cbar_ticks_at = [0, 0.5 * datamax, datamax]
cax = ax.imshow(
new_dm.values,
interpolation='nearest',
extent=[0., length, length, 0.],
vmin=0,
vmax=datamax,
cmap=cmap,
)
cbar = fig.colorbar(cax, ticks=cbar_ticks_at, format='%1.2g')
cbar.set_label('Distance')
return fig
def apply(self, func, applyto='measurement', noneval=nan, setdata=False):
"""
Apply func either to self or to associated data.
If data is not already parsed, try and read it.
Parameters
----------
func : callable
The function either accepts a measurement object or an FCS object.
Does some calculation and returns the result.
applyto : ['data' | 'measurement']
* 'data' : apply to associated data
* 'measurement' : apply to measurement object itself.
noneval : obj
Value to return if `applyto` is 'data', but no data is available.
setdata : bool
Used only if data is not already set.
If true parsed data will be assigned to self.data
Otherwise data will be discarded at end of apply.
"""
applyto = applyto.lower()
if applyto == 'data':
if self.data is not None:
data = self.data
elif self.datafile is None:
return noneval
else:
data = self.read_data()
if setdata:
self.data = data
return func(data)
elif applyto == 'measurement':
return func(self)
else:
raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto)
def submit_job(job_ini, username, hazard_job_id=None):
"""
Create a job object from the given job.ini file in the job directory
and run it in a new process. Returns the job ID and PID.
"""
job_id = logs.init('job')
oq = engine.job_from_file(
job_ini, job_id, username, hazard_calculation_id=hazard_job_id)
pik = pickle.dumps(oq, protocol=0) # human readable protocol
code = RUNCALC % dict(job_id=job_id, hazard_job_id=hazard_job_id, pik=pik,
username=username)
tmp_py = gettemp(code, suffix='.py')
# print(code, tmp_py) # useful when debugging
devnull = subprocess.DEVNULL
popen = subprocess.Popen([sys.executable, tmp_py],
stdin=devnull, stdout=devnull, stderr=devnull)
threading.Thread(target=popen.wait).start()
logs.dbcmd('update_job', job_id, {'pid': popen.pid})
return job_id, popen.pid
def dir_df_boot(dir_df, nb=5000, par=False):
"""
Performs a bootstrap for direction DataFrame with optional parametric bootstrap
Parameters
_________
dir_df : Pandas DataFrame with columns:
dir_dec : mean declination
dir_inc : mean inclination
Required for parametric bootstrap
dir_n : number of data points in mean
dir_k : Fisher k statistic for mean
nb : number of bootstraps, default is 5000
par : if True, do a parametric bootstrap
Returns
_______
BDIs: nested list of bootstrapped mean Dec,Inc pairs
"""
N = dir_df.dir_dec.values.shape[0] # number of data points
BDIs = []
for k in range(nb):
pdir_df = dir_df.sample(n=N, replace=True) # bootstrap pseudosample
pdir_df.reset_index(inplace=True) # reset the index
if par: # do a parametric bootstrap
for i in pdir_df.index: # step through the pseudosample
n = pdir_df.loc[i, 'dir_n'] # get number of samples/site
# get ks for each sample
ks = np.ones(shape=n)*pdir_df.loc[i, 'dir_k']
# draw a fisher distributed set of directions
decs, incs = fshdev(ks)
di_block = np.column_stack((decs, incs))
# rotate them to the mean
di_block = dodirot_V(
di_block, pdir_df.loc[i, 'dir_dec'], pdir_df.loc[i, 'dir_inc'])
# get the new mean direction for the pseudosample
fpars = fisher_mean(di_block)
# replace the pseudo sample mean direction
pdir_df.loc[i, 'dir_dec'] = fpars['dec']
pdir_df.loc[i, 'dir_inc'] = fpars['inc']
# get bootstrap mean bootstrap sample
bfpars = dir_df_fisher_mean(pdir_df)
BDIs.append([bfpars['dec'], bfpars['inc']])
return BDIs
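A hypothetical site-mean table and a parametric run; dir_n and dir_k are only consulted when par=True.
import pandas as pd

dir_df = pd.DataFrame({
    'dir_dec': [11., 15., 18.], 'dir_inc': [44., 47., 50.],
    'dir_n': [8, 10, 7], 'dir_k': [30., 45., 25.],
})
BDIs = dir_df_boot(dir_df, nb=1000, par=True)  # 1000 [dec, inc] pairs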
def remove_container(self, container, **kwargs):
"""
Identical to :meth:`dockermap.client.base.DockerClientWrapper.remove_container` with additional logging.
"""
self.push_log("Removing container '{0}'.".format(container))
set_raise_on_error(kwargs)
super(DockerFabricClient, self).remove_container(container, **kwargs)
def _run_atstart():
'''Hook frameworks must invoke this before running the main hook body.'''
global _atstart
for callback, args, kwargs in _atstart:
callback(*args, **kwargs)
del _atstart[:]
def _parse_uri(uri):
"""Parse and validate MediaFire URI."""
tokens = urlparse(uri)
if tokens.netloc != '':
logger.error("Invalid URI: %s", uri)
raise ValueError("MediaFire URI format error: "
"host should be empty - mf:///path")
if tokens.scheme != '' and tokens.scheme != URI_SCHEME:
raise ValueError("MediaFire URI format error: "
"must start with 'mf:' or '/'")
return posixpath.normpath(tokens.path)
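Examples of accepted and rejected inputs, assuming URI_SCHEME is 'mf':
_parse_uri('mf:///Pictures/2019')  # -> '/Pictures/2019'
_parse_uri('/Pictures//2019/')     # -> '/Pictures/2019' (normalized)
_parse_uri('mf://host/path')       # -> ValueError: host must be empty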
def execute(self):
"""
Main method to call to run the worker
"""
self.prepare_models()
self.prepare_worker()
if self.options.print_options:
self.print_options()
self.run()
def get(self, source, media, collection=None, start_date=None, days=None, query=None, years=None, genres=None,
languages=None, countries=None, runtimes=None, ratings=None, certifications=None, networks=None,
status=None, **kwargs):
"""Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
        :param status: (TV) Show status (e.g. `returning series`, `in production`, `ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video
"""
if source not in ['all', 'my']:
raise ValueError('Unknown collection type: %s' % (source,))
if media not in ['dvd', 'movies', 'shows']:
raise ValueError('Unknown media type: %s' % (media,))
# Default `start_date` to today when only `days` is provided
if start_date is None and days:
start_date = datetime.utcnow()
# Request calendar collection
response = self.http.get(
'/calendars/%s/%s%s' % (
source, media,
('/' + collection) if collection else ''
),
params=[
start_date.strftime('%Y-%m-%d') if start_date else None,
days
],
query={
'query': query,
'years': years,
'genres': genres,
'languages': languages,
'countries': countries,
'runtimes': runtimes,
'ratings': ratings,
'certifications': certifications,
# TV
'networks': networks,
'status': status
},
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
# Map items
if media == 'shows':
return SummaryMapper.episodes(
self.client, items,
parse_show=True
)
return SummaryMapper.movies(self.client, items) | Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, `ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video |
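
A hedged usage sketch for the calendar get method above; how the instance (named `calendar` here) is obtained from the Trakt client is library-specific and assumed.

# fetch the next two weeks of show premieres from the authenticated user's calendar
items = calendar.get(
    'my', 'shows',
    collection='premieres',
    days=14,
    genres=['drama'],
)
for item in items:
    print(item)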
def get_sub_commands(parser: argparse.ArgumentParser) -> List[str]:
"""Get a list of sub-commands for an ArgumentParser"""
sub_cmds = []
    # Check if this parser has sub-commands
if parser is not None and parser._subparsers is not None:
# Find the _SubParsersAction for the sub-commands of this parser
for action in parser._subparsers._actions:
if isinstance(action, argparse._SubParsersAction):
for sub_cmd, sub_cmd_parser in action.choices.items():
sub_cmds.append(sub_cmd)
# Look for nested sub-commands
for nested_sub_cmd in get_sub_commands(sub_cmd_parser):
sub_cmds.append('{} {}'.format(sub_cmd, nested_sub_cmd))
break
sub_cmds.sort()
return sub_cmds | Get a list of sub-commands for an ArgumentParser |
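
A self-contained example for get_sub_commands above, using only the standard library:

import argparse

parser = argparse.ArgumentParser(prog='tool')
subparsers = parser.add_subparsers()
remote = subparsers.add_parser('remote')
remote_sub = remote.add_subparsers()
remote_sub.add_parser('add')
remote_sub.add_parser('remove')
subparsers.add_parser('status')

print(get_sub_commands(parser))
# -> ['remote', 'remote add', 'remote remove', 'status']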
def pos_by_percent(self, x_percent, y_percent):
"""
        Finds the point inside the box at the given fractional position.
        :param x_percent: fraction (0.0-1.0) of the width from the left edge
        :param y_percent: fraction (0.0-1.0) of the height from the top edge
        :return: A point inside the box
"""
x = round(x_percent * self.width)
y = round(y_percent * self.height)
        return int(x), int(y) | Finds the point inside the box at the given fractional position.
    :param x_percent: fraction (0.0-1.0) of the width from the left edge
    :param y_percent: fraction (0.0-1.0) of the height from the top edge
:return: A point inside the box |
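
A minimal sketch of pos_by_percent above on a hypothetical 200x100 box class:

class Box:
    def __init__(self, width, height):
        self.width, self.height = width, height

    def pos_by_percent(self, x_percent, y_percent):
        # same arithmetic as above: scale, round, cast to int
        return int(round(x_percent * self.width)), int(round(y_percent * self.height))

Box(200, 100).pos_by_percent(0.25, 0.5)  # -> (50, 50)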
def _retf(ins):
""" Returns from a procedure / function a Floating Point (40bits) value
"""
output = _float_oper(ins.quad[1])
output.append('#pragma opt require a,bc,de')
output.append('jp %s' % str(ins.quad[2]))
    return output | Returns a Floating Point (40 bits) value from a procedure / function
def map_single_end(credentials, instance_config, instance_name,
script_dir, index_dir, fastq_file, output_dir,
num_threads=None, seed_start_lmax=None,
mismatch_nmax=None, multimap_nmax=None,
splice_min_overhang=None,
out_mult_nmax=None, sort_bam=True, keep_unmapped=False,
self_destruct=True, compressed=True,
**kwargs):
"""Maps single-end reads using STAR.
Reads are expected in FASTQ format. By default, they are also expected to
be compressed with gzip.
- recommended machine type: "n1-standard-16" (60 GB of RAM, 16 vCPUs).
- recommended disk size: depends on size of FASTQ files, at least 128 GB.
TODO: docstring"""
if sort_bam:
out_sam_type = 'BAM SortedByCoordinate'
else:
out_sam_type = 'BAM Unsorted'
# template expects a list of FASTQ files
fastq_files = fastq_file
if isinstance(fastq_files, (str, _oldstr)):
fastq_files = [fastq_file]
template = _TEMPLATE_ENV.get_template(
os.path.join('map_single-end.sh'))
startup_script = template.render(
script_dir=script_dir,
index_dir=index_dir,
fastq_files=fastq_files,
output_dir=output_dir,
num_threads=num_threads,
seed_start_lmax=seed_start_lmax,
self_destruct=self_destruct,
mismatch_nmax=mismatch_nmax,
multimap_nmax=multimap_nmax,
splice_min_overhang=splice_min_overhang,
out_mult_nmax=out_mult_nmax,
keep_unmapped=keep_unmapped,
compressed=compressed,
out_sam_type=out_sam_type)
if len(startup_script) > 32768:
raise ValueError('Startup script larger than 32,768 bytes!')
#print(startup_script)
op_name = instance_config.create_instance(
credentials, instance_name, startup_script=startup_script, **kwargs)
return op_name | Maps single-end reads using STAR.
Reads are expected in FASTQ format. By default, they are also expected to
be compressed with gzip.
- recommended machine type: "n1-standard-16" (60 GB of RAM, 16 vCPUs).
- recommended disk size: depends on size of FASTQ files, at least 128 GB.
TODO: docstring |
def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None, LifetimeInSeconds=None, AssignmentDurationInSeconds=None, Reward=None, Title=None, Keywords=None, Description=None, Question=None, RequesterAnnotation=None, QualificationRequirements=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):
"""
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
    LocaleValues (list) --The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code or a supported ISO 3166-2 subdivision. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
    Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
    Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
"""
pass | The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code or a supported ISO 3166-2 subdivision. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) -- |
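
A minimal sketch of the call documented above against the real boto3 MTurk client and its sandbox endpoint; the ExternalURL and HIT text are placeholders.

import boto3

mturk = boto3.client(
    'mturk',
    region_name='us-east-1',
    endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com',
)
# an ExternalQuestion pointing at a placeholder task page
question = """<ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
  <ExternalURL>https://example.com/task</ExternalURL>
  <FrameHeight>600</FrameHeight>
</ExternalQuestion>"""
response = mturk.create_hit(
    MaxAssignments=3,
    LifetimeInSeconds=3600,
    AssignmentDurationInSeconds=600,
    Reward='0.05',
    Title='Answer a simple question',
    Description='Look at an image and answer one question about it.',
    Question=question,
)
print(response['HIT']['HITId'])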
def handle_hooks(stage, hooks, provider, context, dump, outline):
"""Handle pre/post hooks.
Args:
stage (str): The name of the hook stage - pre_build/post_build.
hooks (list): A list of dictionaries containing the hooks to execute.
provider (:class:`stacker.provider.base.BaseProvider`): The provider
the current stack is using.
context (:class:`stacker.context.Context`): The current stacker
context.
dump (bool): Whether running with dump set or not.
outline (bool): Whether running with outline set or not.
"""
if not outline and not dump and hooks:
utils.handle_hooks(
stage=stage,
hooks=hooks,
provider=provider,
context=context
) | Handle pre/post hooks.
Args:
stage (str): The name of the hook stage - pre_build/post_build.
hooks (list): A list of dictionaries containing the hooks to execute.
provider (:class:`stacker.provider.base.BaseProvider`): The provider
the current stack is using.
context (:class:`stacker.context.Context`): The current stacker
context.
dump (bool): Whether running with dump set or not.
outline (bool): Whether running with outline set or not. |
def isdir(self, path=None, client_kwargs=None, virtual_dir=True,
assume_exists=None):
"""
Return True if path is an existing directory.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
            virtual_dir (bool): If True, also report a directory as existing
                when it exists only virtually as an object path prefix,
                without a dedicated directory object.
            assume_exists (bool or None): The value to return when there is
                not enough permission to determine whether the file exists.
                If set to None, the permission exception is reraised
                (default behavior). If set to True or False, return this
                value.
Returns:
bool: True if directory exists.
"""
relative = self.relpath(path)
if not relative:
# Root always exists and is a directory
return True
if path[-1] == '/' or self.is_locator(relative, relative=True):
exists = self.exists(path=path, client_kwargs=client_kwargs,
assume_exists=assume_exists)
if exists:
return True
            # Some directories exist only virtually in object paths and don't
            # have headers.
elif virtual_dir:
try:
next(self.list_objects(relative, relative=True,
max_request_entries=1))
return True
except (StopIteration, ObjectNotFoundError,
UnsupportedOperation):
return False
return False | Return True if path is an existing directory.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
        virtual_dir (bool): If True, also report a directory as existing
            when it exists only virtually as an object path prefix,
            without a dedicated directory object.
        assume_exists (bool or None): The value to return when there is
            not enough permission to determine whether the file exists.
            If set to None, the permission exception is reraised
            (default behavior). If set to True or False, return this
            value.
Returns:
bool: True if directory exists. |
def main(arguments=None):
"""Main command line entry point."""
if not arguments:
arguments = sys.argv[1:]
wordlist, sowpods, by_length, start, end = argument_parser(arguments)
for word in wordlist:
pretty_print(
word,
anagrams_in_word(word, sowpods, start, end),
by_length,
) | Main command line entry point. |
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r) | Handle call message. |
def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Check to see if an RDS subnet group exists.
CLI example::
salt myminion boto_rds.subnet_group_exists my-param-group \
region=us-east-1
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'exists': bool(conn)}
rds = conn.describe_db_subnet_groups(DBSubnetGroupName=name)
return {'exists': bool(rds)}
except ClientError as e:
if "DBSubnetGroupNotFoundFault" in e.message:
return {'exists': False}
else:
return {'error': __utils__['boto3.get_error'](e)} | Check to see if an RDS subnet group exists.
CLI example::
salt myminion boto_rds.subnet_group_exists my-param-group \
region=us-east-1 |
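
The in-place fix above matches the fault name in str(e); a more precise pattern inspects the structured error code on the exception, sketched here with `conn` assumed to be a boto3 RDS client:

from botocore.exceptions import ClientError

try:
    conn.describe_db_subnet_groups(DBSubnetGroupName=name)
    exists = True
except ClientError as e:
    # e.response carries the parsed AWS error document
    if e.response['Error']['Code'] == 'DBSubnetGroupNotFoundFault':
        exists = False
    else:
        raise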
def progress_view(shell):
""" updates the view """
while not ShellProgressView.done:
_, col = get_window_dim()
col = int(col)
progress = get_progress_message()
if '\n' in progress:
prog_list = progress.split('\n')
prog_val = len(prog_list[-1])
else:
prog_val = len(progress)
buffer_size = col - prog_val - 4
if ShellProgressView.progress_bar:
doc = u'{}:{}'.format(progress, ShellProgressView.progress_bar)
shell.spin_val = -1
counter = 0
ShellProgressView.heart_bar = ''
else:
if progress and not ShellProgressView.done:
heart_bar = ShellProgressView.heart_bar
if shell.spin_val >= 0:
beat = ShellProgressView.heart_beat_values[_get_heart_frequency()]
heart_bar += beat
heart_bar = heart_bar[len(beat):]
len_beat = len(heart_bar)
if len_beat > buffer_size:
heart_bar = heart_bar[len_beat - buffer_size:]
while len(heart_bar) < buffer_size:
beat = ShellProgressView.heart_beat_values[_get_heart_frequency()]
heart_bar += beat
else:
shell.spin_val = 0
counter = 0
while counter < buffer_size:
beat = ShellProgressView.heart_beat_values[_get_heart_frequency()]
heart_bar += beat
counter += len(beat)
ShellProgressView.heart_bar = heart_bar
doc = u'{}:{}'.format(progress, ShellProgressView.heart_bar)
shell.cli.buffers['progress'].reset(
initial_document=Document(doc))
shell.cli.request_redraw()
sleep(shell.intermediate_sleep)
ShellProgressView.done = False
ShellProgressView.progress_bar = ''
shell.spin_val = -1
sleep(shell.final_sleep)
return True | updates the view |
def set_climate_hold(self, index, climate, hold_type="nextTransition"):
''' Set a climate hold - ie away, home, sleep '''
body = {"selection": {
"selectionType": "thermostats",
"selectionMatch": self.thermostats[index]['identifier']},
"functions": [{"type": "setHold", "params": {
"holdType": hold_type,
"holdClimateRef": climate
}}]}
log_msg_action = "set climate hold"
return self.make_request(body, log_msg_action) | Set a climate hold - ie away, home, sleep |
def chop(array, epsilon=1e-10):
"""
Truncate small values of a complex array.
Args:
        array (array_like): array in which to truncate small values.
epsilon (float): threshold.
Returns:
np.array: A new operator with small values set to zero.
"""
ret = np.array(array)
if np.isrealobj(ret):
ret[abs(ret) < epsilon] = 0.0
else:
ret.real[abs(ret.real) < epsilon] = 0.0
ret.imag[abs(ret.imag) < epsilon] = 0.0
return ret | Truncate small values of a complex array.
Args:
    array (array_like): array in which to truncate small values.
epsilon (float): threshold.
Returns:
np.array: A new operator with small values set to zero. |
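
A quick example for chop above:

import numpy as np

a = np.array([1.0, 1e-12, 0.5 + 1e-14j])  # complex because of the last entry
chop(a)  # -> array([1. +0.j, 0. +0.j, 0.5+0.j])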
def update_room_name(self):
"""Updates self.name and returns True if room name has changed."""
try:
response = self.client.api.get_room_name(self.room_id)
if "name" in response and response["name"] != self.name:
self.name = response["name"]
return True
else:
return False
except MatrixRequestError:
return False | Updates self.name and returns True if room name has changed. |
def list_handler(HandlerResult="nparray"):
"""Wraps a function to handle list inputs."""
def decorate(func):
def wrapper(*args, **kwargs):
"""Run through the wrapped function once for each array element.
:param HandlerResult: output type. Defaults to numpy arrays.
"""
sequences = []
enumsUnitCheck = enumerate(args)
argsList = list(args)
#This for loop identifies pint unit objects and strips them
#of their units.
for num, arg in enumsUnitCheck:
if type(arg) == type(1 * u.m):
argsList[num] = arg.to_base_units().magnitude
enumsUnitless = enumerate(argsList)
#This for loop identifies arguments that are sequences and
#adds their index location to the list 'sequences'.
for num, arg in enumsUnitless:
if isinstance(arg, (list, tuple, np.ndarray)):
sequences.append(num)
#If there are no sequences to iterate through, simply return
#the function.
if len(sequences) == 0:
result = func(*args, **kwargs)
else:
#iterant keeps track of how many times we've iterated and
#limiter stops the loop once we've iterated as many times
#as there are list elements. Without this check, a few
#erroneous runs will occur, appending the last couple values
#to the end of the list multiple times.
#
#We only care about the length of sequences[0] because this
#function is recursive, and sequences[0] is always the relevant
#sequences for any given run.
limiter = len(argsList[sequences[0]])
iterant = 0
result = []
for num in sequences:
for arg in argsList[num]:
if iterant >= limiter:
break
#We can safely replace the entire list argument
#with a single element from it because of the looping
#we're doing. We redefine the object, but that
#definition remains within this namespace and does
#not penetrate further up the function.
argsList[num] = arg
#Here we dive down the rabbit hole. This ends up
#creating a multi-dimensional array shaped by the
#sizes and shapes of the lists passed.
                        #HandlerResult is already visible to the recursive
                        #call through the closure; passing it as a keyword
                        #would leak it into func's kwargs and raise TypeError.
                        result.append(wrapper(*argsList, **kwargs))
iterant += 1
#HandlerResult allows the user to specify what type to
#return the generated sequence as. It defaults to numpy
#arrays because functions tend to handle them better, but if
#the user does not wish to import numpy the base Python options
#are available to them.
if HandlerResult == "nparray":
result = np.array(result)
elif HandlerResult == "tuple":
result = tuple(result)
elif HandlerResult == "list":
                result = list(result)
return result
return wrapper
return decorate | Wraps a function to handle list inputs. |
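
A usage sketch for list_handler above, assuming the module-level pint registry `u` referenced inside the wrapper is available:

import numpy as np

@list_handler()
def circle_area(radius):
    return np.pi * radius ** 2

circle_area([1.0, 2.0, 3.0])
# -> array([ 3.14159265, 12.56637061, 28.27433388])
# note: with the default HandlerResult, scalar inputs come back as 0-d numpy arrays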
def from_api_repr(cls, resource, client):
"""Factory: construct a zone given its API representation
:type resource: dict
:param resource: zone resource representation returned from the API
:type client: :class:`google.cloud.dns.client.Client`
:param client: Client which holds credentials and project
configuration for the zone.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: Zone parsed from ``resource``.
"""
name = resource.get("name")
dns_name = resource.get("dnsName")
if name is None or dns_name is None:
raise KeyError(
"Resource lacks required identity information:" '["name"]["dnsName"]'
)
zone = cls(name, dns_name, client=client)
zone._set_properties(resource)
return zone | Factory: construct a zone given its API representation
:type resource: dict
:param resource: zone resource representation returned from the API
:type client: :class:`google.cloud.dns.client.Client`
:param client: Client which holds credentials and project
configuration for the zone.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: Zone parsed from ``resource``. |
def add_paragraph(self, text='', style=None):
"""
Return a paragraph newly added to the end of the content in this
cell. If present, *text* is added to the paragraph in a single run.
If specified, the paragraph style *style* is applied. If *style* is
not specified or is |None|, the result is as though the 'Normal'
style was applied. Note that the formatting of text in a cell can be
influenced by the table style. *text* can contain tab (``\\t``)
characters, which are converted to the appropriate XML form for
a tab. *text* can also include newline (``\\n``) or carriage return
(``\\r``) characters, each of which is converted to a line break.
"""
return super(_Cell, self).add_paragraph(text, style) | Return a paragraph newly added to the end of the content in this
cell. If present, *text* is added to the paragraph in a single run.
If specified, the paragraph style *style* is applied. If *style* is
not specified or is |None|, the result is as though the 'Normal'
style was applied. Note that the formatting of text in a cell can be
influenced by the table style. *text* can contain tab (``\\t``)
characters, which are converted to the appropriate XML form for
a tab. *text* can also include newline (``\\n``) or carriage return
(``\\r``) characters, each of which is converted to a line break. |
def first(o):
"""If o is a ISeq, return the first element from o. If o is None, return
None. Otherwise, coerces o to a Seq and returns the first."""
if o is None:
return None
if isinstance(o, ISeq):
return o.first
s = to_seq(o)
if s is None:
return None
    return s.first | If o is an ISeq, return the first element from o. If o is None, return
None. Otherwise, coerces o to a Seq and returns the first. |
def mod_bufsize(iface, *args, **kwargs):
'''
Modify network interface buffers (currently linux only)
CLI Example:
.. code-block:: bash
salt '*' network.mod_bufsize tx=<val> rx=<val> rx-mini=<val> rx-jumbo=<val>
'''
if __grains__['kernel'] == 'Linux':
if os.path.exists('/sbin/ethtool'):
return _mod_bufsize_linux(iface, *args, **kwargs)
return False | Modify network interface buffers (currently linux only)
CLI Example:
.. code-block:: bash
salt '*' network.mod_bufsize tx=<val> rx=<val> rx-mini=<val> rx-jumbo=<val> |
def get_preferred(self, addr_1, addr_2):
'''Return the preferred address.'''
if addr_1 > addr_2:
addr_1, addr_2 = addr_2, addr_1
return self._cache.get((addr_1, addr_2)) | Return the preferred address. |
def checkUserManage(worksheet, request, redirect=True):
""" Checks if the current user has granted access to the worksheet
and if has also privileges for managing it. If the user has no
granted access and redirect's value is True, redirects to
/manage_results view. Otherwise, does nothing
"""
allowed = worksheet.checkUserManage()
if allowed == False and redirect == True:
# Redirect to /manage_results view
destination_url = worksheet.absolute_url() + "/manage_results"
        request.response.redirect(destination_url) | Checks if the current user has been granted access to the worksheet
    and also has privileges for managing it. If the user lacks the
    required access and redirect's value is True, redirects to the
    /manage_results view. Otherwise, does nothing.
def get_version_url(self, version):
"""
Retrieve the URL for the designated version of the hive.
"""
for each_version in self.other_versions():
if version == each_version['version'] and 'location' in each_version:
return each_version.get('location')
raise VersionNotInHive(version) | Retrieve the URL for the designated version of the hive. |
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
if kwargs:
# url is already a UrlEncoded. We have to manually declare
# the query to be encoded or it will get automatically URL
# encoded by being appended to url.
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
message = {
'method': "DELETE",
'headers': headers,
}
return self.request(url, message) | Sends a DELETE request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict`` |
def add_self_defined_objects(raw_objects):
"""Add self defined command objects for internal processing ;
bp_rule, _internal_host_up, _echo, _internal_host_check, _interna_service_check
:param raw_objects: Raw config objects dict
:type raw_objects: dict
:return: raw_objects with some more commands
:rtype: dict
"""
logger.info("- creating internally defined commands...")
if 'command' not in raw_objects:
raw_objects['command'] = []
# Business rule
raw_objects['command'].append({
'command_name': 'bp_rule',
'command_line': 'bp_rule',
'imported_from': 'alignak-self'
})
# Internal host checks
raw_objects['command'].append({
'command_name': '_internal_host_up',
'command_line': '_internal_host_up',
'imported_from': 'alignak-self'
})
raw_objects['command'].append({
'command_name': '_internal_host_check',
# Command line must contain: state_id;output
'command_line': '_internal_host_check;$ARG1$;$ARG2$',
'imported_from': 'alignak-self'
})
# Internal service check
raw_objects['command'].append({
'command_name': '_echo',
'command_line': '_echo',
'imported_from': 'alignak-self'
})
raw_objects['command'].append({
'command_name': '_internal_service_check',
# Command line must contain: state_id;output
'command_line': '_internal_service_check;$ARG1$;$ARG2$',
'imported_from': 'alignak-self'
    })
    return raw_objects | Add self defined command objects for internal processing:
    bp_rule, _internal_host_up, _echo, _internal_host_check, _internal_service_check
:param raw_objects: Raw config objects dict
:type raw_objects: dict
:return: raw_objects with some more commands
:rtype: dict |
def _snakify_name(self, name):
"""Snakify a name string.
In this context, "to snakify" means to strip a name of all
diacritics, convert it to lower case, and replace any spaces
inside the name with hyphens.
This way the name is made "machine-friendly", and ready to be
combined with a second name component into a full "snake_case"
name.
:param str name: A name to snakify.
:return str: A snakified name.
"""
name = self._strip_diacritics(name)
name = name.lower()
name = name.replace(' ', '-')
return name | Snakify a name string.
In this context, "to snakify" means to strip a name of all
diacritics, convert it to lower case, and replace any spaces
inside the name with hyphens.
This way the name is made "machine-friendly", and ready to be
combined with a second name component into a full "snake_case"
name.
:param str name: A name to snakify.
:return str: A snakified name. |
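
_strip_diacritics is not shown above; a standalone sketch of the same idea using unicodedata (the helper name is hypothetical):

import unicodedata

def strip_diacritics(name):
    # decompose accented characters, then drop the combining marks
    decomposed = unicodedata.normalize('NFKD', name)
    return ''.join(c for c in decomposed if not unicodedata.combining(c))

strip_diacritics('Café Noir').lower().replace(' ', '-')  # -> 'cafe-noir'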
def key_from_keybase(username, fingerprint=None):
"""Look up a public key from a username"""
url = keybase_lookup_url(username)
resp = requests.get(url)
if resp.status_code == 200:
j_resp = json.loads(polite_string(resp.content))
if 'them' in j_resp and len(j_resp['them']) == 1:
kb_obj = j_resp['them'][0]
if fingerprint:
return fingerprint_from_keybase(fingerprint, kb_obj)
else:
if 'public_keys' in kb_obj \
and 'pgp_public_keys' in kb_obj['public_keys']:
key = kb_obj['public_keys']['primary']
return massage_key(key)
return None | Look up a public key from a username |
def revert(self, unchanged_only=False):
"""Revert all files in this changelist
:param unchanged_only: Only revert unchanged files
:type unchanged_only: bool
:raises: :class:`.ChangelistError`
"""
if self._reverted:
raise errors.ChangelistError('This changelist has been reverted')
change = self._change
if self._change == 0:
change = 'default'
cmd = ['revert', '-c', str(change)]
if unchanged_only:
cmd.append('-a')
files = [f.depotFile for f in self._files]
if files:
cmd += files
self._connection.run(cmd)
self._files = []
self._reverted = True | Revert all files in this changelist
:param unchanged_only: Only revert unchanged files
:type unchanged_only: bool
:raises: :class:`.ChangelistError` |
def text_extract(path, password=None):
"""Extract text from a PDF file"""
pdf = Info(path, password).pdf
return [pdf.getPage(i).extractText() for i in range(pdf.getNumPages())] | Extract text from a PDF file |
def create_process_behavior(self, behavior, process_id):
"""CreateProcessBehavior.
[Preview API] Creates a single behavior in the given process.
:param :class:`<ProcessBehaviorCreateRequest> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehaviorCreateRequest>` behavior:
:param str process_id: The ID of the process
:rtype: :class:`<ProcessBehavior> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehavior>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
content = self._serialize.body(behavior, 'ProcessBehaviorCreateRequest')
response = self._send(http_method='POST',
location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('ProcessBehavior', response) | CreateProcessBehavior.
[Preview API] Creates a single behavior in the given process.
:param :class:`<ProcessBehaviorCreateRequest> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehaviorCreateRequest>` behavior:
:param str process_id: The ID of the process
:rtype: :class:`<ProcessBehavior> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehavior>` |
def get_relationships_for_source_on_date(self, source_id, from_, to):
"""Pass through to provider RelationshipLookupSession.get_relationships_for_source_on_date"""
# Implemented from azosid template for -
# osid.relationship.RelationshipLookupSession.get_relationships_for_source_on_date_template
if self._can('lookup'):
return self._provider_session.get_relationships_for_source_on_date(source_id, from_, to)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_relationship_query()
query.match_source_id(source_id, match=True)
query.match_date(from_, to, match=True)
return self._try_harder(query) | Pass through to provider RelationshipLookupSession.get_relationships_for_source_on_date |
def _initializer_wrapper(actual_initializer, *rest):
"""
We ignore SIGINT. It's up to our parent to kill us in the typical
condition of this arising from ``^C`` on a terminal. If someone is
manually killing us with that signal, well... nothing will happen.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if actual_initializer is not None:
actual_initializer(*rest) | We ignore SIGINT. It's up to our parent to kill us in the typical
condition of this arising from ``^C`` on a terminal. If someone is
manually killing us with that signal, well... nothing will happen. |
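A sketch of how such a wrapper is typically wired into a worker pool: pass it as the Pool initializer with the real initializer as the first initarg. The setup function below is hypothetical:
import multiprocessing

def _setup(prefix):
    # hypothetical per-worker setup
    print(prefix, 'worker ready')

pool = multiprocessing.Pool(
    processes=2,
    initializer=_initializer_wrapper,
    initargs=(_setup, 'demo'),  # actual initializer first, then its own args
)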
def add_seconds(self, datetimestr, n):
"""Returns a time that n seconds after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of seconds, value can be negative
**Chinese documentation**
Return the time N seconds after the given date.
"""
a_datetime = self.parse_datetime(datetimestr)
return a_datetime + timedelta(seconds=n) | Returns the time n seconds after a given time.
:param datetimestr: a datetime object or a datetime str
:param n: number of seconds, value can be negative
**Chinese documentation**
Return the time N seconds after the given date. |
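The same arithmetic stands alone, assuming parse_datetime yields a datetime:
from datetime import datetime, timedelta

a_datetime = datetime(2014, 1, 1, 0, 0, 0)
print(a_datetime + timedelta(seconds=90))   # 2014-01-01 00:01:30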
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.write_message(message)
self.stream.flush()
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return len(email_messages) | Write all messages to the stream in a thread-safe way. |
def dump_info():
'''Shows various details about the account & servers'''
vultr = Vultr(API_KEY)
try:
logging.info('Listing account info:\n%s', dumps(
vultr.account.info(), indent=2
))
logging.info('Listing apps:\n%s', dumps(
vultr.app.list(), indent=2
))
logging.info('Listing backups:\n%s', dumps(
vultr.backup.list(), indent=2
))
logging.info('Listing DNS:\n%s', dumps(
vultr.dns.list(), indent=2
))
logging.info('Listing ISOs:\n%s', dumps(
vultr.iso.list(), indent=2
))
logging.info('Listing OSs:\n%s', dumps(
vultr.os.list(), indent=2
))
logging.info('Listing plans:\n%s', dumps(
vultr.plans.list(), indent=2
))
logging.info('Listing regions:\n%s', dumps(
vultr.regions.list(), indent=2
))
logging.info('Listing servers:\n%s', dumps(
vultr.server.list(), indent=2
))
logging.info('Listing snapshots:\n%s', dumps(
vultr.snapshot.list(), indent=2
))
logging.info('Listing SSH keys:\n%s', dumps(
vultr.sshkey.list(), indent=2
))
logging.info('Listing startup scripts:\n%s', dumps(
vultr.startupscript.list(), indent=2
))
except VultrError as ex:
logging.error('VultrError: %s', ex) | Shows various details about the account & servers |
def params_values(self):
"""
Get a list of the ``Parameter`` values if they have a value.
This does not include the basis regularizer.
"""
return [p.value for p in atleast_list(self.params) if p.has_value] | Get a list of the ``Parameter`` values if they have a value.
This does not include the basis regularizer. |
def TAPQuery(RAdeg=180.0, DECdeg=0.0, width=1, height=1):
"""Do a query of the CADC Megacam table. Get all observations insize the box. Returns a file-like object"""
QUERY =( """ SELECT """
""" COORD1(CENTROID(Plane.position_bounds)) AS "RAJ2000", COORD2(CENTROID(Plane.position_bounds)) AS "DEJ2000", Plane.time_bounds_lower as "MJDATE" """
""" FROM """
""" caom2.Observation as o JOIN caom2.Plane as Plane on o.obsID=Plane.obsID """
""" WHERE """
""" o.collection = 'CFHT' """
""" AND o.instrument_name = 'MegaPrime' """
""" AND INTERSECTS( BOX('ICRS', {}, {}, {}, {}), Plane.position_bounds ) = 1 """
""" AND ( o.proposal_id LIKE '%P05') """)
# """ AND ( o.proposal_id LIKE '%P05' OR o.proposal_id LIKE '%L03' or o.proposal_id LIKE '%L06' or o.proposal_id
# in ( '06AF33', '06BF98' ) ) """ )
QUERY = QUERY.format( RAdeg, DECdeg, width, height)
data={"QUERY": QUERY,
"REQUEST": "doQuery",
"LANG": "ADQL",
"FORMAT": "votable"}
url="http://www.cadc.hia.nrc.gc.ca/tap/sync"
import urllib.parse, urllib.request  # Python 3 split of the old urllib module
print(url, data)
return urllib.request.urlopen(url, urllib.parse.urlencode(data).encode('utf-8')) | Do a query of the CADC Megacam table. Get all observations inside the box. Returns a file-like object |
def loader_for_type(self, ctype):
"""
Gets a function ref to deserialize content
for a certain mimetype.
"""
for loadee, mimes in Mimer.TYPES.items():
for mime in mimes:
if ctype.startswith(mime):
return loadee | Gets a function ref to deserialize content
for a certain mimetype. |
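A sketch of the registry this lookup walks: Mimer.TYPES maps a deserializer callable to the mimetypes it handles, and the startswith() match lets charset suffixes resolve too. The registration below is illustrative, not the library's actual table:
import json

Mimer.TYPES[json.loads] = ['application/json', 'text/json']        # illustrative entry
loads = mimer.loader_for_type('application/json; charset=utf-8')   # hypothetical Mimer instance
assert loads is json.loads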
def obtainInfo(self):
"""
Method for obtaining information about the movie.
"""
try:
info = self.ytdl.extract_info(self.yid, download=False)
except youtube_dl.utils.DownloadError:
raise ConnectionError
if not self.preferences['stream']:
self.url = (info['requested_formats'][0]['url'], info['requested_formats'][1]['url'])
return True
# else:
for f in info['formats']:
if 'filesize' not in f or not f['filesize']:
f['filesize'] = float('inf')  # unknown filesize is least preferred; infinity keeps the sort below from failing
# easy sorting: best quality first, lowest filesize as tiebreaker
aud = {(-int(f['abr']), f['filesize'], f['url']) for f in info['formats'] if f.get('abr') and not f.get('height')}
vid = {(-int(f['height']), f['filesize'], f['url']) for f in info['formats'] if not f.get('abr') and f.get('height')}
full= {(-int(f['height']), f['filesize'], f['url']) for f in info['formats'] if f.get('abr') and f.get('height')}
try:
_f = int(self.preferences.get('format'))  # if a valid format is present, choose the closest value
_k = lambda x: abs(x[0] + _f)  # +, because x[0] is negative
except (ValueError, TypeError):
_k = lambda d: d
if self.preferences['audio'] and self.preferences['video']: fm = sorted(full, key=_k)
elif self.preferences['audio']: fm = sorted(aud, key=_k)
elif self.preferences['video']: fm = sorted(vid, key=_k)
filesize = 0
i = -1
try:
while filesize == 0: # some videos are problematic, we will try to find format with non-zero filesize
i += 1
self.url = fm[i][2]
if fm[i][1] == float('inf'):
filesize = int(self.r_session.head(self.url).headers['content-length'])
else:
filesize = int(fm[i][1])
except IndexError: # finding filesize failed for every format
self.url = (info['requested_formats'][0]['url'], info['requested_formats'][1]['url'])
self.preferences['stream'] = False # hopefully non-stream download will work
return True
self.filesize = filesize
return True | Method for obtaining information about the movie. |
def make_levels_set(levels):
'''Convert every list of filter items in levels
to a set to speed up membership operations'''
for level_key,level_filters in levels.items():
levels[level_key] = make_level_set(level_filters)
return levels | Convert every list of filter items in levels
to a set to speed up membership operations |
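A worked example, assuming make_level_set amounts to converting each filter list to a set:
levels = {'warn': ['w1', 'w2', 'w1'], 'error': ['e1']}
print(make_levels_set(levels))
# -> {'warn': {'w1', 'w2'}, 'error': {'e1'}}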
def interactive_update_stack(self, fqn, template, old_parameters,
parameters, stack_policy, tags,
**kwargs):
"""Update a Cloudformation stack in interactive mode.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using interactive provider mode for %s.", fqn)
changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
old_parameters_as_dict = self.params_as_dict(old_parameters)
new_parameters_as_dict = self.params_as_dict(
[x
if 'ParameterValue' in x
else {'ParameterKey': x['ParameterKey'],
'ParameterValue': old_parameters_as_dict[x['ParameterKey']]}
for x in parameters]
)
params_diff = diff_parameters(
old_parameters_as_dict,
new_parameters_as_dict)
action = "replacements" if self.replacements_only else "changes"
full_changeset = changes
if self.replacements_only:
changes = requires_replacement(changes)
if changes or params_diff:
ui.lock()
try:
output_summary(fqn, action, changes, params_diff,
replacements_only=self.replacements_only)
ask_for_approval(
full_changeset=full_changeset,
params_diff=params_diff,
include_verbose=True,
)
finally:
ui.unlock()
self.deal_with_changeset_stack_policy(fqn, stack_policy)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
) | Update a Cloudformation stack in interactive mode.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack. |
def _get_absolute_reference(self, ref_key):
"""Returns absolute reference code for key."""
key_str = u", ".join(map(str, ref_key))
return u"S[" + key_str + u"]" | Returns absolute reference code for key. |
def filter(self, func):
"""
A lazy way to skip elements in the stream that gives False for the given
function.
"""
self._data = xfilter(func, self._data)
return self | A lazy way to skip elements in the stream that gives False for the given
function. |
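A chaining sketch; the Stream constructor is hypothetical, but the point is that xfilter is lazy, so no element is tested until the stream is iterated:
evens = Stream(range(10)).filter(lambda n: n % 2 == 0)  # hypothetical constructor
print(list(evens))  # [0, 2, 4, 6, 8] -- evaluation happens only here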
def set_errors(self, errors):
"""Set parameter error estimate """
if errors is None:
self.__errors__ = None
return
self.__errors__ = [asscalar(e) for e in errors] | Set parameter error estimate |
def vdistg(v1, v2, ndim):
"""
Return the distance between two vectors of arbitrary dimension.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vdistg_c.html
:param v1: ndim-dimensional double precision vector.
:type v1: list[ndim]
:param v2: ndim-dimensional double precision vector.
:type v2: list[ndim]
:param ndim: Dimension of v1 and v2.
:type ndim: int
:return: the distance between v1 and v2
:rtype: float
"""
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
ndim = ctypes.c_int(ndim)
return libspice.vdistg_c(v1, v2, ndim) | Return the distance between two vectors of arbitrary dimension.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vdistg_c.html
:param v1: ndim-dimensional double precision vector.
:type v1: list[ndim]
:param v2: ndim-dimensional double precision vector.
:type v2: list[ndim]
:param ndim: Dimension of v1 and v2.
:type ndim: int
:return: the distance between v1 and v2
:rtype: float |
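A worked call (in SpiceyPy this is exposed as spiceypy.vdistg):
print(vdistg([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 3))  # sqrt(2) ~ 1.41421356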
def dotproduct(X, Y):
"""Return the sum of the element-wise product of vectors x and y.
>>> dotproduct([1, 2, 3], [1000, 100, 10])
1230
"""
return sum([x * y for x, y in zip(X, Y)]) | Return the sum of the element-wise product of vectors x and y.
>>> dotproduct([1, 2, 3], [1000, 100, 10])
1230 |
def build_variant(variant, institute_id, gene_to_panels=None,
hgncid_to_gene=None, sample_info=None):
"""Build a variant object based on parsed information
Args:
variant(dict)
institute_id(str)
gene_to_panels(dict): A dictionary with
{<hgnc_id>: {
'panel_names': [<panel_name>, ..],
'disease_associated_transcripts': [<transcript_id>, ..]
}
.
.
}
hgncid_to_gene(dict): A dictionary with
{<hgnc_id>: <hgnc_gene info>
.
.
}
sample_info(dict): A dictionary with info about samples.
Strictly for cancer to tell which is tumor
Returns:
variant_obj(dict)
variant = dict(
# document_id is a md5 string created by institute_genelist_caseid_variantid:
_id = str, # required, same as document_id
document_id = str, # required
# variant_id is a md5 string created by chrom_pos_ref_alt (simple_id)
variant_id = str, # required
# display name is variant_id (no md5)
display_name = str, # required
# chrom_pos_ref_alt
simple_id = str,
# The variant can be either research or clinical.
# For research variants we display all the available information while
# the clinical variants have limited annotation fields.
variant_type = str, # required, choices=('research', 'clinical'))
category = str, # choices=('sv', 'snv', 'str')
sub_category = str, # choices=('snv', 'indel', 'del', 'ins', 'dup', 'inv', 'cnv', 'bnd')
mate_id = str, # For SVs this identifies the other end
case_id = str, # case_id is a string like owner_caseid
chromosome = str, # required
position = int, # required
end = int, # required
length = int, # required
reference = str, # required
alternative = str, # required
rank_score = float, # required
variant_rank = int, # required
rank_score_results = list, # List of dictionaries
institute = str, # institute_id, required
sanger_ordered = bool,
validation = str, # Sanger validation, choices=('True positive', 'False positive')
quality = float,
filters = list, # list of strings
samples = list, # list of dictionaries that are <gt_calls>
genetic_models = list, # list of strings choices=GENETIC_MODELS
compounds = list, # sorted list of <compound> ordering='combined_score'
genes = list, # list with <gene>
dbsnp_id = str,
# Gene ids:
hgnc_ids = list, # list of hgnc ids (int)
hgnc_symbols = list, # list of hgnc symbols (str)
panels = list, # list of panel names that the variant overlaps
# Frequencies:
thousand_genomes_frequency = float,
thousand_genomes_frequency_left = float,
thousand_genomes_frequency_right = float,
exac_frequency = float,
max_thousand_genomes_frequency = float,
max_exac_frequency = float,
local_frequency = float,
local_obs_old = int,
local_obs_hom_old = int,
local_obs_total_old = int, # default=638
# Predicted deleteriousness:
cadd_score = float,
clnsig = list, # list of <clinsig>
spidex = float,
missing_data = bool, # default False
# STR specific information
str_repid = str, # repeat id, generally corresponds to gene symbol
str_ru = str, # repeat unit, used e.g. in PanelApp naming of STRs
str_ref = int, # reference copy number
str_len = int, # number of repeats found in case
str_status = str, # indicates the severity of the expansion level
# Callers
gatk = str, # choices=VARIANT_CALL, default='Not Used'
samtools = str, # choices=VARIANT_CALL, default='Not Used'
freebayes = str, # choices=VARIANT_CALL, default='Not Used'
# Conservation:
phast_conservation = list, # list of str, choices=CONSERVATION
gerp_conservation = list, # list of str, choices=CONSERVATION
phylop_conservation = list, # list of str, choices=CONSERVATION
# Database options:
gene_lists = list,
manual_rank = int, # choices=[0, 1, 2, 3, 4, 5]
dismiss_variant = list,
acmg_evaluation = str, # choices=ACMG_TERMS
)
"""
gene_to_panels = gene_to_panels or {}
hgncid_to_gene = hgncid_to_gene or {}
sample_info = sample_info or {}
#LOG.debug("Building variant %s", variant['ids']['document_id'])
variant_obj = dict(
_id = variant['ids']['document_id'],
document_id=variant['ids']['document_id'],
variant_id=variant['ids']['variant_id'],
display_name=variant['ids']['display_name'],
variant_type=variant['variant_type'],
case_id=variant['case_id'],
chromosome=variant['chromosome'],
reference=variant['reference'],
alternative=variant['alternative'],
institute=institute_id,
)
variant_obj['missing_data'] = False
variant_obj['position'] = int(variant['position'])
variant_obj['rank_score'] = float(variant['rank_score'])
end = variant.get('end')
if end:
variant_obj['end'] = int(end)
length = variant.get('length')
if length:
variant_obj['length'] = int(length)
variant_obj['simple_id'] = variant['ids'].get('simple_id')
variant_obj['quality'] = float(variant['quality']) if variant['quality'] else None
variant_obj['filters'] = variant['filters']
variant_obj['dbsnp_id'] = variant.get('dbsnp_id')
variant_obj['cosmic_ids'] = variant.get('cosmic_ids')
variant_obj['category'] = variant['category']
variant_obj['sub_category'] = variant.get('sub_category')
if 'mate_id' in variant:
variant_obj['mate_id'] = variant['mate_id']
if 'cytoband_start' in variant:
variant_obj['cytoband_start'] = variant['cytoband_start']
if 'cytoband_end' in variant:
variant_obj['cytoband_end'] = variant['cytoband_end']
if 'end_chrom' in variant:
variant_obj['end_chrom'] = variant['end_chrom']
############ Str specific ############
if 'str_ru' in variant:
variant_obj['str_ru'] = variant['str_ru']
if 'str_repid' in variant:
variant_obj['str_repid'] = variant['str_repid']
if 'str_ref' in variant:
variant_obj['str_ref'] = variant['str_ref']
if 'str_len' in variant:
variant_obj['str_len'] = variant['str_len']
if 'str_status' in variant:
variant_obj['str_status'] = variant['str_status']
gt_types = []
for sample in variant.get('samples', []):
gt_call = build_genotype(sample)
gt_types.append(gt_call)
if sample_info:
sample_id = sample['individual_id']
if sample_info[sample_id] == 'case':
key = 'tumor'
else:
key = 'normal'
variant_obj[key] = {
'alt_depth': sample['alt_depth'],
'ref_depth': sample['ref_depth'],
'read_depth': sample['read_depth'],
'alt_freq': sample['alt_frequency'],
'ind_id': sample_id
}
variant_obj['samples'] = gt_types
if 'genetic_models' in variant:
variant_obj['genetic_models'] = variant['genetic_models']
# Add the compounds
compounds = []
for compound in variant.get('compounds', []):
compound_obj = build_compound(compound)
compounds.append(compound_obj)
if compounds:
variant_obj['compounds'] = compounds
# Add the genes with transcripts
genes = []
for index, gene in enumerate(variant.get('genes', [])):
if gene.get('hgnc_id'):
gene_obj = build_gene(gene, hgncid_to_gene)
genes.append(gene_obj)
if index > 30:
# avoid uploading too much data (specifically for SV variants)
# mark variant as missing data
variant_obj['missing_data'] = True
break
if genes:
variant_obj['genes'] = genes
# To make gene searches more effective
if 'hgnc_ids' in variant:
variant_obj['hgnc_ids'] = [hgnc_id for hgnc_id in variant['hgnc_ids'] if hgnc_id]
# Add the hgnc symbols from the database genes
hgnc_symbols = []
for hgnc_id in variant_obj['hgnc_ids']:
gene_obj = hgncid_to_gene.get(hgnc_id)
if gene_obj:
hgnc_symbols.append(gene_obj['hgnc_symbol'])
# else:
# LOG.warn("missing HGNC symbol for: %s", hgnc_id)
if hgnc_symbols:
variant_obj['hgnc_symbols'] = hgnc_symbols
# link gene panels
panel_names = set()
for hgnc_id in variant_obj['hgnc_ids']:
gene_panels = gene_to_panels.get(hgnc_id, set())
panel_names = panel_names.union(gene_panels)
if panel_names:
variant_obj['panels'] = list(panel_names)
# Add the clnsig objects
clnsig_objects = []
for entry in variant.get('clnsig', []):
clnsig_obj = build_clnsig(entry)
clnsig_objects.append(clnsig_obj)
if clnsig_objects:
variant_obj['clnsig'] = clnsig_objects
# Add the callers
call_info = variant.get('callers', {})
for caller in call_info:
if call_info[caller]:
variant_obj[caller] = call_info[caller]
# Add the conservation
conservation_info = variant.get('conservation', {})
if conservation_info.get('phast'):
variant_obj['phast_conservation'] = conservation_info['phast']
if conservation_info.get('gerp'):
variant_obj['gerp_conservation'] = conservation_info['gerp']
if conservation_info.get('phylop'):
variant_obj['phylop_conservation'] = conservation_info['phylop']
# Add autozygosity calls
if variant.get('azlength'):
variant_obj['azlength'] = variant['azlength']
if variant.get('azqual'):
variant_obj['azqual'] = variant['azqual']
# Add the frequencies
frequencies = variant.get('frequencies', {})
if frequencies.get('thousand_g'):
variant_obj['thousand_genomes_frequency'] = float(frequencies['thousand_g'])
if frequencies.get('thousand_g_max'):
variant_obj['max_thousand_genomes_frequency'] = float(frequencies['thousand_g_max'])
if frequencies.get('exac'):
variant_obj['exac_frequency'] = float(frequencies['exac'])
if frequencies.get('exac_max'):
variant_obj['max_exac_frequency'] = float(frequencies['exac_max'])
if frequencies.get('gnomad'):
variant_obj['gnomad_frequency'] = float(frequencies['gnomad'])
if frequencies.get('gnomad_max'):
variant_obj['max_gnomad_frequency'] = float(frequencies['gnomad_max'])
if frequencies.get('thousand_g_left'):
variant_obj['thousand_genomes_frequency_left'] = float(frequencies['thousand_g_left'])
if frequencies.get('thousand_g_right'):
variant_obj['thousand_genomes_frequency_right'] = float(frequencies['thousand_g_right'])
# add the local observation counts from the old archive
if variant.get('local_obs_old'):
variant_obj['local_obs_old'] = variant['local_obs_old']
if variant.get('local_obs_hom_old'):
variant_obj['local_obs_hom_old'] = variant['local_obs_hom_old']
# Add the sv counts:
if frequencies.get('clingen_cgh_benign'):
variant_obj['clingen_cgh_benign'] = frequencies['clingen_cgh_benign']
if frequencies.get('clingen_cgh_pathogenic'):
variant_obj['clingen_cgh_pathogenic'] = frequencies['clingen_cgh_pathogenic']
if frequencies.get('clingen_ngi'):
variant_obj['clingen_ngi'] = frequencies['clingen_ngi']
if frequencies.get('swegen'):
variant_obj['swegen'] = frequencies['swegen']
# Decipher is never a frequency; it will only give 1 if the variant exists in Decipher
# Check if decipher exists
if frequencies.get('decipher'):
variant_obj['decipher'] = frequencies['decipher']
# If not, check if the decipherAF field exists
elif frequencies.get('decipherAF'):
variant_obj['decipher'] = frequencies['decipherAF']
# Add the severity predictors
if variant.get('cadd_score'):
variant_obj['cadd_score'] = variant['cadd_score']
if variant.get('spidex'):
variant_obj['spidex'] = variant['spidex']
# Add the rank score results
rank_results = []
for category in variant.get('rank_result', []):
rank_result = {
'category': category,
'score': variant['rank_result'][category]
}
rank_results.append(rank_result)
if rank_results:
variant_obj['rank_score_results'] = rank_results
# Cancer specific
if variant.get('mvl_tag'):
variant_obj['mvl_tag'] = True
return variant_obj | Build a variant object based on parsed information
Args:
variant(dict)
institute_id(str)
gene_to_panels(dict): A dictionary with
{<hgnc_id>: {
'panel_names': [<panel_name>, ..],
'disease_associated_transcripts': [<transcript_id>, ..]
}
.
.
}
hgncid_to_gene(dict): A dictionary with
{<hgnc_id>: <hgnc_gene info>
.
.
}
sample_info(dict): A dictionary with info about samples.
Strictly for cancer to tell which is tumor
Returns:
variant_obj(dict)
variant = dict(
# document_id is a md5 string created by institute_genelist_caseid_variantid:
_id = str, # required, same as document_id
document_id = str, # required
# variant_id is a md5 string created by chrom_pos_ref_alt (simple_id)
variant_id = str, # required
# display name is variant_id (no md5)
display_name = str, # required
# chrom_pos_ref_alt
simple_id = str,
# The variant can be either research or clinical.
# For research variants we display all the available information while
# the clinical variants have limited annotation fields.
variant_type = str, # required, choices=('research', 'clinical'))
category = str, # choices=('sv', 'snv', 'str')
sub_category = str, # choices=('snv', 'indel', 'del', 'ins', 'dup', 'inv', 'cnv', 'bnd')
mate_id = str, # For SVs this identifies the other end
case_id = str, # case_id is a string like owner_caseid
chromosome = str, # required
position = int, # required
end = int, # required
length = int, # required
reference = str, # required
alternative = str, # required
rank_score = float, # required
variant_rank = int, # required
rank_score_results = list, # List of dictionaries
institute = str, # institute_id, required
sanger_ordered = bool,
validation = str, # Sanger validation, choices=('True positive', 'False positive')
quality = float,
filters = list, # list of strings
samples = list, # list of dictionaries that are <gt_calls>
genetic_models = list, # list of strings choices=GENETIC_MODELS
compounds = list, # sorted list of <compound> ordering='combined_score'
genes = list, # list with <gene>
dbsnp_id = str,
# Gene ids:
hgnc_ids = list, # list of hgnc ids (int)
hgnc_symbols = list, # list of hgnc symbols (str)
panels = list, # list of panel names that the variant overlaps
# Frequencies:
thousand_genomes_frequency = float,
thousand_genomes_frequency_left = float,
thousand_genomes_frequency_right = float,
exac_frequency = float,
max_thousand_genomes_frequency = float,
max_exac_frequency = float,
local_frequency = float,
local_obs_old = int,
local_obs_hom_old = int,
local_obs_total_old = int, # default=638
# Predicted deleteriousness:
cadd_score = float,
clnsig = list, # list of <clinsig>
spidex = float,
missing_data = bool, # default False
# STR specific information
str_repid = str, # repeat id, generally corresponds to gene symbol
str_ru = str, # repeat unit, used e.g. in PanelApp naming of STRs
str_ref = int, # reference copy number
str_len = int, # number of repeats found in case
str_status = str, # indicates the severity of the expansion level
# Callers
gatk = str, # choices=VARIANT_CALL, default='Not Used'
samtools = str, # choices=VARIANT_CALL, default='Not Used'
freebayes = str, # choices=VARIANT_CALL, default='Not Used'
# Conservation:
phast_conservation = list, # list of str, choices=CONSERVATION
gerp_conservation = list, # list of str, choices=CONSERVATION
phylop_conservation = list, # list of str, choices=CONSERVATION
# Database options:
gene_lists = list,
manual_rank = int, # choices=[0, 1, 2, 3, 4, 5]
dismiss_variant = list,
acmg_evaluation = str, # choices=ACMG_TERMS
) |
def _unsigned_bounds(self):
"""
Get lower bound and upper bound for `self` in unsigned arithmetic.
:return: a list of (lower_bound, upper_bound) tuples.
"""
ssplit = self._ssplit()
if len(ssplit) == 1:
lb = ssplit[0].lower_bound
ub = ssplit[0].upper_bound
return [ (lb, ub) ]
elif len(ssplit) == 2:
# ssplit[0] is on the left hemisphere, and ssplit[1] is on the right hemisphere
lb_1 = ssplit[0].lower_bound
ub_1 = ssplit[0].upper_bound
lb_2 = ssplit[1].lower_bound
ub_2 = ssplit[1].upper_bound
return [ (lb_1, ub_1), (lb_2, ub_2) ]
else:
raise Exception('unexpected number of segments from _ssplit(): %d' % len(ssplit)) | Get lower bound and upper bound for `self` in unsigned arithmetic.
:return: a list of (lower_bound, upper_bound) tuples. |
def process_embed(embed_items=None,
embed_tracks=None,
embed_metadata=None,
embed_insights=None):
"""Returns an embed field value based on the parameters."""
result = None
embed = ''
if embed_items:
embed = 'items'
if embed_tracks:
if embed != '':
embed += ','
embed += 'tracks'
if embed_metadata:
if embed != '':
embed += ','
embed += 'metadata'
if embed_insights:
if embed != '':
embed += ','
embed += 'insights'
if embed != '':
result = embed
return result | Returns an embed field value based on the parameters. |
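Two worked calls showing the comma-joined value and the None fallback:
print(process_embed(embed_items=True, embed_tracks=True))  # 'items,tracks'
print(process_embed())                                     # None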
def remove_block(self, block, index="-1"):
"""Remove block element from scope
Args:
block (Block): Block object
"""
self[index]["__blocks__"].remove(block)
self[index]["__names__"].remove(block.raw()) | Remove block element from scope
Args:
block (Block): Block object |
def disassociate(self, eip_or_aid):
"""Disassociates an EIP. If the EIP was allocated for a VPC instance,
an AllocationId(aid) must be provided instead of a PublicIp.
"""
if "." in eip_or_aid: # If an IP is given (Classic)
return "true" == self.call("DisassociateAddress",
response_data_key="return",
PublicIp=eip_or_aid)
else: # If an AID is given (VPC)
return "true" == self.call("DisassociateAddress",
response_data_key="return",
AllocationId=eip_or_aid) | Disassociates an EIP. If the EIP was allocated for a VPC instance,
an AllocationId(aid) must be provided instead of a PublicIp. |
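A usage sketch of the dotted-string dispatch; the connection object, IP, and allocation id below are hypothetical:
conn.disassociate('203.0.113.10')      # contains '.', treated as a Classic PublicIp
conn.disassociate('eipalloc-0abc123')  # no '.', treated as a VPC AllocationId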
def from_raw(self, rval: RawValue, jptr: JSONPointer = "") -> Value:
"""Override the superclass method."""
def convert(val):
if isinstance(val, list):
res = ArrayValue([convert(x) for x in val])
elif isinstance(val, dict):
res = ObjectValue({x: convert(val[x]) for x in val})
else:
res = val
return res
return convert(rval) | Override the superclass method. |
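The recursion, mirrored standalone with plain list/dict standing in for ArrayValue/ObjectValue:
def convert(val):
    # nested lists and dicts are rebuilt bottom-up; scalars pass through
    if isinstance(val, list):
        return [convert(x) for x in val]
    if isinstance(val, dict):
        return {k: convert(v) for k, v in val.items()}
    return val

print(convert({'name': 'x', 'tags': [1, {'y': 2}]}))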
def get_user_logins(self, user_id, params={}):
"""
Return a user's logins for the given user_id.
https://canvas.instructure.com/doc/api/logins.html#method.pseudonyms.index
"""
url = USERS_API.format(user_id) + "/logins"
data = self._get_paged_resource(url, params=params)
logins = []
for login_data in data:
logins.append(Login(data=login_data))
return logins | Return a user's logins for the given user_id.
https://canvas.instructure.com/doc/api/logins.html#method.pseudonyms.index |
def invalid(cls, data, context=None):
"""Shortcut to create an INVALID Token."""
return cls(cls.TagType.INVALID, data, context) | Shortcut to create an INVALID Token. |
def calc_max_flexural_wavelength(self):
"""
Returns the approximate maximum flexural wavelength
This is important when padding of the grid is required: in Flexure (this
code), grids are padded out to one maximum flexural wavelength, but in any
case, the flexural wavelength is a good characteristic distance for any
truncation limit
"""
if np.isscalar(self.D):
Dmax = self.D
else:
Dmax = self.D.max()
# This is an approximation if there is fill that evolves with iterations
# (e.g., water), but should be good enough that this won't do much to it
alpha = (4*Dmax/(self.drho*self.g))**.25 # 2D flexural parameter
self.maxFlexuralWavelength = 2*np.pi*alpha
self.maxFlexuralWavelength_ncells_x = int(np.ceil(self.maxFlexuralWavelength / self.dx))
self.maxFlexuralWavelength_ncells_y = int(np.ceil(self.maxFlexuralWavelength / self.dy)) | Returns the approximate maximum flexural wavelength
This is important when padding of the grid is required: in Flexure (this
code), grids are padded out to one maximum flexural wavelength, but in any
case, the flexural wavelength is a good characteristic distance for any
truncation limit |
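A worked number with illustrative inputs (not class defaults): D = 1e23 N*m, drho = 2300 kg/m^3, g = 9.8 m/s^2:
import numpy as np

D, drho, g = 1e23, 2300.0, 9.8
alpha = (4 * D / (drho * g)) ** 0.25  # 2D flexural parameter, ~6.5e4 m
print(2 * np.pi * alpha)              # ~4.1e5 m, i.e. roughly 410 km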