| code | docstring | text |
| --- | --- | --- |
| stringlengths 75–104k | stringlengths 1–46.9k | stringlengths 164–112k |
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data, codepage=codepage)
value_string = 'Shell item path: {0:s}'.format(
shell_items_parser.CopyToPath())
    return value_string

def nlmsg_flags(self, value):
"""Message flags setter."""
    self.bytearray[self._get_slicers(2)] = bytearray(c_uint16(value or 0))

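`c_uint16` in the setter above comes from the standard-library `ctypes` module (the import lives outside this snippet). A minimal sketch of the byte-level behaviour the setter relies on:

```python
from ctypes import c_uint16

# bytearray() over a ctypes integer exposes its raw bytes in native byte
# order; the setter splices these two bytes into the message buffer.
raw = bytearray(c_uint16(0x0105))
print(raw)  # bytearray(b'\x05\x01') on a little-endian host
```
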
def handle_profile_save(self, sender, instance, **kwargs):
""" Custom handler for user profile save """
    self.handle_save(instance.user.__class__, instance.user)

def create_manager(self, instance, superclass):
"""
Dynamically create a RelatedManager to handle the back side of the (G)FK
"""
rel_model = self.rating_model
rated_model = self.rated_model
class RelatedManager(superclass):
def get_query_set(self):
qs = RatingsQuerySet(rel_model, rated_model=rated_model)
return qs.filter(**(self.core_filters))
def add(self, *objs):
lookup_kwargs = rel_model.lookup_kwargs(instance)
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" %
self.model._meta.object_name)
                for k, v in lookup_kwargs.items():
setattr(obj, k, v)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update(rel_model.lookup_kwargs(instance))
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs.update(rel_model.lookup_kwargs(instance))
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
def remove(self, *objs):
for obj in objs:
# Is obj actually part of this descriptor set?
if obj in self.all():
obj.delete()
else:
raise rel_model.DoesNotExist(
"%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
self.all().delete()
clear.alters_data = True
def rate(self, user, score):
rating, created = self.get_or_create(user=user)
if created or score != rating.score:
rating.score = score
rating.save()
return rating
def unrate(self, user):
return self.filter(user=user,
**rel_model.lookup_kwargs(instance)
).delete()
def perform_aggregation(self, aggregator):
score = self.all().aggregate(agg=aggregator('score'))
return score['agg']
def cumulative_score(self):
# simply the sum of all scores, useful for +1/-1
return self.perform_aggregation(models.Sum)
def average_score(self):
# the average of all the scores, useful for 1-5
return self.perform_aggregation(models.Avg)
def standard_deviation(self):
# the standard deviation of all the scores, useful for 1-5
return self.perform_aggregation(models.StdDev)
def variance(self):
# the variance of all the scores, useful for 1-5
return self.perform_aggregation(models.Variance)
def similar_items(self):
return SimilarItem.objects.get_for_item(instance)
manager = RelatedManager()
manager.core_filters = rel_model.lookup_kwargs(instance)
manager.model = rel_model
    return manager

def is_static(*p):
""" A static value (does not change at runtime)
which is known at compile time
"""
return all(is_CONST(x) or
is_number(x) or
is_const(x)
               for x in p)

def get_env_args(env):
"""Yield options to inject into the slcli command from the environment."""
for arg, val in env.vars.get('global_args', {}).items():
if val is True:
yield '--%s' % arg
elif isinstance(val, int):
for _ in range(val):
yield '--%s' % arg
elif val is None:
continue
else:
            yield '--%s=%s' % (arg, val)

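A quick, self-contained illustration; `SimpleNamespace` stands in for the real slcli environment object, which carries a `vars` dict:

```python
from types import SimpleNamespace

# Hypothetical stand-in for the slcli environment object.
env = SimpleNamespace(vars={'global_args': {'verbose': 2,
                                            'format': 'json',
                                            'proxy': None}})

print(list(get_env_args(env)))
# ['--verbose', '--verbose', '--format=json']  (None values are skipped)
```
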
def collect(cls, sources):
"""
:param sources: dictionaries with a key 'tectonicRegion'
:returns: an ordered list of SourceGroup instances
"""
source_stats_dict = {}
for src in sources:
trt = src['tectonicRegion']
if trt not in source_stats_dict:
source_stats_dict[trt] = SourceGroup(trt)
sg = source_stats_dict[trt]
if not sg.sources:
# we append just one source per SourceGroup, so that
# the memory occupation is insignificant
sg.sources.append(src)
# return SourceGroups, ordered by TRT string
    return sorted(source_stats_dict.values())

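For illustration, a toy call; this assumes the surrounding module's `SourceGroup` class, which in the original codebase is orderable by its tectonic region type:

```python
# Toy input: three sources across two tectonic region types (TRTs).
sources = [
    {'tectonicRegion': 'Active Shallow Crust'},
    {'tectonicRegion': 'Stable Continental'},
    {'tectonicRegion': 'Active Shallow Crust'},
]
groups = SourceGroup.collect(sources)
# -> two SourceGroup instances, sorted by TRT, each holding one sample source
```
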
def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the mac_addr_adv_interval property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (integer): mac-address advertisement-interval value to
assign to the vrrp.
disable (boolean): Unset mac-address advertisement-interval
if True.
default (boolean): Set mac-address advertisement-interval to
default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if not default and not disable:
if not int(value) or int(value) < 1 or int(value) > 3600:
raise ValueError("vrrp property 'mac_addr_adv_interval' must "
"be in the range 1-3600")
cmd = self.command_builder('vrrp %d mac-address advertisement-interval'
% vrid, value=value, default=default,
disable=disable)
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
    return cmd

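This method is from pyeapi's VRRP API module; a hedged usage sketch (the connection profile name and interface below are placeholders):

```python
import pyeapi

# 'veos01' is a placeholder profile from ~/.eapi.conf.
node = pyeapi.connect_to('veos01')
vrrp = node.api('vrrp')

# With run=False the method only returns the formatted command string.
cmd = vrrp.set_mac_addr_adv_interval('Vlan70', 10, value=30, run=False)

# With run=True (the default) it applies the config and returns True or error.
ok = vrrp.set_mac_addr_adv_interval('Vlan70', 10, value=30)
```
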
def get_backends():
"""
Get backends info so that we can find the correct one.
We just look in the directory structure to find modules.
"""
IGNORE_DIRS = ["core", "tools", "utils"]
global backends
if backends is None:
backends = {}
i3pystatus_dir = os.path.dirname(i3pystatus.__file__)
subdirs = [
x
for x in next(os.walk(i3pystatus_dir))[1]
if not x.startswith("_") and x not in IGNORE_DIRS
]
for subdir in subdirs:
dirs = next(os.walk(os.path.join(i3pystatus_dir, subdir)))[2]
backends.update(
{
x[:-3]: "i3pystatus.%s.%s" % (subdir, x[:-3])
for x in dirs
if not x.startswith("_") and x.endswith(".py")
}
)
    return backends

def execute(self, command, *args, encoding=_NOTSET):
"""Executes redis command and returns Future waiting for the answer.
Raises:
* TypeError if any of args can not be encoded as bytes.
* ReplyError on redis '-ERR' responses.
* ProtocolError when response can not be decoded meaning connection
is broken.
* ConnectionClosedError when either client or server has closed the
connection.
"""
if self._reader is None or self._reader.at_eof():
msg = self._close_msg or "Connection closed or corrupted"
raise ConnectionClosedError(msg)
if command is None:
raise TypeError("command must not be None")
if None in args:
raise TypeError("args must not contain None")
command = command.upper().strip()
is_pubsub = command in _PUBSUB_COMMANDS
is_ping = command in ('PING', b'PING')
if self._in_pubsub and not (is_pubsub or is_ping):
raise RedisError("Connection in SUBSCRIBE mode")
elif is_pubsub:
logger.warning("Deprecated. Use `execute_pubsub` method directly")
return self.execute_pubsub(command, *args)
if command in ('SELECT', b'SELECT'):
cb = partial(self._set_db, args=args)
elif command in ('MULTI', b'MULTI'):
cb = self._start_transaction
elif command in ('EXEC', b'EXEC'):
cb = partial(self._end_transaction, discard=False)
elif command in ('DISCARD', b'DISCARD'):
cb = partial(self._end_transaction, discard=True)
else:
cb = None
if encoding is _NOTSET:
encoding = self._encoding
fut = self._loop.create_future()
if self._pipeline_buffer is None:
self._writer.write(encode_command(command, *args))
else:
encode_command(command, *args, buf=self._pipeline_buffer)
self._waiters.append((fut, encoding, cb))
    return fut

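This is the low-level connection class from aioredis 1.x; a sketch of driving `execute` directly (assumes a Redis server on localhost):

```python
import asyncio
import aioredis  # aioredis 1.x API

async def main():
    # create_connection returns a connection object like the class above.
    conn = await aioredis.create_connection(('localhost', 6379))
    await conn.execute('SET', 'greeting', 'hello')
    value = await conn.execute('GET', 'greeting', encoding='utf-8')
    print(value)  # 'hello'
    conn.close()
    await conn.wait_closed()

asyncio.run(main())
```
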
def with_stmt__26(self, with_loc, context, with_var, colon_loc, body):
"""(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite"""
if with_var:
as_loc, optional_vars = with_var
item = ast.withitem(context_expr=context, optional_vars=optional_vars,
as_loc=as_loc, loc=context.loc.join(optional_vars.loc))
else:
item = ast.withitem(context_expr=context, optional_vars=None,
as_loc=None, loc=context.loc)
return ast.With(items=[item], body=body,
keyword_loc=with_loc, colon_loc=colon_loc,
                    loc=with_loc.join(body[-1].loc))

def scope_groups(self):
"""
Return a new raw REST interface to scope_group resources
:rtype: :py:class:`ns1.rest.ipam.Scopegroups`
"""
import ns1.rest.ipam
    return ns1.rest.ipam.Scopegroups(self.config)

def run(self, *args):
"""Withdraw a unique identity from an organization."""
params = self.parser.parse_args(args)
uuid = params.uuid
organization = params.organization
try:
from_date = utils.str_to_datetime(params.from_date)
to_date = utils.str_to_datetime(params.to_date)
code = self.withdraw(uuid, organization, from_date, to_date)
except InvalidDateError as e:
self.error(str(e))
return e.code
    return code

def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
        kvstore.pull(name, arg_list, priority=-index)

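The push/pull pattern above is MXNet's KVStore API; a standalone sketch with a single parameter:

```python
import mxnet as mx

kv = mx.kv.create('local')
shape = (2, 3)
kv.init('weight', mx.nd.zeros(shape))

grad = mx.nd.ones(shape)
weight = mx.nd.empty(shape)
kv.push('weight', grad, priority=0)         # aggregate the gradient in the store
kv.pull('weight', out=weight, priority=0)   # read back the stored value
```
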
def parse_docopt_string(docopt_string):
"""returns a 2-tuple (usage, options)"""
from re import match, DOTALL
only_usage_pattern = r"""\s+Usage:\s+(?P<usage>.*)\s+"""
usage_and_options_pattern = r"""\s+Usage:\s+(?P<usage>.*)\s+Options:\s+(?P<options>.*)\s+"""
usage, options = '', ''
    matched = match(usage_and_options_pattern, docopt_string, DOTALL)
    if matched:
        usage = matched.groupdict()['usage']
        options = matched.groupdict()['options']
    else:
        matched = match(only_usage_pattern, docopt_string, DOTALL)
        if matched:
            usage = matched.groupdict()['usage']
    return usage, options

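A quick demonstration on a docopt-style docstring (note the patterns require leading whitespace before `Usage:`):

```python
doc = """
Usage:
    mytool <file> [--verbose]

Options:
    --verbose  Print extra output.
"""

usage, options = parse_docopt_string(doc)
print(usage)    # -> the 'mytool <file> [--verbose]' usage line
print(options)  # -> the '--verbose  Print extra output.' block
```
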
def qteAddMiniApplet(self, appletObj: QtmacsApplet):
"""
Install ``appletObj`` as the mini applet in the window layout.
At any given point there can ever only be one mini applet in
the entire Qtmacs application, irrespective of how many
windows are open.
Note that this method does nothing if a custom mini applet is
already active. Use ``qteKillMiniApplet`` to remove that one
first before installing a new one.
|Args|
* ``appletObj`` (**QtmacsApplet**): the new mini applet.
|Returns|
* **bool**: if **True** the mini applet was installed
successfully.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Do nothing if a custom mini applet has already been
# installed.
if self._qteMiniApplet is not None:
msg = 'Cannot replace mini applet more than once.'
self.qteLogger.warning(msg)
return False
# Arrange all registered widgets inside this applet
# automatically if the mini applet object did not install its
# own layout.
if appletObj.layout() is None:
appLayout = QtGui.QHBoxLayout()
for handle in appletObj._qteAdmin.widgetList:
appLayout.addWidget(handle)
appletObj.setLayout(appLayout)
# Now that we have decided to install this mini applet, keep a
# reference to it and set the mini applet flag in the
# applet. This flag is necessary for some methods to separate
# conventional applets from mini applets.
appletObj._qteAdmin.isMiniApplet = True
self._qteMiniApplet = appletObj
# Shorthands.
app = self._qteActiveApplet
appWin = self.qteActiveWindow()
# Remember which window and applet spawned this mini applet.
self._qteMiniApplet._qteCallingApplet = app
self._qteMiniApplet._qteCallingWindow = appWin
del app
# Add the mini applet to the applet registry, ie. for most
# purposes the mini applet is treated like any other applet.
self._qteAppletList.insert(0, self._qteMiniApplet)
# Add the mini applet to the respective splitter in the window
# layout and show it.
appWin.qteLayoutSplitter.addWidget(self._qteMiniApplet)
self._qteMiniApplet.show(True)
# Give focus to first focusable widget in the mini applet
# applet (if one exists)
wid = self._qteMiniApplet.qteNextWidget(numSkip=0)
self._qteMiniApplet.qteMakeWidgetActive(wid)
self.qteMakeAppletActive(self._qteMiniApplet)
# Mini applet was successfully installed.
    return True

def init(opts):
'''
This function gets called when the proxy starts up.
'''
if 'host' not in opts['proxy']:
log.critical('No \'host\' key found in pillar for this proxy.')
return False
if 'username' not in opts['proxy']:
log.critical('No \'username\' key found in pillar for this proxy.')
return False
if 'password' not in opts['proxy']:
        log.critical('No \'password\' key found in pillar for this proxy.')
return False
DETAILS['url'] = 'https://{0}/nuova'.format(opts['proxy']['host'])
DETAILS['headers'] = {'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': 62,
'USER-Agent': 'lwp-request/2.06'}
# Set configuration details
DETAILS['host'] = opts['proxy']['host']
DETAILS['username'] = opts['proxy'].get('username')
DETAILS['password'] = opts['proxy'].get('password')
# Ensure connectivity to the device
log.debug("Attempting to connect to cimc proxy host.")
get_config_resolver_class("computeRackUnit")
log.debug("Successfully connected to cimc proxy host.")
    DETAILS['initialized'] = True

def write_segment(buff, segment, ver, ver_range, eci=False):
"""\
Writes a segment.
:param buff: The byte buffer.
:param _Segment segment: The segment to serialize.
:param ver: ``None`` if a QR Code is written, "M1", "M2", "M3", or "M4" if a
Micro QR Code is written.
:param ver_range: "M1", "M2", "M3", or "M4" if a Micro QR Code is written,
otherwise a constant representing a range of QR Code versions.
"""
mode = segment.mode
append_bits = buff.append_bits
# Write ECI header if requested
if eci and mode == consts.MODE_BYTE \
and segment.encoding != consts.DEFAULT_BYTE_ENCODING:
append_bits(consts.MODE_ECI, 4)
append_bits(get_eci_assignment_number(segment.encoding), 8)
if ver is None: # QR Code
append_bits(mode, 4)
elif ver > consts.VERSION_M1: # Micro QR Code (M1 has no mode indicator)
append_bits(consts.MODE_TO_MICRO_MODE_MAPPING[mode], ver + 3)
# Character count indicator
append_bits(segment.char_count,
consts.CHAR_COUNT_INDICATOR_LENGTH[mode][ver_range])
    buff.extend(segment.bits)

def _get_jenks_config():
""" retrieve the jenks configuration object """
config_file = (get_configuration_file() or
os.path.expanduser(os.path.join("~", CONFIG_FILE_NAME)))
if not os.path.exists(config_file):
open(config_file, 'w').close()
with open(config_file, 'r') as fh:
return JenksData(
            yaml.safe_load(fh.read()),
write_method=generate_write_yaml_to_file(config_file)
        )

def find_actions(namespace, action_prefix):
"""Find all the actions in the namespace."""
actions = {}
for key, value in iteritems(namespace):
if key.startswith(action_prefix):
actions[key[len(action_prefix):]] = analyse_action(value)
    return actions

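Usage sketch in the style of the old `werkzeug.script` convention this helper comes from; `analyse_action` (from the same module) inspects each callable's signature:

```python
# Hypothetical script module: callables prefixed with 'action_' become actions.
def action_runserver(hostname='localhost', port=5000):
    """Start a development server."""

def action_shell():
    """Open an interactive shell."""

actions = find_actions(globals(), 'action_')
# -> {'runserver': <analysed action>, 'shell': <analysed action>}
```
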
def render_customizations(self):
"""
Customize template for site user specified customizations
"""
disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
if not disable_plugins:
logger.debug('No site-user specified plugins to disable')
else:
for plugin in disable_plugins:
try:
self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
'disabled at user request')
except KeyError:
# Malformed config
logger.info('Invalid custom configuration found for disable_plugins')
enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
if not enable_plugins:
        logger.debug('No site-user specified plugins to enable')
else:
for plugin in enable_plugins:
try:
msg = 'enabled at user request'
self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
plugin['plugin_args'], msg)
except KeyError:
# Malformed config
                logger.info('Invalid custom configuration found for enable_plugins')

def close_connection (self):
"""Release the open connection from the connection pool."""
if self.url_connection is not None:
try:
self.url_connection.quit()
except Exception:
pass
        self.url_connection = None

def compute_transformed(context):
"""Compute transformed key for opening database"""
key_composite = compute_key_composite(
password=context._._.password,
keyfile=context._._.keyfile
)
kdf_parameters = context._.header.value.dynamic_header.kdf_parameters.data.dict
if context._._.transformed_key is not None:
transformed_key = context._._.transformed_key
elif kdf_parameters['$UUID'].value == kdf_uuids['argon2']:
transformed_key = argon2.low_level.hash_secret_raw(
secret=key_composite,
salt=kdf_parameters['S'].value,
hash_len=32,
type=argon2.low_level.Type.D,
time_cost=kdf_parameters['I'].value,
memory_cost=kdf_parameters['M'].value // 1024,
parallelism=kdf_parameters['P'].value,
version=kdf_parameters['V'].value
)
elif kdf_parameters['$UUID'].value == kdf_uuids['aeskdf']:
transformed_key = aes_kdf(
kdf_parameters['S'].value,
kdf_parameters['R'].value,
key_composite
)
else:
raise Exception('Unsupported key derivation method')
    return transformed_key

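The Argon2 branch uses argon2-cffi's low-level API; a standalone sketch of that call with toy parameters (real KDBX headers supply the salt and cost values):

```python
import argon2.low_level

key = argon2.low_level.hash_secret_raw(
    secret=b'composite-key-bytes',     # toy stand-in for the key composite
    salt=b'0123456789abcdef',          # toy 16-byte salt
    hash_len=32,
    type=argon2.low_level.Type.D,      # Argon2d, as in the branch above
    time_cost=2,
    memory_cost=64 * 1024,             # in KiB
    parallelism=2,
    version=19,                        # 0x13
)
assert len(key) == 32
```
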
def send_command(self, command, args=None):
"""
Send a command to VNDB and then get the result.
:param command: What command are we sending
:param args: What are the json args for this command
:return: Servers Response
:rtype: Dictionary (See D11 docs on VNDB)
"""
if args:
if isinstance(args, str):
final_command = command + ' ' + args + '\x04'
else:
            # We just let ujson propagate the error here if it can't parse the arguments
final_command = command + ' ' + ujson.dumps(args) + '\x04'
else:
final_command = command + '\x04'
self.sslwrap.sendall(final_command.encode('utf-8'))
    return self._recv_data()

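Hedged usage sketch; `client` is assumed to be an instance of the surrounding class with an established, logged-in socket to the VNDB API:

```python
# Command names and filter syntax follow the public VNDB API ("dbstats",
# "get vn ..."); the client object itself is hypothetical here.
stats = client.send_command('dbstats')
vn = client.send_command('get', 'vn basic (id = 17)')
```
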
def open_resource(self, resource_name,
access_mode=constants.AccessModes.no_lock,
open_timeout=constants.VI_TMO_IMMEDIATE,
resource_pyclass=None,
**kwargs):
"""Return an instrument for the resource name.
:param resource_name: name or alias of the resource to open.
:param access_mode: access mode.
:type access_mode: :class:`pyvisa.constants.AccessModes`
:param open_timeout: time out to open.
:param resource_pyclass: resource python class to use to instantiate the Resource.
Defaults to None: select based on the resource name.
:param kwargs: keyword arguments to be used to change instrument attributes
after construction.
:rtype: :class:`pyvisa.resources.Resource`
"""
if resource_pyclass is None:
info = self.resource_info(resource_name, extended=True)
try:
resource_pyclass = self._resource_classes[(info.interface_type, info.resource_class)]
except KeyError:
resource_pyclass = self._resource_classes[(constants.InterfaceType.unknown, '')]
logger.warning('There is no class defined for %r. Using Resource', (info.interface_type, info.resource_class))
res = resource_pyclass(self, resource_name)
for key in kwargs.keys():
try:
getattr(res, key)
present = True
except AttributeError:
present = False
except errors.InvalidSession:
present = True
if not present:
raise ValueError('%r is not a valid attribute for type %s' % (key, res.__class__.__name__))
res.open(access_mode, open_timeout)
self._created_resources.add(res)
for key, value in kwargs.items():
setattr(res, key, value)
    return res

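Typical PyVISA usage of this method; the VISA address below is a placeholder, and extra keyword arguments become attributes on the opened resource:

```python
import pyvisa

rm = pyvisa.ResourceManager()
# 'GPIB0::12::INSTR' is a placeholder address for a real instrument.
inst = rm.open_resource('GPIB0::12::INSTR',
                        read_termination='\n',
                        timeout=5000)
print(inst.query('*IDN?'))
```
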
def add_matplotlib_cmaps(fail_on_import_error=True):
"""Add all matplotlib colormaps."""
try:
from matplotlib import cm as _cm
from matplotlib.cbook import mplDeprecation
except ImportError:
if fail_on_import_error:
raise
# silently fail
return
for name in _cm.cmap_d:
if not isinstance(name, str):
continue
try:
# Do not load deprecated colormaps
with warnings.catch_warnings():
warnings.simplefilter('error', mplDeprecation)
cm = _cm.get_cmap(name)
add_matplotlib_cmap(cm, name=name)
except Exception as e:
if fail_on_import_error:
print("Error adding colormap '%s': %s" % (name, str(e))) | Add all matplotlib colormaps. | Below is the the instruction that describes the task:
### Input:
Add all matplotlib colormaps.
### Response:
def add_matplotlib_cmaps(fail_on_import_error=True):
"""Add all matplotlib colormaps."""
try:
from matplotlib import cm as _cm
from matplotlib.cbook import mplDeprecation
except ImportError:
if fail_on_import_error:
raise
# silently fail
return
for name in _cm.cmap_d:
if not isinstance(name, str):
continue
try:
# Do not load deprecated colormaps
with warnings.catch_warnings():
warnings.simplefilter('error', mplDeprecation)
cm = _cm.get_cmap(name)
add_matplotlib_cmap(cm, name=name)
except Exception as e:
if fail_on_import_error:
print("Error adding colormap '%s': %s" % (name, str(e))) |
def times_called(self, n):
"""Set the number of times an object can be called.
When working with provided calls, you'll only see an
error if the expected call count is exceeded ::
>>> auth = Fake('auth').provides('login').times_called(1)
>>> auth.login()
>>> auth.login()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 2 time(s). Expected 1.
When working with expected calls, you'll see an error if
the call count is never met ::
>>> import fudge
>>> auth = fudge.Fake('auth').expects('login').times_called(2)
>>> auth.login()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 1 time(s). Expected 2.
.. note:: This cannot be used in combination with :func:`fudge.Fake.next_call`
"""
if self._last_declared_call_name:
actual_last_call = self._declared_calls[self._last_declared_call_name]
if isinstance(actual_last_call, CallStack):
raise FakeDeclarationError("Cannot use times_called() in combination with next_call()")
# else: # self._callable is in effect
exp = self._get_current_call()
exp.expected_times_called = n
    return self

def city_center_distance(self):
"""
This method gets the distance to city center, in km.
:return:
"""
try:
infos = self._ad_page_content.find_all(
'div', {"class": "map_info_box"})
for info in infos:
if 'Distance to City Centre' in info.text:
distance_list = re.findall(
'Distance to City Centre: (.*) km', info.text)
return distance_list[0]
return None
except Exception as e:
if self._debug:
logging.error(e.args[0])
print(e)
        return 'N/A'

def load_combo_catalog():
"""Load a union of the user and global catalogs for convenience"""
user_dir = user_data_dir()
global_dir = global_data_dir()
desc = 'Generated from data packages found on your intake search path'
cat_dirs = []
if os.path.isdir(user_dir):
cat_dirs.append(user_dir + '/*.yaml')
cat_dirs.append(user_dir + '/*.yml')
if os.path.isdir(global_dir):
cat_dirs.append(global_dir + '/*.yaml')
cat_dirs.append(global_dir + '/*.yml')
for path_dir in conf.get('catalog_path', []):
if path_dir != '':
if not path_dir.endswith(('yaml', 'yml')):
cat_dirs.append(path_dir + '/*.yaml')
cat_dirs.append(path_dir + '/*.yml')
else:
cat_dirs.append(path_dir)
return YAMLFilesCatalog(cat_dirs, name='builtin', description=desc) | Load a union of the user and global catalogs for convenience | Below is the instruction that describes the task:
### Input:
Load a union of the user and global catalogs for convenience
### Response:
def load_combo_catalog():
"""Load a union of the user and global catalogs for convenience"""
user_dir = user_data_dir()
global_dir = global_data_dir()
desc = 'Generated from data packages found on your intake search path'
cat_dirs = []
if os.path.isdir(user_dir):
cat_dirs.append(user_dir + '/*.yaml')
cat_dirs.append(user_dir + '/*.yml')
if os.path.isdir(global_dir):
cat_dirs.append(global_dir + '/*.yaml')
cat_dirs.append(global_dir + '/*.yml')
for path_dir in conf.get('catalog_path', []):
if path_dir != '':
if not path_dir.endswith(('yaml', 'yml')):
cat_dirs.append(path_dir + '/*.yaml')
cat_dirs.append(path_dir + '/*.yml')
else:
cat_dirs.append(path_dir)
return YAMLFilesCatalog(cat_dirs, name='builtin', description=desc) |
def query(
self,
*, # Forces keyword args.
time: Timestamp,
duration: Union[Duration, timedelta] = Duration(),
qubits: Iterable[Qid] = None,
include_query_end_time=False,
include_op_end_times=False) -> List[ScheduledOperation]:
"""Finds operations by time and qubit.
Args:
time: Operations must end after this time to be returned.
duration: Operations must start by time+duration to be
returned.
qubits: If specified, only operations touching one of the included
qubits will be returned.
include_query_end_time: Determines if the query interval includes
its end time. Defaults to no.
include_op_end_times: Determines if the scheduled operation
intervals include their end times or not. Defaults to no.
Returns:
A list of scheduled operations meeting the specified conditions.
"""
duration = Duration.create(duration)
earliest_time = time - self._max_duration
end_time = time + duration
qubits = None if qubits is None else frozenset(qubits)
def overlaps_interval(op):
if not include_op_end_times and op.time + op.duration == time:
return False
if not include_query_end_time and op.time == end_time:
return False
return op.time + op.duration >= time and op.time <= end_time
def overlaps_qubits(op):
if qubits is None:
return True
return not qubits.isdisjoint(op.operation.qubits)
potential_matches = self.scheduled_operations.irange_key(earliest_time,
end_time)
return [op
for op in potential_matches
if overlaps_interval(op) and overlaps_qubits(op)] | Finds operations by time and qubit.
Args:
time: Operations must end after this time to be returned.
duration: Operations must start by time+duration to be
returned.
qubits: If specified, only operations touching one of the included
qubits will be returned.
include_query_end_time: Determines if the query interval includes
its end time. Defaults to no.
include_op_end_times: Determines if the scheduled operation
intervals include their end times or not. Defaults to no.
Returns:
A list of scheduled operations meeting the specified conditions. | Below is the instruction that describes the task:
### Input:
Finds operations by time and qubit.
Args:
time: Operations must end after this time to be returned.
duration: Operations must start by time+duration to be
returned.
qubits: If specified, only operations touching one of the included
qubits will be returned.
include_query_end_time: Determines if the query interval includes
its end time. Defaults to no.
include_op_end_times: Determines if the scheduled operation
intervals include their end times or not. Defaults to no.
Returns:
A list of scheduled operations meeting the specified conditions.
### Response:
def query(
self,
*, # Forces keyword args.
time: Timestamp,
duration: Union[Duration, timedelta] = Duration(),
qubits: Iterable[Qid] = None,
include_query_end_time=False,
include_op_end_times=False) -> List[ScheduledOperation]:
"""Finds operations by time and qubit.
Args:
time: Operations must end after this time to be returned.
duration: Operations must start by time+duration to be
returned.
qubits: If specified, only operations touching one of the included
qubits will be returned.
include_query_end_time: Determines if the query interval includes
its end time. Defaults to no.
include_op_end_times: Determines if the scheduled operation
intervals include their end times or not. Defaults to no.
Returns:
A list of scheduled operations meeting the specified conditions.
"""
duration = Duration.create(duration)
earliest_time = time - self._max_duration
end_time = time + duration
qubits = None if qubits is None else frozenset(qubits)
def overlaps_interval(op):
if not include_op_end_times and op.time + op.duration == time:
return False
if not include_query_end_time and op.time == end_time:
return False
return op.time + op.duration >= time and op.time <= end_time
def overlaps_qubits(op):
if qubits is None:
return True
return not qubits.isdisjoint(op.operation.qubits)
potential_matches = self.scheduled_operations.irange_key(earliest_time,
end_time)
return [op
for op in potential_matches
if overlaps_interval(op) and overlaps_qubits(op)] |
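The subtle part of query() is the boundary handling on the time window. A standalone sketch of the same overlap test, with plain numbers standing in for cirq's Timestamp/Duration types (an assumption made for brevity):

def overlaps(op_start, op_duration, query_start, window,
             include_query_end_time=False, include_op_end_times=False):
    # Mirrors overlaps_interval() above with scalar times.
    end_time = query_start + window
    if not include_op_end_times and op_start + op_duration == query_start:
        return False  # op ends exactly where the query begins
    if not include_query_end_time and op_start == end_time:
        return False  # op starts exactly where the query ends
    return op_start + op_duration >= query_start and op_start <= end_time

print(overlaps(0, 5, 5, 10))                             # False: op ends at query start
print(overlaps(0, 5, 5, 10, include_op_end_times=True))  # True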
def _download_chunk(self, chunk_offset, chunk_size):
"""Reads or downloads the received blob from the system."""
range_id = 'bytes={0}-{1}'.format(
chunk_offset, chunk_offset + chunk_size - 1)
return self._blob_service.get_blob(
container_name=self._container_name,
blob_name=self._blob_name,
x_ms_range=range_id) | Reads or downloads the received blob from the system. | Below is the instruction that describes the task:
### Input:
Reads or downloads the received blob from the system.
### Response:
def _download_chunk(self, chunk_offset, chunk_size):
"""Reads or downloads the received blob from the system."""
range_id = 'bytes={0}-{1}'.format(
chunk_offset, chunk_offset + chunk_size - 1)
return self._blob_service.get_blob(
container_name=self._container_name,
blob_name=self._blob_name,
x_ms_range=range_id) |
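The only arithmetic here is building the inclusive HTTP byte range; a quick sketch of the header string the x_ms_range argument receives:

chunk_offset, chunk_size = 4096, 1024
range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_offset + chunk_size - 1)
print(range_id)  # -> 'bytes=4096-5119' (both ends inclusive, hence the -1)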
def process_response(self, request, response, spider):
"""Handle the a Scrapy response"""
if not self.is_cloudflare_challenge(response):
return response
logger = logging.getLogger('cloudflaremiddleware')
logger.debug(
'Cloudflare protection detected on %s, trying to bypass...',
response.url
)
cloudflare_tokens, __ = get_tokens(
request.url,
user_agent=spider.settings.get('USER_AGENT')
)
logger.debug(
'Successfully bypassed the protection for %s, re-scheduling the request',
response.url
)
request.cookies.update(cloudflare_tokens)
request.priority = 99999
return request | Handle a Scrapy response | Below is the instruction that describes the task:
### Input:
Handle a Scrapy response
### Response:
def process_response(self, request, response, spider):
"""Handle the a Scrapy response"""
if not self.is_cloudflare_challenge(response):
return response
logger = logging.getLogger('cloudflaremiddleware')
logger.debug(
'Cloudflare protection detected on %s, trying to bypass...',
response.url
)
cloudflare_tokens, __ = get_tokens(
request.url,
user_agent=spider.settings.get('USER_AGENT')
)
logger.debug(
'Successfully bypassed the protection for %s, re-scheduling the request',
response.url
)
request.cookies.update(cloudflare_tokens)
request.priority = 99999
return request |
def verify(self):
'''Check that the database accurately describes the state of the repository'''
c = self.database.cursor()
non_exist = set()
no_db_entry = set(os.listdir(self.cache_dir))
try:
no_db_entry.remove('file_database.db')
no_db_entry.remove('file_database.db-journal')
except:
pass
for row in c.execute("SELECT path FROM files"):
path = row[0]
repo_path = os.path.join(self.cache_dir, path)
if os.path.exists(repo_path):
no_db_entry.remove(path)
else:
non_exist.add(path)
if len(non_exist) > 0:
raise Exception(
"Found {} records in db for files that don't exist: {}" .format(
len(non_exist),
','.join(non_exist)))
if len(no_db_entry) > 0:
raise Exception("Found {} files that don't have db entries: {}"
.format(len(no_db_entry), ','.join(no_db_entry))) | Check that the database accurately describes the state of the repository | Below is the instruction that describes the task:
### Input:
Check that the database accurately describes the state of the repository
### Response:
def verify(self):
'''Check that the database accurately describes the state of the repository'''
c = self.database.cursor()
non_exist = set()
no_db_entry = set(os.listdir(self.cache_dir))
try:
no_db_entry.remove('file_database.db')
no_db_entry.remove('file_database.db-journal')
except:
pass
for row in c.execute("SELECT path FROM files"):
path = row[0]
repo_path = os.path.join(self.cache_dir, path)
if os.path.exists(repo_path):
no_db_entry.remove(path)
else:
non_exist.add(path)
if len(non_exist) > 0:
raise Exception(
"Found {} records in db for files that don't exist: {}" .format(
len(non_exist),
','.join(non_exist)))
if len(no_db_entry) > 0:
raise Exception("Found {} files that don't have db entries: {}"
.format(len(no_db_entry), ','.join(no_db_entry))) |
def channel_history(current):
"""
Get old messages for a channel. 20 messages per request
.. code-block:: python
# request:
{
'view': '_zops_channel_history',
'channel_key': key,
'timestamp': datetime, # timestamp data of oldest shown message
}
# response:
{
'messages': [MSG_DICT, ],
'status': 'OK',
'code': 200
}
"""
current.output = {
'status': 'OK',
'code': 201,
'messages': []
}
for msg in list(Message.objects.filter(channel_id=current.input['channel_key'],
updated_at__lte=current.input['timestamp'])[:20]):
current.output['messages'].insert(0, msg.serialize(current.user))
# FIXME: looks like pyoko's __lt is broken
# TODO: convert lte to lt and remove this block, when __lt filter fixed
if current.output['messages']:
current.output['messages'].pop(-1) | Get old messages for a channel. 20 messages per request
.. code-block:: python
# request:
{
'view': '_zops_channel_history',
'channel_key': key,
'timestamp': datetime, # timestamp data of oldest shown message
}
# response:
{
'messages': [MSG_DICT, ],
'status': 'OK',
'code': 200
} | Below is the instruction that describes the task:
### Input:
Get old messages for a channel. 20 messages per request
.. code-block:: python
# request:
{
'view': '_zops_channel_history',
'channel_key': key,
'timestamp': datetime, # timestamp data of oldest shown message
}
# response:
{
'messages': [MSG_DICT, ],
'status': 'OK',
'code': 200
}
### Response:
def channel_history(current):
"""
Get old messages for a channel. 20 messages per request
.. code-block:: python
# request:
{
'view': '_zops_channel_history',
'channel_key': key,
'timestamp': datetime, # timestamp data of oldest shown message
}
# response:
{
'messages': [MSG_DICT, ],
'status': 'OK',
'code': 200
}
"""
current.output = {
'status': 'OK',
'code': 201,
'messages': []
}
for msg in list(Message.objects.filter(channel_id=current.input['channel_key'],
updated_at__lte=current.input['timestamp'])[:20]):
current.output['messages'].insert(0, msg.serialize(current.user))
# FIXME: looks like pyoko's __lt is broken
# TODO: convert lte to lt and remove this block, when __lt filter fixed
if current.output['messages']:
current.output['messages'].pop(-1) |
def resource_moved(self, resource, new_resource):
"""It is called when a resource is moved"""
if self.moved is not None:
self.moved(resource, new_resource) | It is called when a resource is moved | Below is the instruction that describes the task:
### Input:
It is called when a resource is moved
### Response:
def resource_moved(self, resource, new_resource):
"""It is called when a resource is moved"""
if self.moved is not None:
self.moved(resource, new_resource) |
def cvtToMag(rh, size):
"""
Convert a size value to a number with a magnitude appended.
Input:
Request Handle
Size bytes
Output:
Converted value with a magnitude
"""
rh.printSysLog("Enter generalUtils.cvtToMag")
mSize = ''
size = size / (1024 * 1024)
if size > (1024 * 5):
# Size is greater than 5G. Using "G" magnitude.
size = size / 1024
mSize = "%.1fG" % size
else:
# Size is less than or equal to 5G. Using "M" magnitude.
mSize = "%.1fM" % size
rh.printSysLog("Exit generalUtils.cvtToMag, magSize: " + mSize)
return mSize | Convert a size value to a number with a magnitude appended.
Input:
Request Handle
Size bytes
Output:
Converted value with a magnitude | Below is the instruction that describes the task:
### Input:
Convert a size value to a number with a magnitude appended.
Input:
Request Handle
Size bytes
Output:
Converted value with a magnitude
### Response:
def cvtToMag(rh, size):
"""
Convert a size value to a number with a magnitude appended.
Input:
Request Handle
Size bytes
Output:
Converted value with a magnitude
"""
rh.printSysLog("Enter generalUtils.cvtToMag")
mSize = ''
size = size / (1024 * 1024)
if size > (1024 * 5):
# Size is greater than 5G. Using "G" magnitude.
size = size / 1024
mSize = "%.1fG" % size
else:
# Size is less than or equal to 5G. Using "M" magnitude.
mSize = "%.1fM" % size
rh.printSysLog("Exit generalUtils.cvtToMag, magSize: " + mSize)
return mSize |
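A worked example of the magnitude logic above, assuming Python 3 true division (with Python 2 integer inputs the divisions would floor):

size = 6 * 1024 ** 3                 # 6 GiB in bytes
size = size / (1024 * 1024)          # 6144.0 MB
print(size > 1024 * 5)               # True -> the "G" branch is taken
print("%.1fG" % (size / 1024))       # -> '6.0G'

size = (3 * 1024 ** 3) / (1024 * 1024)  # 3072.0 MB, below the 5120 MB cut-off
print("%.1fM" % size)                   # -> '3072.0M'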
def ret_glob_minions(self):
'''
Return minions that match via glob
'''
minions = {}
for minion in self.raw:
if fnmatch.fnmatch(minion, self.tgt):
data = self.get_data(minion)
if data:
minions[minion] = data
return minions | Return minions that match via glob | Below is the instruction that describes the task:
### Input:
Return minions that match via glob
### Response:
def ret_glob_minions(self):
'''
Return minions that match via glob
'''
minions = {}
for minion in self.raw:
if fnmatch.fnmatch(minion, self.tgt):
data = self.get_data(minion)
if data:
minions[minion] = data
return minions |
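The matching itself is plain fnmatch globbing; a tiny demo with hypothetical minion ids:

import fnmatch

minions = ['web01', 'web02', 'db01']  # hypothetical cache contents
print([m for m in minions if fnmatch.fnmatch(m, 'web*')])
# -> ['web01', 'web02']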
def importSite(self, location):
"""
This operation imports the portal site configuration to a location
you specify.
"""
params = {
"location" : location,
"f" : "json"
}
url = self._url + "/importSite"
return self._post(url=url, param_dict=params) | This operation imports the portal site configuration to a location
you specify. | Below is the instruction that describes the task:
### Input:
This operation imports the portal site configuration to a location
you specify.
### Response:
def importSite(self, location):
"""
This operation imports the portal site configuration to a location
you specify.
"""
params = {
"location" : location,
"f" : "json"
}
url = self._url + "/importSite"
return self._post(url=url, param_dict=params) |
def create(self, data, **kwargs):
"""Create a new object.
Args:
data (dict): Parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo or
'ref_name', 'stage', 'name', 'all')
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
Returns:
RESTObject: A new instance of the managed object class built with
the data sent by the server
"""
# project_id and commit_id are in the data dict when using the CLI, but
# they are missing when using only the API
# See #511
base_path = '/projects/%(project_id)s/statuses/%(commit_id)s'
if 'project_id' in data and 'commit_id' in data:
path = base_path % data
else:
path = self._compute_path(base_path)
return CreateMixin.create(self, data, path=path, **kwargs) | Create a new object.
Args:
data (dict): Parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo or
'ref_name', 'stage', 'name', 'all')
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
Returns:
RESTObject: A new instance of the managed object class built with
the data sent by the server | Below is the instruction that describes the task:
### Input:
Create a new object.
Args:
data (dict): Parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo or
'ref_name', 'stage', 'name', 'all')
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
Returns:
RESTObject: A new instance of the managed object class built with
the data sent by the server
### Response:
def create(self, data, **kwargs):
"""Create a new object.
Args:
data (dict): Parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo or
'ref_name', 'stage', 'name', 'all')
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
Returns:
RESTObject: A new instance of the managed object class built with
the data sent by the server
"""
# project_id and commit_id are in the data dict when using the CLI, but
# they are missing when using only the API
# See #511
base_path = '/projects/%(project_id)s/statuses/%(commit_id)s'
if 'project_id' in data and 'commit_id' in data:
path = base_path % data
else:
path = self._compute_path(base_path)
return CreateMixin.create(self, data, path=path, **kwargs) |
def setup_logging():
"""
Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
loggers that are given in the law config.
"""
global console_handler
# make sure logging is setup only once
if console_handler:
return
# set the handler of the law root logger
console_handler = logging.StreamHandler()
console_handler.setFormatter(LogFormatter())
logging.getLogger("law").addHandler(console_handler)
# set levels for all loggers
for name, level in Config.instance().items("logging"):
level = level.upper()
if hasattr(logging, level):
logger = logging.getLogger(name)
logger.setLevel(getattr(logging, level))
logger.debug("registered logger with level '{}'".format(level)) | Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
loggers that are given in the law config. | Below is the instruction that describes the task:
### Input:
Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
loggers that are given in the law config.
### Response:
def setup_logging():
"""
Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
loggers that are given in the law config.
"""
global console_handler
# make sure logging is setup only once
if console_handler:
return
# set the handler of the law root logger
console_handler = logging.StreamHandler()
console_handler.setFormatter(LogFormatter())
logging.getLogger("law").addHandler(console_handler)
# set levels for all loggers
for name, level in Config.instance().items("logging"):
level = level.upper()
if hasattr(logging, level):
logger = logging.getLogger(name)
logger.setLevel(getattr(logging, level))
logger.debug("registered logger with level '{}'".format(level)) |
def spherical_matrix(theta, phi, axes='sxyz'):
"""
Given a spherical coordinate vector, find the rotation that will
transform a [0,0,1] vector to those coordinates
Parameters
-----------
theta: float, rotation angle in radians
phi: float, rotation angle in radians
Returns
----------
matrix: (4,4) rotation matrix where the following will
be a cartesian vector in the direction of the
input spherical coordinates:
np.dot(matrix, [0,0,1,0])
"""
result = euler_matrix(0.0, phi, theta, axes=axes)
return result | Given a spherical coordinate vector, find the rotation that will
transform a [0,0,1] vector to those coordinates
Parameters
-----------
theta: float, rotation angle in radians
phi: float, rotation angle in radians
Returns
----------
matrix: (4,4) rotation matrix where the following will
be a cartesian vector in the direction of the
input spherical coordinates:
np.dot(matrix, [0,0,1,0]) | Below is the instruction that describes the task:
### Input:
Given a spherical coordinate vector, find the rotation that will
transform a [0,0,1] vector to those coordinates
Parameters
-----------
theta: float, rotation angle in radians
phi: float, rotation angle in radians
Returns
----------
matrix: (4,4) rotation matrix where the following will
be a cartesian vector in the direction of the
input spherical coordinates:
np.dot(matrix, [0,0,1,0])
### Response:
def spherical_matrix(theta, phi, axes='sxyz'):
"""
Given a spherical coordinate vector, find the rotation that will
transform a [0,0,1] vector to those coordinates
Parameters
-----------
theta: float, rotation angle in radians
phi: float, rotation angle in radians
Returns
----------
matrix: (4,4) rotation matrix where the following will
be a cartesian vector in the direction of the
input spherical coordinates:
np.dot(matrix, [0,0,1,0])
"""
result = euler_matrix(0.0, phi, theta, axes=axes)
return result |
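A numpy sketch checking the claimed behaviour, assuming euler_matrix(0, phi, theta, 'sxyz') composes static-frame rotations as Rz(theta) @ Ry(phi) @ Rx(0):

import numpy as np

def rot_y(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

def rot_z(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

theta, phi = 0.7, 0.3
v = rot_z(theta) @ rot_y(phi) @ np.array([0.0, 0.0, 1.0])
expected = np.array([np.sin(phi) * np.cos(theta),
                     np.sin(phi) * np.sin(theta),
                     np.cos(phi)])
print(np.allclose(v, expected))  # True: [0,0,1] lands on the spherical direction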
def delete_tags(self, arg):
"""Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string
"""
if len(arg) > 0:
raw = html.fromstring(arg)
return raw.text_content().strip()
return arg | Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string | Below is the instruction that describes the task:
### Input:
Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string
### Response:
def delete_tags(self, arg):
"""Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string
"""
if len(arg) > 0:
raw = html.fromstring(arg)
return raw.text_content().strip()
return arg |
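A minimal demo of the stripping step (requires lxml):

from lxml import html

raw = html.fromstring('<p> Hello <b>world</b>! </p>')
print(raw.text_content().strip())  # -> 'Hello world!'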
def shell(commands, splitlines=False, ignore_errors=False):
'''
Subprocess based implementation of pyinfra/api/ssh.py's ``run_shell_command``.
Args:
commands (string, list): command or list of commands to execute
splitlines (bool): optionally have the output split by lines
ignore_errors (bool): ignore errors when executing these commands
'''
if isinstance(commands, six.string_types):
commands = [commands]
all_stdout = []
# Checking for pseudo_state means this function works outside a deploy
# eg the vagrant connector.
print_output = (
pseudo_state.print_output
if pseudo_state.isset()
else False
)
for command in commands:
print_prefix = 'localhost: '
if print_output:
print('{0}>>> {1}'.format(print_prefix, command))
process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT)
stdout = read_buffer(
process.stdout,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(print_prefix, line),
)
# Get & check result
result = process.wait()
# Close any open file descriptor
process.stdout.close()
if result > 0 and not ignore_errors:
raise PyinfraError(
'Local command failed: {0}\n{1}'.format(command, stdout),
)
all_stdout.extend(stdout)
if not splitlines:
return '\n'.join(all_stdout)
return all_stdout | Subprocess based implementation of pyinfra/api/ssh.py's ``run_shell_command``.
Args:
commands (string, list): command or list of commands to execute
splitlines (bool): optionally have the output split by lines
ignore_errors (bool): ignore errors when executing these commands | Below is the instruction that describes the task:
### Input:
Subprocess based implementation of pyinfra/api/ssh.py's ``run_shell_command``.
Args:
commands (string, list): command or list of commands to execute
splitlines (bool): optionally have the output split by lines
ignore_errors (bool): ignore errors when executing these commands
### Response:
def shell(commands, splitlines=False, ignore_errors=False):
'''
Subprocess based implementation of pyinfra/api/ssh.py's ``run_shell_command``.
Args:
commands (string, list): command or list of commands to execute
splitlines (bool): optionally have the output split by lines
ignore_errors (bool): ignore errors when executing these commands
'''
if isinstance(commands, six.string_types):
commands = [commands]
all_stdout = []
# Checking for pseudo_state means this function works outside a deploy
# eg the vagrant connector.
print_output = (
pseudo_state.print_output
if pseudo_state.isset()
else False
)
for command in commands:
print_prefix = 'localhost: '
if print_output:
print('{0}>>> {1}'.format(print_prefix, command))
process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT)
stdout = read_buffer(
process.stdout,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(print_prefix, line),
)
# Get & check result
result = process.wait()
# Close any open file descriptor
process.stdout.close()
if result > 0 and not ignore_errors:
raise PyinfraError(
'Local command failed: {0}\n{1}'.format(command, stdout),
)
all_stdout.extend(stdout)
if not splitlines:
return '\n'.join(all_stdout)
return all_stdout |
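Stripped of pyinfra's pseudo_state plumbing, the core of shell() is a plain Popen loop; a standalone sketch of that pattern with a hypothetical command:

from subprocess import Popen, PIPE, STDOUT

process = Popen('echo hello', shell=True, stdout=PIPE, stderr=STDOUT)
stdout = [line.decode().rstrip('\n') for line in process.stdout]
result = process.wait()
process.stdout.close()
if result > 0:
    raise RuntimeError('Local command failed:\n' + '\n'.join(stdout))
print('\n'.join(stdout))  # -> 'hello'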
def run_profilers(run_object, prof_config, verbose=False):
"""Runs profilers on run_object.
Args:
run_object: An object (string or tuple) for profiling.
prof_config: A string with profilers configuration.
verbose: True if info about running profilers should be shown.
Returns:
An ordered dictionary with collected stats.
Raises:
AmbiguousConfigurationError: when prof_config is ambiguous.
BadOptionError: when unknown options are present in configuration.
"""
if len(prof_config) > len(set(prof_config)):
raise AmbiguousConfigurationError(
'Profiler configuration %s is ambiguous' % prof_config)
available_profilers = {opt for opt, _ in _PROFILERS}
for option in prof_config:
if option not in available_profilers:
raise BadOptionError('Unknown option: %s' % option)
run_stats = OrderedDict()
present_profilers = ((o, p) for o, p in _PROFILERS if o in prof_config)
for option, prof in present_profilers:
curr_profiler = prof(run_object)
if verbose:
print('Running %s...' % curr_profiler.__class__.__name__)
run_stats[option] = curr_profiler.run()
return run_stats | Runs profilers on run_object.
Args:
run_object: An object (string or tuple) for profiling.
prof_config: A string with profilers configuration.
verbose: True if info about running profilers should be shown.
Returns:
An ordered dictionary with collected stats.
Raises:
AmbiguousConfigurationError: when prof_config is ambiguous.
BadOptionError: when unknown options are present in configuration. | Below is the instruction that describes the task:
### Input:
Runs profilers on run_object.
Args:
run_object: An object (string or tuple) for profiling.
prof_config: A string with profilers configuration.
verbose: True if info about running profilers should be shown.
Returns:
An ordered dictionary with collected stats.
Raises:
AmbiguousConfigurationError: when prof_config is ambiguous.
BadOptionError: when unknown options are present in configuration.
### Response:
def run_profilers(run_object, prof_config, verbose=False):
"""Runs profilers on run_object.
Args:
run_object: An object (string or tuple) for profiling.
prof_config: A string with profilers configuration.
verbose: True if info about running profilers should be shown.
Returns:
An ordered dictionary with collected stats.
Raises:
AmbiguousConfigurationError: when prof_config is ambiguous.
BadOptionError: when unknown options are present in configuration.
"""
if len(prof_config) > len(set(prof_config)):
raise AmbiguousConfigurationError(
'Profiler configuration %s is ambiguous' % prof_config)
available_profilers = {opt for opt, _ in _PROFILERS}
for option in prof_config:
if option not in available_profilers:
raise BadOptionError('Unknown option: %s' % option)
run_stats = OrderedDict()
present_profilers = ((o, p) for o, p in _PROFILERS if o in prof_config)
for option, prof in present_profilers:
curr_profiler = prof(run_object)
if verbose:
print('Running %s...' % curr_profiler.__class__.__name__)
run_stats[option] = curr_profiler.run()
return run_stats |
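The two guard clauses are the interesting part; a standalone sketch of just that validation, where the option letters are hypothetical stand-ins for whatever _PROFILERS defines:

available_profilers = {'c', 'm', 't'}  # assumed option set
prof_config = 'cm'

if len(prof_config) > len(set(prof_config)):
    raise ValueError('Profiler configuration %s is ambiguous' % prof_config)
for option in prof_config:
    if option not in available_profilers:
        raise ValueError('Unknown option: %s' % option)
print('configuration %r is valid' % prof_config)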
def _CreateNewShowDir(self, showName):
"""
Create new directory name for show. An autogenerated choice, which is the
showName input that has been stripped of special characters, is proposed,
which the user can accept or they can enter a new name to use. If the
skipUserInput variable is True the autogenerated value is accepted
by default.
Parameters
----------
showName : string
Name of TV show
Returns
----------
string or None
Either the autogenerated directory name, the user given directory name
or None if the user chooses to skip at this input stage.
"""
stripedDir = util.StripSpecialCharacters(showName)
goodlogging.Log.Info("RENAMER", "Suggested show directory name is: '{0}'".format(stripedDir))
if self._skipUserInput is False:
response = goodlogging.Log.Input('RENAMER', "Enter 'y' to accept this directory, 'x' to skip this show or enter a new directory to use: ")
else:
response = 'y'
if response.lower() == 'x':
return None
elif response.lower() == 'y':
return stripedDir
else:
return response | Create new directory name for show. An autogenerated choice, which is the
showName input that has been stripped of special characters, is proposed,
which the user can accept or they can enter a new name to use. If the
skipUserInput variable is True the autogenerated value is accepted
by default.
Parameters
----------
showName : string
Name of TV show
Returns
----------
string or None
Either the autogenerated directory name, the user given directory name
or None if the user chooses to skip at this input stage. | Below is the instruction that describes the task:
### Input:
Create new directory name for show. An autogenerated choice, which is the
showName input that has been stripped of special characters, is proposed,
which the user can accept or they can enter a new name to use. If the
skipUserInput variable is True the autogenerated value is accepted
by default.
Parameters
----------
showName : string
Name of TV show
Returns
----------
string or None
Either the autogenerated directory name, the user given directory name
or None if the user chooses to skip at this input stage.
### Response:
def _CreateNewShowDir(self, showName):
"""
Create new directory name for show. An autogenerated choice, which is the
showName input that has been stripped of special characters, is proposed,
which the user can accept or they can enter a new name to use. If the
skipUserInput variable is True the autogenerated value is accepted
by default.
Parameters
----------
showName : string
Name of TV show
Returns
----------
string or None
Either the autogenerated directory name, the user given directory name
or None if the user chooses to skip at this input stage.
"""
stripedDir = util.StripSpecialCharacters(showName)
goodlogging.Log.Info("RENAMER", "Suggested show directory name is: '{0}'".format(stripedDir))
if self._skipUserInput is False:
response = goodlogging.Log.Input('RENAMER', "Enter 'y' to accept this directory, 'x' to skip this show or enter a new directory to use: ")
else:
response = 'y'
if response.lower() == 'x':
return None
elif response.lower() == 'y':
return stripedDir
else:
return response |
def prepare_destruction(self):
"""Prepares the model for destruction
Un-registers itself as observer from the state machine and the root state
"""
try:
self.relieve_model(self.state_machine_model)
assert self.__buffered_root_state_model is self.state_machine_model.root_state
self.relieve_model(self.__buffered_root_state_model)
self.state_machine_model = None
self.__buffered_root_state_model = None
self.modifications.prepare_destruction()
except KeyError: # Might happen if the observer was already unregistered
pass
if self.active_action:
try:
self.active_action.prepare_destruction()
except Exception as e:
logger.exception("The modification history has had left over an active-action and "
"could not destroy it {0}.".format(e))
self.active_action = None | Prepares the model for destruction
Un-registers itself as observer from the state machine and the root state | Below is the instruction that describes the task:
### Input:
Prepares the model for destruction
Un-registers itself as observer from the state machine and the root state
### Response:
def prepare_destruction(self):
"""Prepares the model for destruction
Un-registers itself as observer from the state machine and the root state
"""
try:
self.relieve_model(self.state_machine_model)
assert self.__buffered_root_state_model is self.state_machine_model.root_state
self.relieve_model(self.__buffered_root_state_model)
self.state_machine_model = None
self.__buffered_root_state_model = None
self.modifications.prepare_destruction()
except KeyError: # Might happen if the observer was already unregistered
pass
if self.active_action:
try:
self.active_action.prepare_destruction()
except Exception as e:
logger.exception("The modification history has had left over an active-action and "
"could not destroy it {0}.".format(e))
self.active_action = None |
def open(url, wait=10):
""" Returns a connection to a url which you can read().
When the wait amount is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError.
404 errors specifically return a HTTP404NotFound.
"""
# If the url is a URLParser, get any POST parameters.
post = None
if isinstance(url, URLParser) and url.method == "post":
post = urllib.urlencode(url.query)
# If the url is a URLParser (or a YahooResult or something),
# use its string representation.
url = str(url)
# Use urllib instead of urllib2 for local files.
if os.path.exists(url):
return urllib.urlopen(url)
else:
socket.setdefaulttimeout(wait)
try:
#connection = urllib2.urlopen(url, post)
request = urllib2.Request(url, post, {"User-Agent": USER_AGENT, "Referer": REFERER})
if PROXY:
p = urllib2.ProxyHandler({PROXY[1]: PROXY[0]})
o = urllib2.build_opener(p, urllib2.HTTPHandler)
urllib2.install_opener(o)
connection = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 401: raise HTTP401Authentication
if e.code == 403: raise HTTP403Forbidden
if e.code == 404: raise HTTP404NotFound
raise HTTPError
except urllib2.URLError, e:
if e.reason[0] == 36: raise URLTimeout
raise URLError
return connection | Returns a connection to a url which you can read().
When the wait amount is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError.
404 errors specifically return a HTTP404NotFound. | Below is the instruction that describes the task:
### Input:
Returns a connection to a url which you can read().
When the wait amount is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError.
404 errors specifically return a HTTP404NotFound.
### Response:
def open(url, wait=10):
""" Returns a connection to a url which you can read().
When the wait amount is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError.
404 errors specifically return a HTTP404NotFound.
"""
# If the url is a URLParser, get any POST parameters.
post = None
if isinstance(url, URLParser) and url.method == "post":
post = urllib.urlencode(url.query)
# If the url is a URLParser (or a YahooResult or something),
# use its string representation.
url = str(url)
# Use urllib instead of urllib2 for local files.
if os.path.exists(url):
return urllib.urlopen(url)
else:
socket.setdefaulttimeout(wait)
try:
#connection = urllib2.urlopen(url, post)
request = urllib2.Request(url, post, {"User-Agent": USER_AGENT, "Referer": REFERER})
if PROXY:
p = urllib2.ProxyHandler({PROXY[1]: PROXY[0]})
o = urllib2.build_opener(p, urllib2.HTTPHandler)
urllib2.install_opener(o)
connection = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 401: raise HTTP401Authentication
if e.code == 403: raise HTTP403Forbidden
if e.code == 404: raise HTTP404NotFound
raise HTTPError
except urllib2.URLError, e:
if e.reason[0] == 36: raise URLTimeout
raise URLError
return connection |
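Hypothetical usage of open() above (note this is Python 2-era urllib2 code, and the exception classes come from the surrounding module):

try:
    connection = open('http://example.com/', wait=5)
    data = connection.read()
except HTTP404NotFound:
    data = None
except URLTimeout:
    data = None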
def present(name, service_name, auth=None, **kwargs):
'''
Ensure an endpoint exists and is up-to-date
name
Interface name
url
URL of the endpoint
service_name
Service name or ID
region
The region name to assign the endpoint
enabled
Boolean to control if endpoint is enabled
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['keystoneng.setup_clouds'](auth)
success, val = _, endpoint = _common(ret, name, service_name, kwargs)
if not success:
return val
if not endpoint:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = kwargs
ret['comment'] = 'Endpoint will be created.'
return ret
# NOTE(SamYaple): Endpoints are returned as a list which can contain
# several items depending on the options passed
endpoints = __salt__['keystoneng.endpoint_create'](**kwargs)
if len(endpoints) == 1:
ret['changes'] = endpoints[0]
else:
for i, endpoint in enumerate(endpoints):
ret['changes'][i] = endpoint
ret['comment'] = 'Created endpoint'
return ret
changes = __salt__['keystoneng.compare_changes'](endpoint, **kwargs)
if changes:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = changes
ret['comment'] = 'Endpoint will be updated.'
return ret
kwargs['endpoint_id'] = endpoint.id
__salt__['keystoneng.endpoint_update'](**kwargs)
ret['changes'].update(changes)
ret['comment'] = 'Updated endpoint'
return ret | Ensure an endpoint exists and is up-to-date
name
Interface name
url
URL of the endpoint
service_name
Service name or ID
region
The region name to assign the endpoint
enabled
Boolean to control if endpoint is enabled | Below is the instruction that describes the task:
### Input:
Ensure an endpoint exists and is up-to-date
name
Interface name
url
URL of the endpoint
service_name
Service name or ID
region
The region name to assign the endpoint
enabled
Boolean to control if endpoint is enabled
### Response:
def present(name, service_name, auth=None, **kwargs):
'''
Ensure an endpoint exists and is up-to-date
name
Interface name
url
URL of the endpoint
service_name
Service name or ID
region
The region name to assign the endpoint
enabled
Boolean to control if endpoint is enabled
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['keystoneng.setup_clouds'](auth)
success, val = _, endpoint = _common(ret, name, service_name, kwargs)
if not success:
return val
if not endpoint:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = kwargs
ret['comment'] = 'Endpoint will be created.'
return ret
# NOTE(SamYaple): Endpoints are returned as a list which can contain
# several items depending on the options passed
endpoints = __salt__['keystoneng.endpoint_create'](**kwargs)
if len(endpoints) == 1:
ret['changes'] = endpoints[0]
else:
for i, endpoint in enumerate(endpoints):
ret['changes'][i] = endpoint
ret['comment'] = 'Created endpoint'
return ret
changes = __salt__['keystoneng.compare_changes'](endpoint, **kwargs)
if changes:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = changes
ret['comment'] = 'Endpoint will be updated.'
return ret
kwargs['endpoint_id'] = endpoint.id
__salt__['keystoneng.endpoint_update'](**kwargs)
ret['changes'].update(changes)
ret['comment'] = 'Updated endpoint'
return ret |
def get_trial_info(current_trial):
"""Get job information for current trial."""
if current_trial.end_time and ("_" in current_trial.end_time):
# end time is parsed from result.json and the format
# is like: yyyy-mm-dd_hh-MM-ss, which will be converted
# to yyyy-mm-dd hh:MM:ss here
time_obj = datetime.datetime.strptime(current_trial.end_time,
"%Y-%m-%d_%H-%M-%S")
end_time = time_obj.strftime("%Y-%m-%d %H:%M:%S")
else:
end_time = current_trial.end_time
if current_trial.metrics:
metrics = eval(current_trial.metrics)
else:
metrics = None
trial_info = {
"trial_id": current_trial.trial_id,
"job_id": current_trial.job_id,
"trial_status": current_trial.trial_status,
"start_time": current_trial.start_time,
"end_time": end_time,
"params": eval(current_trial.params.encode("utf-8")),
"metrics": metrics
}
return trial_info | Get job information for current trial. | Below is the instruction that describes the task:
### Input:
Get job information for current trial.
### Response:
def get_trial_info(current_trial):
"""Get job information for current trial."""
if current_trial.end_time and ("_" in current_trial.end_time):
# end time is parsed from result.json and the format
# is like: yyyy-mm-dd_hh-MM-ss, which will be converted
# to yyyy-mm-dd hh:MM:ss here
time_obj = datetime.datetime.strptime(current_trial.end_time,
"%Y-%m-%d_%H-%M-%S")
end_time = time_obj.strftime("%Y-%m-%d %H:%M:%S")
else:
end_time = current_trial.end_time
if current_trial.metrics:
metrics = eval(current_trial.metrics)
else:
metrics = None
trial_info = {
"trial_id": current_trial.trial_id,
"job_id": current_trial.job_id,
"trial_status": current_trial.trial_status,
"start_time": current_trial.start_time,
"end_time": end_time,
"params": eval(current_trial.params.encode("utf-8")),
"metrics": metrics
}
return trial_info |
def _argtop(y_score, k=None):
"""
Returns the indexes of the top k scores (not necessarily sorted)
"""
# avoid sorting when the caller wants everything (k is None)
if k is None:
return slice(0, len(y_score))
else:
return _argsort(y_score, k) | Returns the indexes of the top k scores (not necessarily sorted) | Below is the instruction that describes the task:
### Input:
Returns the indexes of the top k scores (not necessarily sorted)
### Response:
def _argtop(y_score, k=None):
"""
Returns the indexes of the top k scores (not necessarily sorted)
"""
# avoid sorting when the caller wants everything (k is None)
if k is None:
return slice(0, len(y_score))
else:
return _argsort(y_score, k) |
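_argsort is a helper defined elsewhere; assuming it returns the indexes of the k largest scores, a numpy equivalent makes the contract concrete:

import numpy as np

y_score = np.array([0.1, 0.9, 0.4, 0.7])
top_k = np.argsort(-y_score)[:2]   # one plausible _argsort(y_score, 2)
print(top_k)            # -> [1 3]
print(y_score[top_k])   # -> [0.9 0.7]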
def create_node(vm_):
'''
Build and submit the XML to create a node
'''
# Start the tree
content = ET.Element('ve')
# Name of the instance
name = ET.SubElement(content, 'name')
name.text = vm_['name']
# Description, defaults to name
desc = ET.SubElement(content, 'description')
desc.text = config.get_cloud_config_value(
'desc', vm_, __opts__, default=vm_['name'], search_global=False
)
# How many CPU cores, and how fast they are
cpu = ET.SubElement(content, 'cpu')
cpu.attrib['number'] = config.get_cloud_config_value(
'cpu_number', vm_, __opts__, default='1', search_global=False
)
cpu.attrib['power'] = config.get_cloud_config_value(
'cpu_power', vm_, __opts__, default='1000', search_global=False
)
# How many megabytes of RAM
ram = ET.SubElement(content, 'ram-size')
ram.text = config.get_cloud_config_value(
'ram', vm_, __opts__, default='256', search_global=False
)
# Bandwidth available, in kbps
bandwidth = ET.SubElement(content, 'bandwidth')
bandwidth.text = config.get_cloud_config_value(
'bandwidth', vm_, __opts__, default='100', search_global=False
)
# How many public IPs will be assigned to this instance
ip_num = ET.SubElement(content, 'no-of-public-ip')
ip_num.text = config.get_cloud_config_value(
'ip_num', vm_, __opts__, default='1', search_global=False
)
# Size of the instance disk
disk = ET.SubElement(content, 've-disk')
disk.attrib['local'] = 'true'
disk.attrib['size'] = config.get_cloud_config_value(
'disk_size', vm_, __opts__, default='10', search_global=False
)
# Attributes for the image
vm_image = config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
)
image = show_image({'image': vm_image}, call='function')
platform = ET.SubElement(content, 'platform')
template = ET.SubElement(platform, 'template-info')
template.attrib['name'] = vm_image
os_info = ET.SubElement(platform, 'os-info')
os_info.attrib['technology'] = image[vm_image]['technology']
os_info.attrib['type'] = image[vm_image]['osType']
# Username and password
admin = ET.SubElement(content, 'admin')
admin.attrib['login'] = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
admin.attrib['password'] = config.get_cloud_config_value(
'password', vm_, __opts__, search_global=False
)
data = ET.tostring(content, encoding='UTF-8')
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', data, list(data)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
node = query(action='ve', method='POST', data=data)
return node | Build and submit the XML to create a node | Below is the instruction that describes the task:
### Input:
Build and submit the XML to create a node
### Response:
def create_node(vm_):
'''
Build and submit the XML to create a node
'''
# Start the tree
content = ET.Element('ve')
# Name of the instance
name = ET.SubElement(content, 'name')
name.text = vm_['name']
# Description, defaults to name
desc = ET.SubElement(content, 'description')
desc.text = config.get_cloud_config_value(
'desc', vm_, __opts__, default=vm_['name'], search_global=False
)
# How many CPU cores, and how fast they are
cpu = ET.SubElement(content, 'cpu')
cpu.attrib['number'] = config.get_cloud_config_value(
'cpu_number', vm_, __opts__, default='1', search_global=False
)
cpu.attrib['power'] = config.get_cloud_config_value(
'cpu_power', vm_, __opts__, default='1000', search_global=False
)
# How many megabytes of RAM
ram = ET.SubElement(content, 'ram-size')
ram.text = config.get_cloud_config_value(
'ram', vm_, __opts__, default='256', search_global=False
)
# Bandwidth available, in kbps
bandwidth = ET.SubElement(content, 'bandwidth')
bandwidth.text = config.get_cloud_config_value(
'bandwidth', vm_, __opts__, default='100', search_global=False
)
# How many public IPs will be assigned to this instance
ip_num = ET.SubElement(content, 'no-of-public-ip')
ip_num.text = config.get_cloud_config_value(
'ip_num', vm_, __opts__, default='1', search_global=False
)
# Size of the instance disk
disk = ET.SubElement(content, 've-disk')
disk.attrib['local'] = 'true'
disk.attrib['size'] = config.get_cloud_config_value(
'disk_size', vm_, __opts__, default='10', search_global=False
)
# Attributes for the image
vm_image = config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
)
image = show_image({'image': vm_image}, call='function')
platform = ET.SubElement(content, 'platform')
template = ET.SubElement(platform, 'template-info')
template.attrib['name'] = vm_image
os_info = ET.SubElement(platform, 'os-info')
os_info.attrib['technology'] = image[vm_image]['technology']
os_info.attrib['type'] = image[vm_image]['osType']
# Username and password
admin = ET.SubElement(content, 'admin')
admin.attrib['login'] = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
admin.attrib['password'] = config.get_cloud_config_value(
'password', vm_, __opts__, search_global=False
)
data = ET.tostring(content, encoding='UTF-8')
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', data, list(data)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
node = query(action='ve', method='POST', data=data)
return node |
def get_or_add_image_part(self, image_file):
"""
Return an ``(image_part, rId)`` 2-tuple corresponding to an
|ImagePart| object containing the image in *image_file*, and related
to this slide with the key *rId*. If either the image part or
relationship already exists, they are reused, otherwise they are
newly created.
"""
image_part = self._package.get_or_add_image_part(image_file)
rId = self.relate_to(image_part, RT.IMAGE)
return image_part, rId | Return an ``(image_part, rId)`` 2-tuple corresponding to an
|ImagePart| object containing the image in *image_file*, and related
to this slide with the key *rId*. If either the image part or
relationship already exists, they are reused, otherwise they are
newly created. | Below is the instruction that describes the task:
### Input:
Return an ``(image_part, rId)`` 2-tuple corresponding to an
|ImagePart| object containing the image in *image_file*, and related
to this slide with the key *rId*. If either the image part or
relationship already exists, they are reused, otherwise they are
newly created.
### Response:
def get_or_add_image_part(self, image_file):
"""
Return an ``(image_part, rId)`` 2-tuple corresponding to an
|ImagePart| object containing the image in *image_file*, and related
to this slide with the key *rId*. If either the image part or
relationship already exists, they are reused, otherwise they are
newly created.
"""
image_part = self._package.get_or_add_image_part(image_file)
rId = self.relate_to(image_part, RT.IMAGE)
return image_part, rId |
def save(self, *args, **kwargs):
""" Saves the topic instance. """
# It is vital to track the changes of the forum associated with a topic in order to
# maintain counters up-to-date.
old_instance = None
if self.pk:
old_instance = self.__class__._default_manager.get(pk=self.pk)
# Update the slug field
self.slug = slugify(force_text(self.subject), allow_unicode=True)
# Do the save
super().save(*args, **kwargs)
# If any change has been made to the parent forum, trigger the update of the counters
if old_instance and old_instance.forum != self.forum:
self.update_trackers()
# The previous parent forum counters should also be updated
if old_instance.forum:
old_forum = old_instance.forum
old_forum.refresh_from_db()
old_forum.update_trackers() | Saves the topic instance. | Below is the instruction that describes the task:
### Input:
Saves the topic instance.
### Response:
def save(self, *args, **kwargs):
""" Saves the topic instance. """
# It is vital to track the changes of the forum associated with a topic in order to
# maintain counters up-to-date.
old_instance = None
if self.pk:
old_instance = self.__class__._default_manager.get(pk=self.pk)
# Update the slug field
self.slug = slugify(force_text(self.subject), allow_unicode=True)
# Do the save
super().save(*args, **kwargs)
# If any change has been made to the parent forum, trigger the update of the counters
if old_instance and old_instance.forum != self.forum:
self.update_trackers()
# The previous parent forum counters should also be updated
if old_instance.forum:
old_forum = old_instance.forum
old_forum.refresh_from_db()
old_forum.update_trackers() |
def Lazarek_Black(m, D, mul, kl, Hvap, q=None, Te=None):
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in vertical tubes for either upward or downward flow. Correlation
is as shown in [1]_, and also reviewed in [2]_ and [3]_.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient.
Quality independent. Requires no properties of the gas.
Uses a Reynolds number assuming all the flow is liquid.
.. math::
h_{tp} = 30 Re_{lo}^{0.857} Bg^{0.714} \frac{k_l}{D}
Re_{lo} = \frac{G_{tp}D}{\mu_l}
Parameters
----------
m : float
Mass flow rate [kg/s]
D : float
Diameter of the channel [m]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ has been reviewed.
[2]_ claims it was developed for a range of quality 0-0.6,
Relo 860-5500, mass flux 125-750 kg/m^2/s, q of 1.4-38 W/cm^2, and with a
pipe diameter of 3.1 mm. Developed with data for R113 only.
Examples
--------
>>> Lazarek_Black(m=10, D=0.3, mul=1E-3, kl=0.6, Hvap=2E6, Te=100)
9501.932636079293
References
----------
.. [1] Lazarek, G. M., and S. H. Black. "Evaporative Heat Transfer,
Pressure Drop and Critical Heat Flux in a Small Vertical Tube with
R-113." International Journal of Heat and Mass Transfer 25, no. 7 (July
1982): 945-60. doi:10.1016/0017-9310(82)90070-9.
.. [2] Fang, Xiande, Zhanru Zhou, and Dingkun Li. "Review of Correlations
of Flow Boiling Heat Transfer Coefficients for Carbon Dioxide."
International Journal of Refrigeration 36, no. 8 (December 2013):
2017-39. doi:10.1016/j.ijrefrig.2013.05.015.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
'''
G = m/(pi/4*D**2)
Relo = G*D/mul
if q:
Bg = Boiling(G=G, q=q, Hvap=Hvap)
return 30*Relo**0.857*Bg**0.714*kl/D
elif Te:
# Solved with sympy
return 27000*30**(71/143)*(1./(G*Hvap))**(357/143)*Relo**(857/286)*Te**(357/143)*kl**(500/143)/D**(500/143)
else:
raise Exception('Either q or Te is needed for this correlation') | r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in vertical tubes for either upward or downward flow. Correlation
is as shown in [1]_, and also reviewed in [2]_ and [3]_.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient.
Quality independent. Requires no properties of the gas.
Uses a Reynolds number assuming all the flow is liquid.
.. math::
h_{tp} = 30 Re_{lo}^{0.857} Bg^{0.714} \frac{k_l}{D}
Re_{lo} = \frac{G_{tp}D}{\mu_l}
Parameters
----------
m : float
Mass flow rate [kg/s]
D : float
Diameter of the channel [m]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ has been reviewed.
[2]_ claims it was developed for a range of quality 0-0.6,
Relo 860-5500, mass flux 125-750 kg/m^2/s, q of 1.4-38 W/cm^2, and with a
pipe diameter of 3.1 mm. Developed with data for R113 only.
Examples
--------
>>> Lazarek_Black(m=10, D=0.3, mul=1E-3, kl=0.6, Hvap=2E6, Te=100)
9501.932636079293
References
----------
.. [1] Lazarek, G. M., and S. H. Black. "Evaporative Heat Transfer,
Pressure Drop and Critical Heat Flux in a Small Vertical Tube with
R-113." International Journal of Heat and Mass Transfer 25, no. 7 (July
1982): 945-60. doi:10.1016/0017-9310(82)90070-9.
.. [2] Fang, Xiande, Zhanru Zhou, and Dingkun Li. "Review of Correlations
of Flow Boiling Heat Transfer Coefficients for Carbon Dioxide."
International Journal of Refrigeration 36, no. 8 (December 2013):
2017-39. doi:10.1016/j.ijrefrig.2013.05.015.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357. | Below is the instruction that describes the task:
### Input:
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in vertical tubes for either upward or downward flow. Correlation
is as shown in [1]_, and also reviewed in [2]_ and [3]_.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient.
Quality independent. Requires no properties of the gas.
Uses a Reynolds number assuming all the flow is liquid.
.. math::
h_{tp} = 30 Re_{lo}^{0.857} Bg^{0.714} \frac{k_l}{D}
Re_{lo} = \frac{G_{tp}D}{\mu_l}
Parameters
----------
m : float
Mass flow rate [kg/s]
D : float
Diameter of the channel [m]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ has been reviewed.
[2]_ claims it was developed for a range of quality 0-0.6,
Relo 860-5500, mass flux 125-750 kg/m^2/s, q of 1.4-38 W/cm^2, and with a
pipe diameter of 3.1 mm. Developed with data for R113 only.
Examples
--------
>>> Lazarek_Black(m=10, D=0.3, mul=1E-3, kl=0.6, Hvap=2E6, Te=100)
9501.932636079293
References
----------
.. [1] Lazarek, G. M., and S. H. Black. "Evaporative Heat Transfer,
Pressure Drop and Critical Heat Flux in a Small Vertical Tube with
R-113." International Journal of Heat and Mass Transfer 25, no. 7 (July
1982): 945-60. doi:10.1016/0017-9310(82)90070-9.
.. [2] Fang, Xiande, Zhanru Zhou, and Dingkun Li. "Review of Correlations
of Flow Boiling Heat Transfer Coefficients for Carbon Dioxide."
International Journal of Refrigeration 36, no. 8 (December 2013):
2017-39. doi:10.1016/j.ijrefrig.2013.05.015.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
### Response:
def Lazarek_Black(m, D, mul, kl, Hvap, q=None, Te=None):
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in vertical tubes for either upward or downward flow. Correlation
is as shown in [1]_, and also reviewed in [2]_ and [3]_.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient.
Quality independent. Requires no properties of the gas.
Uses a Reynolds number assuming all the flow is liquid.
.. math::
h_{tp} = 30 Re_{lo}^{0.857} Bg^{0.714} \frac{k_l}{D}
Re_{lo} = \frac{G_{tp}D}{\mu_l}
Parameters
----------
m : float
Mass flow rate [kg/s]
D : float
Diameter of the channel [m]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ has been reviewed.
[2]_ claims it was developed for a range of quality 0-0.6,
Relo 860-5500, mass flux 125-750 kg/m^2/s, q of 1.4-38 W/cm^2, and with a
pipe diameter of 3.1 mm. Developed with data for R113 only.
Examples
--------
>>> Lazarek_Black(m=10, D=0.3, mul=1E-3, kl=0.6, Hvap=2E6, Te=100)
9501.932636079293
References
----------
.. [1] Lazarek, G. M., and S. H. Black. "Evaporative Heat Transfer,
Pressure Drop and Critical Heat Flux in a Small Vertical Tube with
R-113." International Journal of Heat and Mass Transfer 25, no. 7 (July
1982): 945-60. doi:10.1016/0017-9310(82)90070-9.
.. [2] Fang, Xiande, Zhanru Zhou, and Dingkun Li. "Review of Correlations
of Flow Boiling Heat Transfer Coefficients for Carbon Dioxide."
International Journal of Refrigeration 36, no. 8 (December 2013):
2017-39. doi:10.1016/j.ijrefrig.2013.05.015.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
'''
G = m/(pi/4*D**2)
Relo = G*D/mul
if q:
Bg = Boiling(G=G, q=q, Hvap=Hvap)
return 30*Relo**0.857*Bg**0.714*kl/D
elif Te:
# Solved with sympy
return 27000*30**(71/143)*(1./(G*Hvap))**(357/143)*Relo**(857/286)*Te**(357/143)*kl**(500/143)/D**(500/143)
else:
raise Exception('Either q or Te is needed for this correlation')
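A quick numeric check of the correlation above, as a sketch that recomputes the formula directly (the Boiling number is written out here as Bg = q/(G*Hvap), which in the ht library presumably comes from a Boiling() helper; the heat flux below is the value implied by the docstring's Te = 100 K example via q = h*Te):
from math import pi

m, D, mul, kl, Hvap = 10.0, 0.3, 1e-3, 0.6, 2e6
G = m/(pi/4*D**2)          # mass flux over the tube cross-section, kg/m^2/s
Relo = G*D/mul             # Reynolds number with all flow taken as liquid
q = 9501.932636079293*100  # assumed q = h*Te from the docstring example, W/m^2
Bg = q/(G*Hvap)            # Boiling number
h = 30*Relo**0.857*Bg**0.714*kl/D
print(h)                   # ~9501.93 W/m^2/K, matching the Te = 100 K example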
def gnomonicImageToSphere(x, y):
"""
Inverse gnomonic projection (deg).
"""
# Convert angle to [-180, 180] interval
x = x - 360.*(x>180)
x = np.asarray(x)
y = np.asarray(y)
lon = np.degrees(np.arctan2(y, x))
r_theta = np.sqrt(x**2 + y**2)
lat = np.degrees(np.arctan(180. / (np.pi * r_theta)))
return lon, lat | Inverse gnomonic projection (deg). | Below is the instruction that describes the task:
### Input:
Inverse gnomonic projection (deg).
### Response:
def gnomonicImageToSphere(x, y):
"""
Inverse gnomonic projection (deg).
"""
# Convert angle to [-180, 180] interval
x = x - 360.*(x>180)
x = np.asarray(x)
y = np.asarray(y)
lon = np.degrees(np.arctan2(y, x))
r_theta = np.sqrt(x**2 + y**2)
lat = np.degrees(np.arctan(180. / (np.pi * r_theta)))
return lon, lat |
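A short usage sketch, assuming the function above is in scope (it expects array-like input, since the angle wrap `x - 360.*(x>180)` is vectorized):
import numpy as np

x = np.array([1.0, 10.0, 100.0])   # image-plane coordinates
y = np.array([0.0, 10.0, 0.0])
lon, lat = gnomonicImageToSphere(x, y)
print(lon)  # 0, 45, 0 degrees
print(lat)  # decreases with radius: ~89.0, ~76.1, ~29.8 degrees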
def get_element_id(self, complete_name):
"""Get the TocElement element id-number of the element with the
supplied name."""
[group, name] = complete_name.split('.')
element = self.get_element(group, name)
if element:
return element.ident
else:
logger.warning('Unable to find variable [%s]', complete_name)
return None | Get the TocElement element id-number of the element with the
supplied name. | Below is the instruction that describes the task:
### Input:
Get the TocElement element id-number of the element with the
supplied name.
### Response:
def get_element_id(self, complete_name):
"""Get the TocElement element id-number of the element with the
supplied name."""
[group, name] = complete_name.split('.')
element = self.get_element(group, name)
if element:
return element.ident
else:
logger.warning('Unable to find variable [%s]', complete_name)
return None |
def lookup_entry(
self,
linked_resource=None,
sql_resource=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Get an entry by target resource name. This method allows clients to use
the resource name from the source Google Cloud Platform service to get the
Cloud Data Catalog Entry.
Example:
>>> from google.cloud import datacatalog_v1beta1
>>>
>>> client = datacatalog_v1beta1.DataCatalogClient()
>>>
>>> response = client.lookup_entry()
Args:
linked_resource (str): The full name of the Google Cloud Platform resource the Data Catalog
entry represents. See:
https://cloud.google.com/apis/design/resource\_names#full\_resource\_name
Full names are case-sensitive.
Examples:
"//bigquery.googleapis.com/projects/projectId/datasets/datasetId/tables/tableId".
"//pubsub.googleapis.com/projects/projectId/topics/topicId"
sql_resource (str): The SQL name of the entry. SQL names are case-sensitive.
Examples:
1. cloud\_pubsub.project\_id.topic\_id
2. bigquery.project\_id.dataset\_id.table\_id
3. datacatalog.project\_id.location\_id.entry\_group\_id.entry\_id
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datacatalog_v1beta1.types.Entry` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "lookup_entry" not in self._inner_api_calls:
self._inner_api_calls[
"lookup_entry"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.lookup_entry,
default_retry=self._method_configs["LookupEntry"].retry,
default_timeout=self._method_configs["LookupEntry"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
linked_resource=linked_resource, sql_resource=sql_resource
)
request = datacatalog_pb2.LookupEntryRequest(
linked_resource=linked_resource, sql_resource=sql_resource
)
return self._inner_api_calls["lookup_entry"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Get an entry by target resource name. This method allows clients to use
the resource name from the source Google Cloud Platform service to get the
Cloud Data Catalog Entry.
Example:
>>> from google.cloud import datacatalog_v1beta1
>>>
>>> client = datacatalog_v1beta1.DataCatalogClient()
>>>
>>> response = client.lookup_entry()
Args:
linked_resource (str): The full name of the Google Cloud Platform resource the Data Catalog
entry represents. See:
https://cloud.google.com/apis/design/resource\_names#full\_resource\_name
Full names are case-sensitive.
Examples:
"//bigquery.googleapis.com/projects/projectId/datasets/datasetId/tables/tableId".
"//pubsub.googleapis.com/projects/projectId/topics/topicId"
sql_resource (str): The SQL name of the entry. SQL names are case-sensitive.
Examples:
1. cloud\_pubsub.project\_id.topic\_id
2. bigquery.project\_id.dataset\_id.table\_id
3. datacatalog.project\_id.location\_id.entry\_group\_id.entry\_id
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datacatalog_v1beta1.types.Entry` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Get an entry by target resource name. This method allows clients to use
the resource name from the source Google Cloud Platform service to get the
Cloud Data Catalog Entry.
Example:
>>> from google.cloud import datacatalog_v1beta1
>>>
>>> client = datacatalog_v1beta1.DataCatalogClient()
>>>
>>> response = client.lookup_entry()
Args:
linked_resource (str): The full name of the Google Cloud Platform resource the Data Catalog
entry represents. See:
https://cloud.google.com/apis/design/resource\_names#full\_resource\_name
Full names are case-sensitive.
Examples:
"//bigquery.googleapis.com/projects/projectId/datasets/datasetId/tables/tableId".
"//pubsub.googleapis.com/projects/projectId/topics/topicId"
sql_resource (str): The SQL name of the entry. SQL names are case-sensitive.
Examples:
1. cloud\_pubsub.project\_id.topic\_id
2. bigquery.project\_id.dataset\_id.table\_id
3. datacatalog.project\_id.location\_id.entry\_group\_id.entry\_id
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datacatalog_v1beta1.types.Entry` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def lookup_entry(
self,
linked_resource=None,
sql_resource=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Get an entry by target resource name. This method allows clients to use
the resource name from the source Google Cloud Platform service to get the
Cloud Data Catalog Entry.
Example:
>>> from google.cloud import datacatalog_v1beta1
>>>
>>> client = datacatalog_v1beta1.DataCatalogClient()
>>>
>>> response = client.lookup_entry()
Args:
linked_resource (str): The full name of the Google Cloud Platform resource the Data Catalog
entry represents. See:
https://cloud.google.com/apis/design/resource\_names#full\_resource\_name
Full names are case-sensitive.
Examples:
"//bigquery.googleapis.com/projects/projectId/datasets/datasetId/tables/tableId".
"//pubsub.googleapis.com/projects/projectId/topics/topicId"
sql_resource (str): The SQL name of the entry. SQL names are case-sensitive.
Examples:
1. cloud\_pubsub.project\_id.topic\_id
2. bigquery.project\_id.dataset\_id.table\_id
3. datacatalog.project\_id.location\_id.entry\_group\_id.entry\_id
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datacatalog_v1beta1.types.Entry` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "lookup_entry" not in self._inner_api_calls:
self._inner_api_calls[
"lookup_entry"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.lookup_entry,
default_retry=self._method_configs["LookupEntry"].retry,
default_timeout=self._method_configs["LookupEntry"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
linked_resource=linked_resource, sql_resource=sql_resource
)
request = datacatalog_pb2.LookupEntryRequest(
linked_resource=linked_resource, sql_resource=sql_resource
)
return self._inner_api_calls["lookup_entry"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def save(func):
"""@decorator: Saves data after executing :func:.
Also performs modifications set as permanent options.
"""
def aux(self, *args, **kwargs):
out = func(self, *args, **kwargs)
path = (hasattr(self, 'path') and self.path
or os.path.join(os.getcwd(), '.td'))
gpath = (hasattr(self, 'gpath') and self.gpath
or os.path.expanduser('~/.tdrc'))
if os.path.exists(path):
shutil.copy2(path, os.path.join(os.path.dirname(path), '.td~'))
open(path, 'w').write(
json.dumps({
'items': self.data,
'refs': self.refs,
'options': self.options
})
)
open(gpath, 'w').write(json.dumps(self.globalOptions))
return out
return aux | @decorator: Saves data after executing :func:.
Also performs modifications set as permanent options. | Below is the instruction that describes the task:
### Input:
@decorator: Saves data after executing :func:.
Also performs modifications set as permanent options.
### Response:
def save(func):
"""@decorator: Saves data after executing :func:.
Also performs modifications set as permanent options.
"""
def aux(self, *args, **kwargs):
out = func(self, *args, **kwargs)
path = (hasattr(self, 'path') and self.path
or os.path.join(os.getcwd(), '.td'))
gpath = (hasattr(self, 'gpath') and self.gpath
or os.path.expanduser('~/.tdrc'))
if os.path.exists(path):
shutil.copy2(path, os.path.join(os.path.dirname(path), '.td~'))
open(path, 'w').write(
json.dumps({
'items': self.data,
'refs': self.refs,
'options': self.options
})
)
open(gpath, 'w').write(json.dumps(self.globalOptions))
return out
return aux |
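A minimal sketch of applying the decorator, assuming `save` above (and its os/shutil/json imports) is in scope; Todo is a hypothetical class exposing the data/refs/options attributes the wrapper serializes, and note that calling the decorated method writes .td and ~/.tdrc as a side effect:
class Todo:
    def __init__(self):
        self.data, self.refs = [], {}
        self.options, self.globalOptions = {}, {}

    @save
    def add(self, item):
        self.data.append(item)   # persisted right after this returns

todo = Todo()
todo.add({'name': 'write docs'})  # ./.td now holds items/refs/options as JSON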
def from_text(self, line, line_index, column, is_escaped):
"""Return the new state of the comment parser."""
begin = self._begin
end = self._end
single = self._single
single_len = len(single)
start_len = len(begin)
if _token_at_col_in_line(line, column, single, single_len):
return (STATE_IN_COMMENT,
(line_index, column + single_len),
ParserState.EOL)
elif _token_at_col_in_line(line, column, begin, start_len):
return (STATE_IN_COMMENT,
(line_index, column + single_len),
end)
elif ((_token_at_col_in_line(line, column, "\"") or
_token_at_col_in_line(line, column, "'")) and
not _is_escaped(line, column, is_escaped)):
# Check here to see whether this is a quote or if this
# is a spellcheckable line
if (_token_at_col_in_line(line, column, "\"\"\"") or
_token_at_col_in_line(line, column, "'''")):
return (STATE_IN_COMMENT,
(line_index, column + 3),
line[column:column + 3])
else:
return (STATE_IN_QUOTE,
(line_index, column + 1),
line[column:column + 1])
return (STATE_IN_TEXT,
(0, 0),
None) | Return the new state of the comment parser. | Below is the instruction that describes the task:
### Input:
Return the new state of the comment parser.
### Response:
def from_text(self, line, line_index, column, is_escaped):
"""Return the new state of the comment parser."""
begin = self._begin
end = self._end
single = self._single
single_len = len(single)
start_len = len(begin)
if _token_at_col_in_line(line, column, single, single_len):
return (STATE_IN_COMMENT,
(line_index, column + single_len),
ParserState.EOL)
elif _token_at_col_in_line(line, column, begin, start_len):
return (STATE_IN_COMMENT,
(line_index, column + single_len),
end)
elif ((_token_at_col_in_line(line, column, "\"") or
_token_at_col_in_line(line, column, "'")) and
not _is_escaped(line, column, is_escaped)):
# Check here to see whether this is a quote or if this
# is a spellcheckable line
if (_token_at_col_in_line(line, column, "\"\"\"") or
_token_at_col_in_line(line, column, "'''")):
return (STATE_IN_COMMENT,
(line_index, column + 3),
line[column:column + 3])
else:
return (STATE_IN_QUOTE,
(line_index, column + 1),
line[column:column + 1])
return (STATE_IN_TEXT,
(0, 0),
None) |
def _area_settings(area, setting, value, validate_value):
"""Will validate area settings and values, returns data packet."""
if validate_value:
# Exit delay has some specific limitations apparently
if (setting == CONST.SETTING_EXIT_DELAY_AWAY
and value not in CONST.VALID_SETTING_EXIT_AWAY):
raise AbodeException(ERROR.INVALID_SETTING_VALUE,
CONST.VALID_SETTING_EXIT_AWAY)
elif value not in CONST.ALL_SETTING_ENTRY_EXIT_DELAY:
raise AbodeException(ERROR.INVALID_SETTING_VALUE,
CONST.ALL_SETTING_ENTRY_EXIT_DELAY)
return {'area': area, setting: value} | Will validate area settings and values, returns data packet. | Below is the instruction that describes the task:
### Input:
Will validate area settings and values, returns data packet.
### Response:
def _area_settings(area, setting, value, validate_value):
"""Will validate area settings and values, returns data packet."""
if validate_value:
# Exit delay has some specific limitations apparently
if (setting == CONST.SETTING_EXIT_DELAY_AWAY
and value not in CONST.VALID_SETTING_EXIT_AWAY):
raise AbodeException(ERROR.INVALID_SETTING_VALUE,
CONST.VALID_SETTING_EXIT_AWAY)
elif value not in CONST.ALL_SETTING_ENTRY_EXIT_DELAY:
raise AbodeException(ERROR.INVALID_SETTING_VALUE,
CONST.ALL_SETTING_ENTRY_EXIT_DELAY)
return {'area': area, setting: value} |
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval)) | Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors | Below is the instruction that describes the task:
### Input:
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
### Response:
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval)) |
def get_desc2nts_fnc(self, hdrgo_prt=True, section_prt=None,
top_n=None, use_sections=True):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
# RETURN: flat list of namedtuples
nts_flat = self.get_nts_flat(hdrgo_prt, use_sections)
if nts_flat:
flds = nts_flat[0]._fields
if not use_sections:
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
else:
return {'sortobj':self,
'sections' : [(self.grprobj.hdrobj.secdflt, nts_flat)],
'hdrgo_prt':hdrgo_prt,
'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# print('FFFF Sorter:get_desc2nts_fnc: nts_flat is None')
# RETURN: 2-D list [(section_name0, namedtuples0), (section_name1, namedtuples1), ...
# kws: top_n hdrgo_prt section_sortby
# Over-ride hdrgo_prt depending on top_n value
assert top_n is not True and top_n is not False, \
"top_n({T}) MUST BE None OR AN int".format(T=top_n)
assert self.sectobj is not None, "SECTIONS OBJECT DOES NOT EXIST"
sec_sb = self.sectobj.section_sortby
# Override hdrgo_prt, if sorting by sections or returning a subset of GO IDs in section
hdrgo_prt_curr = hdrgo_prt is True
if sec_sb is True or (sec_sb is not False and sec_sb is not None) or top_n is not None:
hdrgo_prt_curr = False
# print('GGGG Sorter:get_desc2nts_fnc: hdrgo_prt_curr({}) sec_sb({}) top_n({})'.format(
# hdrgo_prt_curr, sec_sb, top_n))
nts_section = self.sectobj.get_sorted_nts_keep_section(hdrgo_prt_curr)
# print('HHHH Sorter:get_desc2nts_fnc: nts_section')
# Take top_n in each section, if requested
if top_n is not None:
nts_section = [(s, nts[:top_n]) for s, nts in nts_section]
if section_prt is None:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# Send flat list of sections nts back, as requested
if section_prt is False:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat),
'num_sections':len(nts_section)}
# Send 2-D sections nts back
# print('IIII Sorter:get_desc2nts_fnc: nts_section')
flds = nts_section[0][1][0]._fields if nts_section else []
return {'sortobj':self, 'sections' : nts_section, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':sum(len(nts) for _, nts in nts_section),
'num_sections':len(nts_section)} | Return grouped, sorted namedtuples in either format: flat, sections. | Below is the instruction that describes the task:
### Input:
Return grouped, sorted namedtuples in either format: flat, sections.
### Response:
def get_desc2nts_fnc(self, hdrgo_prt=True, section_prt=None,
top_n=None, use_sections=True):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
# RETURN: flat list of namedtuples
nts_flat = self.get_nts_flat(hdrgo_prt, use_sections)
if nts_flat:
flds = nts_flat[0]._fields
if not use_sections:
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
else:
return {'sortobj':self,
'sections' : [(self.grprobj.hdrobj.secdflt, nts_flat)],
'hdrgo_prt':hdrgo_prt,
'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# print('FFFF Sorter:get_desc2nts_fnc: nts_flat is None')
# RETURN: 2-D list [(section_name0, namedtuples0), (section_name1, namedtuples1), ...
# kws: top_n hdrgo_prt section_sortby
# Over-ride hdrgo_prt depending on top_n value
assert top_n is not True and top_n is not False, \
"top_n({T}) MUST BE None OR AN int".format(T=top_n)
assert self.sectobj is not None, "SECTIONS OBJECT DOES NOT EXIST"
sec_sb = self.sectobj.section_sortby
# Override hdrgo_prt, if sorting by sections or returning a subset of GO IDs in section
hdrgo_prt_curr = hdrgo_prt is True
if sec_sb is True or (sec_sb is not False and sec_sb is not None) or top_n is not None:
hdrgo_prt_curr = False
# print('GGGG Sorter:get_desc2nts_fnc: hdrgo_prt_curr({}) sec_sb({}) top_n({})'.format(
# hdrgo_prt_curr, sec_sb, top_n))
nts_section = self.sectobj.get_sorted_nts_keep_section(hdrgo_prt_curr)
# print('HHHH Sorter:get_desc2nts_fnc: nts_section')
# Take top_n in each section, if requested
if top_n is not None:
nts_section = [(s, nts[:top_n]) for s, nts in nts_section]
if section_prt is None:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# Send flat list of sections nts back, as requested
if section_prt is False:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat),
'num_sections':len(nts_section)}
# Send 2-D sections nts back
# print('IIII Sorter:get_desc2nts_fnc: nts_section')
flds = nts_section[0][1][0]._fields if nts_section else []
return {'sortobj':self, 'sections' : nts_section, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':sum(len(nts) for _, nts in nts_section),
'num_sections':len(nts_section)} |
def _init_metadata(self):
"""stub"""
self._objective_bank_id_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'objective_bank_id'),
'element_label': 'Objective Bank Id',
'instructions': 'accepts a valid OSID Id string',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': []
} | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def _init_metadata(self):
"""stub"""
self._objective_bank_id_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'objective_bank_id'),
'element_label': 'Objective Bank Id',
'instructions': 'accepts a valid OSID Id string',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': []
} |
def from_coordinates(cls, coordinates):
"""Creates a `Primitive` from a list of coordinates."""
prim = cls()
for coord in coordinates:
pm = PseudoMonomer(ampal_parent=prim)
pa = PseudoAtom(coord, ampal_parent=pm)
pm.atoms = OrderedDict([('CA', pa)])
prim.append(pm)
prim.relabel_all()
return prim | Creates a `Primitive` from a list of coordinates. | Below is the instruction that describes the task:
### Input:
Creates a `Primitive` from a list of coordinates.
### Response:
def from_coordinates(cls, coordinates):
"""Creates a `Primitive` from a list of coordinates."""
prim = cls()
for coord in coordinates:
pm = PseudoMonomer(ampal_parent=prim)
pa = PseudoAtom(coord, ampal_parent=pm)
pm.atoms = OrderedDict([('CA', pa)])
prim.append(pm)
prim.relabel_all()
return prim |
def ListHashes(age=aff4.NEWEST_TIME):
"""Yields all the hashes in the file store.
Args:
age: AFF4 age specification. Only get hits corresponding to the given age
spec. Should be aff4.NEWEST_TIME or a time range given as a tuple
(start, end) in microseconds since Jan 1st, 1970. If just a microseconds
value is given it's treated as the higher end of the range, i.e. (0,
age). See aff4.FACTORY.ParseAgeSpecification for details.
Yields:
FileStoreHash instances corresponding to all the hashes in the file store.
Raises:
ValueError: if age was set to aff4.ALL_TIMES.
"""
if age == aff4.ALL_TIMES:
raise ValueError("age==aff4.ALL_TIMES is not allowed.")
urns = []
for fingerprint_type, hash_types in iteritems(HashFileStore.HASH_TYPES):
for hash_type in hash_types:
urns.append(HashFileStore.PATH.Add(fingerprint_type).Add(hash_type))
for _, values in aff4.FACTORY.MultiListChildren(urns, age=age):
for value in values:
yield FileStoreHash(value) | Yields all the hashes in the file store.
Args:
age: AFF4 age specification. Only get hits corresponding to the given age
spec. Should be aff4.NEWEST_TIME or a time range given as a tuple
(start, end) in microseconds since Jan 1st, 1970. If just a microseconds
value is given it's treated as the higher end of the range, i.e. (0,
age). See aff4.FACTORY.ParseAgeSpecification for details.
Yields:
FileStoreHash instances corresponding to all the hashes in the file store.
Raises:
ValueError: if age was set to aff4.ALL_TIMES. | Below is the instruction that describes the task:
### Input:
Yields all the hashes in the file store.
Args:
age: AFF4 age specification. Only get hits corresponding to the given age
spec. Should be aff4.NEWEST_TIME or a time range given as a tuple
(start, end) in microseconds since Jan 1st, 1970. If just a microseconds
value is given it's treated as the higher end of the range, i.e. (0,
age). See aff4.FACTORY.ParseAgeSpecification for details.
Yields:
FileStoreHash instances corresponding to all the hashes in the file store.
Raises:
ValueError: if age was set to aff4.ALL_TIMES.
### Response:
def ListHashes(age=aff4.NEWEST_TIME):
"""Yields all the hashes in the file store.
Args:
age: AFF4 age specification. Only get hits corresponding to the given age
spec. Should be aff4.NEWEST_TIME or a time range given as a tuple
(start, end) in microseconds since Jan 1st, 1970. If just a microseconds
value is given it's treated as the higher end of the range, i.e. (0,
age). See aff4.FACTORY.ParseAgeSpecification for details.
Yields:
FileStoreHash instances corresponding to all the hashes in the file store.
Raises:
ValueError: if age was set to aff4.ALL_TIMES.
"""
if age == aff4.ALL_TIMES:
raise ValueError("age==aff4.ALL_TIMES is not allowed.")
urns = []
for fingerprint_type, hash_types in iteritems(HashFileStore.HASH_TYPES):
for hash_type in hash_types:
urns.append(HashFileStore.PATH.Add(fingerprint_type).Add(hash_type))
for _, values in aff4.FACTORY.MultiListChildren(urns, age=age):
for value in values:
yield FileStoreHash(value) |
def get_row(self, row):
'''Format a single row (if necessary)'''
if isinstance(self.fields, dict):
return dict([
(key, text_type(value).format(**row) if RE_FORMATTED.match(value) else row[value])
for key, value in self.fields.items()
])
else:
return [text_type(field).format(**row) if RE_FORMATTED.match(field)
else row[field]
for field in self.fields] | Format a single row (if necessary) | Below is the instruction that describes the task:
### Input:
Format a single row (if necessary)
### Response:
def get_row(self, row):
'''Format a single row (if necessary)'''
if isinstance(self.fields, dict):
return dict([
(key, text_type(value).format(**row) if RE_FORMATTED.match(value) else row[value])
for key, value in self.fields.items()
])
else:
return [text_type(field).format(**row) if RE_FORMATTED.match(field)
else row[field]
for field in self.fields] |
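The two branches are easiest to see restated inline. RE_FORMATTED is assumed here to be a "looks like a str.format template" test (not the module's own pattern), and text_type is simply str on Python 3:
import re

RE_FORMATTED = re.compile(r'\{.+\}')  # assumed pattern for illustration
row = {'first': 'Ada', 'last': 'Lovelace', 'year': 1815}

fields = {'name': '{first} {last}', 'born': 'year'}   # dict: rename + format
print({k: v.format(**row) if RE_FORMATTED.match(v) else row[v]
       for k, v in fields.items()})   # {'name': 'Ada Lovelace', 'born': 1815}

fields = ['first', 'year']                            # list: plain selection
print([f.format(**row) if RE_FORMATTED.match(f) else row[f] for f in fields])
# ['Ada', 1815]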
def error(transaction, code): # pragma: no cover
"""
Notifies generic error on blockwise exchange.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
transaction.block_transfer = True
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.type = defines.Types["RST"]
transaction.response.token = transaction.request.token
transaction.response.code = code
return transaction | Notifies generic error on blockwise exchange.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction | Below is the instruction that describes the task:
### Input:
Notifies generic error on blockwise exchange.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
### Response:
def error(transaction, code): # pragma: no cover
"""
Notifies generic error on blockwise exchange.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
transaction.block_transfer = True
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.type = defines.Types["RST"]
transaction.response.token = transaction.request.token
transaction.response.code = code
return transaction |
def shift_image(im, shift, borderValue=0):
"""shift the image
Parameters
----------
im: 2d array
The image
shift: 2 numbers
(y,x) the shift in y and x direction
borderValue: number, default 0
The value for the pixels outside the border (default 0)
Returns
-------
im: 2d array
The shifted image
Notes
-----
The output image has the same size as the input.
Therefore the image will be cropped in the process.
"""
im = np.asarray(im, dtype=np.float32)
rows, cols = im.shape
M = np.asarray([[1, 0, shift[1]], [0, 1, shift[0]]], dtype=np.float32)
return cv2.warpAffine(im, M, (cols, rows),
borderMode=cv2.BORDER_CONSTANT,
flags=cv2.INTER_CUBIC,
borderValue=borderValue) | shift the image
Parameters
----------
im: 2d array
The image
shift: 2 numbers
(y,x) the shift in y and x direction
borderValue: number, default 0
The value for the pixels outside the border (default 0)
Returns
-------
im: 2d array
The shifted image
Notes
-----
The output image has the same size as the input.
Therefore the image will be cropped in the process. | Below is the instruction that describes the task:
### Input:
shift the image
Parameters
----------
im: 2d array
The image
shift: 2 numbers
(y,x) the shift in y and x direction
borderValue: number, default 0
The value for the pixels outside the border (default 0)
Returns
-------
im: 2d array
The shifted image
Notes
-----
The output image has the same size as the input.
Therefore the image will be cropped in the process.
### Response:
def shift_image(im, shift, borderValue=0):
"""shift the image
Parameters
----------
im: 2d array
The image
shift: 2 numbers
(y,x) the shift in y and x direction
borderValue: number, default 0
The value for the pixels outside the border (default 0)
Returns
-------
im: 2d array
The shifted image
Notes
-----
The output image has the same size as the input.
Therefore the image will be cropped in the process.
"""
im = np.asarray(im, dtype=np.float32)
rows, cols = im.shape
M = np.asarray([[1, 0, shift[1]], [0, 1, shift[0]]], dtype=np.float32)
return cv2.warpAffine(im, M, (cols, rows),
borderMode=cv2.BORDER_CONSTANT,
flags=cv2.INTER_CUBIC,
borderValue=borderValue) |
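A usage sketch (opencv-python must be installed for the cv2 dependency of shift_image): a single bright pixel moves down one row and right two columns, and the vacated border pixels take borderValue:
import numpy as np

im = np.zeros((5, 5), dtype=np.float32)
im[2, 2] = 1.0
out = shift_image(im, shift=(1, 2))                 # (y, x) = down 1, right 2
print(np.unravel_index(np.argmax(out), out.shape))  # (3, 4)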
def delete(self):
"""Delete the customer payment profile remotely and locally"""
response = delete_payment_profile(self.customer_profile.profile_id,
self.payment_profile_id)
response.raise_if_error()
return super(CustomerPaymentProfile, self).delete() | Delete the customer payment profile remotely and locally | Below is the instruction that describes the task:
### Input:
Delete the customer payment profile remotely and locally
### Response:
def delete(self):
"""Delete the customer payment profile remotely and locally"""
response = delete_payment_profile(self.customer_profile.profile_id,
self.payment_profile_id)
response.raise_if_error()
return super(CustomerPaymentProfile, self).delete() |
def write_to_manifest(self):
""" Overwrites the section of the manifest with the featureconfig's value """
self.manifest.remove_section(self.feature_name)
self.manifest.add_section(self.feature_name)
for k, v in self.raw_dict.items():
self.manifest.set(self.feature_name, k, v) | Overwrites the section of the manifest with the featureconfig's value | Below is the instruction that describes the task:
### Input:
Overwrites the section of the manifest with the featureconfig's value
### Response:
def write_to_manifest(self):
""" Overwrites the section of the manifest with the featureconfig's value """
self.manifest.remove_section(self.feature_name)
self.manifest.add_section(self.feature_name)
for k, v in self.raw_dict.items():
self.manifest.set(self.feature_name, k, v) |
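A self-contained sketch of the same rewrite step, with configparser standing in for the manifest object (the real manifest is assumed to expose the same remove_section/add_section/set interface):
import configparser

manifest = configparser.ConfigParser()
manifest.add_section('myfeature')
manifest.set('myfeature', 'stale', 'old')        # pre-existing, stale entry

raw_dict = {'formula': 'git+https://example.com/repo', 'version': '1.2'}
manifest.remove_section('myfeature')             # drop everything stored
manifest.add_section('myfeature')
for k, v in raw_dict.items():
    manifest.set('myfeature', k, v)              # rewrite from the config
print(dict(manifest['myfeature']))               # the stale key is gone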
def entropy_score(var,bins, w=None, decimate=True):
'''Compute entropy scores, given a variance and # of bins
'''
if w is None:
n = len(var)
w = np.arange(0,n,n//bins) / float(n)
if decimate:
n = len(var)
var = var[0:n:n//bins]
score = w * np.log(var * w * np.sqrt(2*np.pi*np.exp(1)))
score[np.isnan(score)]=np.Inf
return score | Compute entropy scores, given a variance and # of bins | Below is the instruction that describes the task:
### Input:
Compute entropy scores, given a variance and # of bins
### Response:
def entropy_score(var,bins, w=None, decimate=True):
'''Compute entropy scores, given a variance and # of bins
'''
if w is None:
n = len(var)
w = np.arange(0,n,n//bins) / float(n)
if decimate:
n = len(var)
var = var[0:n:n//bins]
score = w * np.log(var * w * np.sqrt(2*np.pi*np.exp(1)))
score[np.isnan(score)]=np.Inf
return score |
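A usage sketch on a synthetic variance curve; note the first weight is zero, so the first score comes out NaN (numpy will warn about log(0)) and is mapped to Inf by the guard above:
import numpy as np

var = np.linspace(1.0, 0.1, 64)    # synthetic per-sample variance
score = entropy_score(var, bins=8)
print(score.shape)                 # (8,) after decimation
print(score[0])                    # inf: the zero-weight bin is masked out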
def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
if isinstance(data, NDArray):
data = [data]
handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
str_keys = data.keys()
nd_vals = data.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, NDArray) for v in nd_vals):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(data, list):
if any(not isinstance(v, NDArray) for v in data):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = None
handles = c_handle_array(data)
else:
raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
"or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
handles,
keys)) | Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>} | Below is the instruction that describes the task:
### Input:
Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
### Response:
def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
if isinstance(data, NDArray):
data = [data]
handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
str_keys = data.keys()
nd_vals = data.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, NDArray) for v in nd_vals):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(data, list):
if any(not isinstance(v, NDArray) for v in data):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = None
handles = c_handle_array(data)
else:
raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
"or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
handles,
keys)) |
def get_devices(self):
"""
Return the devices linked to the gateway.
Returns a Command.
"""
def process_result(result):
return [self.get_device(dev) for dev in result]
return Command('get', [ROOT_DEVICES], process_result=process_result) | Return the devices linked to the gateway.
Returns a Command. | Below is the instruction that describes the task:
### Input:
Return the devices linked to the gateway.
Returns a Command.
### Response:
def get_devices(self):
"""
Return the devices linked to the gateway.
Returns a Command.
"""
def process_result(result):
return [self.get_device(dev) for dev in result]
return Command('get', [ROOT_DEVICES], process_result=process_result) |
def perform_action(self, action):
""" Perform actions in the api from the CLI """
form = ToolForm
s_action = form_action = action.split('_')[0]
form_name = s_action.title() + ' tools'
cores = False
a_type = 'containers'
forms = [action.upper() + 'TOOLS']
form_args = {'color': 'CONTROL',
'names': [s_action],
'name': form_name,
'action_dict': {'action_name': s_action,
'present_t': s_action + 'ing ' + a_type,
'past_t': s_action.title() + ' ' + a_type,
'action': form_action,
'type': a_type,
'cores': cores}}
# grammar rules
vowels = ['a', 'e', 'i', 'o', 'u']
# consonant-vowel-consonant ending
# Eg: stop -> stopping
if s_action[-1] not in vowels and \
s_action[-2] in vowels and \
s_action[-3] not in vowels:
form_args['action_dict']['present_t'] = s_action + \
s_action[-1] + 'ing ' + a_type
# word ends with a 'e'
# eg: remove -> removing
if s_action[-1] == 'e':
form_args['action_dict']['present_t'] = s_action[:-1] \
+ 'ing ' + a_type
if s_action == 'configure':
form_args['names'].pop()
form_args['names'].append('get_configure')
form_args['names'].append('save_configure')
form_args['names'].append('restart_tools')
if action == 'add':
form = AddForm
forms = ['ADD', 'ADDOPTIONS', 'CHOOSETOOLS']
form_args['name'] = 'Add plugins'
form_args['name'] += '\t'*6 + '^Q to quit'
elif action == 'inventory':
form = InventoryToolsForm
forms = ['INVENTORY']
form_args = {'color': 'STANDOUT', 'name': 'Inventory of tools'}
elif action == 'services':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': 'STANDOUT',
'name': 'Plugin Services',
'core': True}
elif action == 'services_external':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': 'STANDOUT',
'name': 'External Services',
'core': True,
'external': True}
form_args['name'] += '\t'*8 + '^T to toggle main'
if s_action in self.view_togglable:
form_args['name'] += '\t'*8 + '^V to toggle group view'
try:
self.remove_forms(forms)
thr = Thread(target=self.add_form, args=(),
kwargs={'form': form,
'form_name': forms[0],
'form_args': form_args})
thr.start()
while thr.is_alive():
npyscreen.notify('Please wait, loading form...',
title='Loading')
time.sleep(1)
except Exception as e: # pragma: no cover
pass
return | Perform actions in the api from the CLI | Below is the instruction that describes the task:
### Input:
Perform actions in the api from the CLI
### Response:
def perform_action(self, action):
""" Perform actions in the api from the CLI """
form = ToolForm
s_action = form_action = action.split('_')[0]
form_name = s_action.title() + ' tools'
cores = False
a_type = 'containers'
forms = [action.upper() + 'TOOLS']
form_args = {'color': 'CONTROL',
'names': [s_action],
'name': form_name,
'action_dict': {'action_name': s_action,
'present_t': s_action + 'ing ' + a_type,
'past_t': s_action.title() + ' ' + a_type,
'action': form_action,
'type': a_type,
'cores': cores}}
# grammar rules
vowels = ['a', 'e', 'i', 'o', 'u']
# consonant-vowel-consonant ending
# Eg: stop -> stopping
if s_action[-1] not in vowels and \
s_action[-2] in vowels and \
s_action[-3] not in vowels:
form_args['action_dict']['present_t'] = s_action + \
s_action[-1] + 'ing ' + a_type
# word ends with a 'e'
# eg: remove -> removing
if s_action[-1] == 'e':
form_args['action_dict']['present_t'] = s_action[:-1] \
+ 'ing ' + a_type
if s_action == 'configure':
form_args['names'].pop()
form_args['names'].append('get_configure')
form_args['names'].append('save_configure')
form_args['names'].append('restart_tools')
if action == 'add':
form = AddForm
forms = ['ADD', 'ADDOPTIONS', 'CHOOSETOOLS']
form_args['name'] = 'Add plugins'
form_args['name'] += '\t'*6 + '^Q to quit'
elif action == 'inventory':
form = InventoryToolsForm
forms = ['INVENTORY']
form_args = {'color': 'STANDOUT', 'name': 'Inventory of tools'}
elif action == 'services':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': 'STANDOUT',
'name': 'Plugin Services',
'core': True}
elif action == 'services_external':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': 'STANDOUT',
'name': 'External Services',
'core': True,
'external': True}
form_args['name'] += '\t'*8 + '^T to toggle main'
if s_action in self.view_togglable:
form_args['name'] += '\t'*8 + '^V to toggle group view'
try:
self.remove_forms(forms)
thr = Thread(target=self.add_form, args=(),
kwargs={'form': form,
'form_name': forms[0],
'form_args': form_args})
thr.start()
while thr.is_alive():
npyscreen.notify('Please wait, loading form...',
title='Loading')
time.sleep(1)
except Exception as e: # pragma: no cover
pass
return |
def best_prefix(bytes, system=NIST):
"""Return a bitmath instance representing the best human-readable
representation of the number of bytes given by ``bytes``. In addition
to a numeric type, the ``bytes`` parameter may also be a bitmath type.
Optionally select a preferred unit system by specifying the ``system``
keyword. Choices for ``system`` are ``bitmath.NIST`` (default) and
``bitmath.SI``.
Basically a shortcut for:
>>> import bitmath
>>> b = bitmath.Byte(12345)
>>> best = b.best_prefix()
Or:
>>> import bitmath
>>> best = (bitmath.KiB(12345) * 4201).best_prefix()
"""
if isinstance(bytes, Bitmath):
value = bytes.bytes
else:
value = bytes
return Byte(value).best_prefix(system=system) | Return a bitmath instance representing the best human-readable
representation of the number of bytes given by ``bytes``. In addition
to a numeric type, the ``bytes`` parameter may also be a bitmath type.
Optionally select a preferred unit system by specifying the ``system``
keyword. Choices for ``system`` are ``bitmath.NIST`` (default) and
``bitmath.SI``.
Basically a shortcut for:
>>> import bitmath
>>> b = bitmath.Byte(12345)
>>> best = b.best_prefix()
Or:
>>> import bitmath
>>> best = (bitmath.KiB(12345) * 4201).best_prefix() | Below is the instruction that describes the task:
### Input:
Return a bitmath instance representing the best human-readable
representation of the number of bytes given by ``bytes``. In addition
to a numeric type, the ``bytes`` parameter may also be a bitmath type.
Optionally select a preferred unit system by specifying the ``system``
keyword. Choices for ``system`` are ``bitmath.NIST`` (default) and
``bitmath.SI``.
Basically a shortcut for:
>>> import bitmath
>>> b = bitmath.Byte(12345)
>>> best = b.best_prefix()
Or:
>>> import bitmath
>>> best = (bitmath.KiB(12345) * 4201).best_prefix()
### Response:
def best_prefix(bytes, system=NIST):
"""Return a bitmath instance representing the best human-readable
representation of the number of bytes given by ``bytes``. In addition
to a numeric type, the ``bytes`` parameter may also be a bitmath type.
Optionally select a preferred unit system by specifying the ``system``
keyword. Choices for ``system`` are ``bitmath.NIST`` (default) and
``bitmath.SI``.
Basically a shortcut for:
>>> import bitmath
>>> b = bitmath.Byte(12345)
>>> best = b.best_prefix()
Or:
>>> import bitmath
>>> best = (bitmath.KiB(12345) * 4201).best_prefix()
"""
if isinstance(bytes, Bitmath):
value = bytes.bytes
else:
value = bytes
return Byte(value).best_prefix(system=system) |
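Two more usage examples contrasting the unit systems (expected outputs shown as comments, assuming bitmath's default string formatting):
import bitmath

print(bitmath.best_prefix(2048))                     # 2.0 KiB (NIST default)
print(bitmath.best_prefix(2048, system=bitmath.SI))  # 2.048 kB
print(bitmath.best_prefix(bitmath.MiB(2000)))        # 1.953125 GiB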
def download(self, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
"""Downloads a file to the device.
Args:
source_file: A filename or file-like object to download to the device.
source_len: Optional length of source_file. If source_file is a file-like
object and source_len is not provided, source_file is read into
memory.
info_cb: Optional callback accepting FastbootMessage for text sent from
the bootloader.
progress_callback: Optional callback called with the percent of the
source_file downloaded. Note, this doesn't include progress of the
actual flashing.
Returns:
Response to a download request, normally nothing.
"""
if isinstance(source_file, six.string_types):
source_len = os.stat(source_file).st_size
source_file = open(source_file)
if source_len == 0:
# Fall back to storing it all in memory :(
data = source_file.read()
source_file = six.StringIO(data)
source_len = len(data)
self._protocol.send_command('download', '%08x' % source_len)
return self._protocol.handle_data_sending(
source_file, source_len, info_cb, progress_callback=progress_callback) | Downloads a file to the device.
Args:
source_file: A filename or file-like object to download to the device.
source_len: Optional length of source_file. If source_file is a file-like
object and source_len is not provided, source_file is read into
memory.
info_cb: Optional callback accepting FastbootMessage for text sent from
the bootloader.
progress_callback: Optional callback called with the percent of the
source_file downloaded. Note, this doesn't include progress of the
actual flashing.
Returns:
Response to a download request, normally nothing. | Below is the instruction that describes the task:
### Input:
Downloads a file to the device.
Args:
source_file: A filename or file-like object to download to the device.
source_len: Optional length of source_file. If source_file is a file-like
object and source_len is not provided, source_file is read into
memory.
info_cb: Optional callback accepting FastbootMessage for text sent from
the bootloader.
progress_callback: Optional callback called with the percent of the
source_file downloaded. Note, this doesn't include progress of the
actual flashing.
Returns:
Response to a download request, normally nothing.
### Response:
def download(self, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
"""Downloads a file to the device.
Args:
source_file: A filename or file-like object to download to the device.
source_len: Optional length of source_file. If source_file is a file-like
object and source_len is not provided, source_file is read into
memory.
info_cb: Optional callback accepting FastbootMessage for text sent from
the bootloader.
progress_callback: Optional callback called with the percent of the
source_file downloaded. Note, this doesn't include progress of the
actual flashing.
Returns:
Response to a download request, normally nothing.
"""
if isinstance(source_file, six.string_types):
source_len = os.stat(source_file).st_size
source_file = open(source_file)
if source_len == 0:
# Fall back to storing it all in memory :(
data = source_file.read()
source_file = six.StringIO(data)
source_len = len(data)
self._protocol.send_command('download', '%08x' % source_len)
return self._protocol.handle_data_sending(
source_file, source_len, info_cb, progress_callback=progress_callback) |
def dispatch_request(self):
""" Handle redirect back from provider """
if current_user.is_authenticated:
return redirect(self.next)
# clear previous!
if 'social_data' in session:
del session['social_data']
res = self.app.authorized_response()
if res is None:
if self.flash: flash(self.auth_failed_msg, 'danger')
return redirect(self.next)
# retrieve profile
data = self.get_profile_data(res)
if data is None:
if self.flash: flash(self.data_failed_msg, 'danger')
return redirect(self.next)
# attempt login
try:
ok = user_service.attempt_social_login(self.provider, data['id'])
if ok:
if self.flash:
flash(self.logged_in_msg.format(self.provider), 'success')
return redirect(self.logged_in)
except x.AccountLocked as locked:
msg = self.lock_msg.format(locked.locked_until)
if self.flash: flash(msg, 'danger')
url = url_for(self.lock_redirect, **self.lock_redirect_params)
return redirect(url)
except x.EmailNotConfirmed:
return redirect(url_for(self.unconfirmed_email_endpoint))
# get data
email = data.get('email')
provider = data.get('provider')
id = data.get('id')
id_column = '{}_id'.format(provider)
# user exists: add social id to profile
user = user_service.first(email=email)
if user:
setattr(user, id_column, id)
user_service.save(user)
# no user: register
if not user:
cfg = current_app.config
send_welcome = cfg.get('USER_SEND_WELCOME_MESSAGE')
base_confirm_url = cfg.get('USER_BASE_EMAIL_CONFIRM_URL')
if not base_confirm_url:
endpoint = 'user.confirm.email.request'
base_confirm_url = url_for(endpoint, _external=True)
data = dict(email=email)
data[id_column] = id
user = user_service.register(
user_data=data,
send_welcome=send_welcome,
base_confirm_url=base_confirm_url
)
# email confirmed?
if user_service.require_confirmation and not user.email_confirmed:
return redirect(url_for(self.ok_endpoint, **self.ok_params))
# otherwise just login
user_service.force_login(user)
return redirect(self.force_login_redirect) | Handle redirect back from provider | Below is the instruction that describes the task:
### Input:
Handle redirect back from provider
### Response:
def dispatch_request(self):
""" Handle redirect back from provider """
if current_user.is_authenticated:
return redirect(self.next)
# clear previous!
if 'social_data' in session:
del session['social_data']
res = self.app.authorized_response()
if res is None:
if self.flash: flash(self.auth_failed_msg, 'danger')
return redirect(self.next)
# retrieve profile
data = self.get_profile_data(res)
if data is None:
if self.flash: flash(self.data_failed_msg, 'danger')
return redirect(self.next)
# attempt login
try:
ok = user_service.attempt_social_login(self.provider, data['id'])
if ok:
if self.flash:
flash(self.logged_in_msg.format(self.provider), 'success')
return redirect(self.logged_in)
except x.AccountLocked as locked:
msg = self.lock_msg.format(locked.locked_until)
if self.flash: flash(msg, 'danger')
url = url_for(self.lock_redirect, **self.lock_redirect_params)
return redirect(url)
except x.EmailNotConfirmed:
return redirect(url_for(self.unconfirmed_email_endpoint))
# get data
email = data.get('email')
provider = data.get('provider')
id = data.get('id')
id_column = '{}_id'.format(provider)
# user exists: add social id to profile
user = user_service.first(email=email)
if user:
setattr(user, id_column, id)
user_service.save(user)
# no user: register
if not user:
cfg = current_app.config
send_welcome = cfg.get('USER_SEND_WELCOME_MESSAGE')
base_confirm_url = cfg.get('USER_BASE_EMAIL_CONFIRM_URL')
if not base_confirm_url:
endpoint = 'user.confirm.email.request'
base_confirm_url = url_for(endpoint, _external=True)
data = dict(email=email)
data[id_column] = id
user = user_service.register(
user_data=data,
send_welcome=send_welcome,
base_confirm_url=base_confirm_url
)
# email confirmed?
if user_service.require_confirmation and not user.email_confirmed:
return redirect(url_for(self.ok_endpoint, **self.ok_params))
# otherwise just login
user_service.force_login(user)
return redirect(self.force_login_redirect) |
def getLoadAvg(self):
"""Return system Load Average.
@return: List of 1 min, 5 min and 15 min Load Average figures.
"""
try:
fp = open(loadavgFile, 'r')
line = fp.readline()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % loadavgFile)
arr = line.split()
if len(arr) >= 3:
return [float(col) for col in arr[:3]]
else:
return None | Return system Load Average.
@return: List of 1 min, 5 min and 15 min Load Average figures. | Below is the the instruction that describes the task:
### Input:
Return system Load Average.
@return: List of 1 min, 5 min and 15 min Load Average figures.
### Response:
def getLoadAvg(self):
"""Return system Load Average.
@return: List of 1 min, 5 min and 15 min Load Average figures.
"""
try:
fp = open(loadavgFile, 'r')
line = fp.readline()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % loadavgFile)
arr = line.split()
if len(arr) >= 3:
return [float(col) for col in arr[:3]]
else:
return None |
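A minimal usage sketch for getLoadAvg above, assuming it is defined on a system-information class (the owner class name is hypothetical) and that loadavgFile points at /proc/loadavg:
info = SysInfo()  # hypothetical owner of getLoadAvg()
loads = info.getLoadAvg()
if loads is not None:
    one, five, fifteen = loads
    print('load averages: %.2f %.2f %.2f' % (one, five, fifteen))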
def asset_url_for(self, asset):
"""
Look up the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found
"""
if '//' in asset:
return asset
if asset not in self.assets:
return None
return '{0}{1}'.format(self.assets_url, self.assets[asset]) | Look up the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found | Below is the instruction that describes the task:
### Input:
Look up the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found
### Response:
def asset_url_for(self, asset):
"""
Look up the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found
"""
if '//' in asset:
return asset
if asset not in self.assets:
return None
return '{0}{1}'.format(self.assets_url, self.assets[asset]) |
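A minimal usage sketch for asset_url_for above, assuming an environment object carrying an assets manifest dict and a static URL prefix (the names and values shown are assumptions):
env.assets = {'app.css': 'app-5f3a2c1.css'}        # hypothetical manifest entry
env.assets_url = '/static/'
env.asset_url_for('app.css')                       # -> '/static/app-5f3a2c1.css'
env.asset_url_for('https://cdn.example.com/x.js')  # contains '//': returned as-is
env.asset_url_for('missing.css')                   # not in the manifest -> None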
def init(self, force_deploy=False, client=None):
"""Reserve and deploys the nodes according to the resources section
In comparison to the vagrant provider, networks must be characterized
as in the networks key.
Args:
force_deploy (bool): True iff the environment must be redeployed
Raises:
MissingNetworkError: If one network is missing in comparison to
what is claimed.
NotEnoughNodesError: If the `min` constraints can't be met.
"""
_force_deploy = self.provider_conf.force_deploy
self.provider_conf.force_deploy = _force_deploy or force_deploy
self._provider_conf = self.provider_conf.to_dict()
r = api.Resources(self._provider_conf, client=client)
r.launch()
roles = r.get_roles()
networks = r.get_networks()
return (_to_enos_roles(roles),
_to_enos_networks(networks)) | Reserve and deploy the nodes according to the resources section
In comparison to the vagrant provider, networks must be characterized
as in the networks key.
Args:
force_deploy (bool): True iff the environment must be redeployed
Raises:
MissingNetworkError: If one network is missing in comparison to
what is claimed.
NotEnoughNodesError: If the `min` constraints can't be met. | Below is the instruction that describes the task:
### Input:
Reserve and deploy the nodes according to the resources section
In comparison to the vagrant provider, networks must be characterized
as in the networks key.
Args:
force_deploy (bool): True iff the environment must be redeployed
Raises:
MissingNetworkError: If one network is missing in comparison to
what is claimed.
NotEnoughNodesError: If the `min` constraints can't be met.
### Response:
def init(self, force_deploy=False, client=None):
"""Reserve and deploys the nodes according to the resources section
In comparison to the vagrant provider, networks must be characterized
as in the networks key.
Args:
force_deploy (bool): True iff the environment must be redeployed
Raises:
MissingNetworkError: If one network is missing in comparison to
what is claimed.
NotEnoughNodesError: If the `min` constraints can't be met.
"""
_force_deploy = self.provider_conf.force_deploy
self.provider_conf.force_deploy = _force_deploy or force_deploy
self._provider_conf = self.provider_conf.to_dict()
r = api.Resources(self._provider_conf, client=client)
r.launch()
roles = r.get_roles()
networks = r.get_networks()
return (_to_enos_roles(roles),
_to_enos_networks(networks)) |
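A minimal usage sketch for init above, assuming a provider object built from a configuration with a resources section (the provider construction is hypothetical):
provider = Provider(conf)  # hypothetical provider/configuration pair
roles, networks = provider.init(force_deploy=False)
# roles and networks come back converted to the framework's own representation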
def setup_seasonal(self):
"""
Check if there's some seasonal holiday going on, set up appropriate
Shibe picture and load holiday words.
Note: if there are two or more holidays defined for a certain date,
the first one takes precedence.
"""
# If we've specified a season, just run that one
if self.ns.season:
return self.load_season(self.ns.season)
# If we've specified another doge or no doge at all, it does not make
# sense to use seasons.
if self.ns.doge_path is not None and not self.ns.no_shibe:
return
now = datetime.datetime.now()
for season, data in wow.SEASONS.items():
start, end = data['dates']
start_dt = datetime.datetime(now.year, start[0], start[1])
# Be sane if the holiday season spans over New Year's day.
end_dt = datetime.datetime(
now.year + (start[0] > end[0] and 1 or 0), end[0], end[1])
if start_dt <= now <= end_dt:
# Wow, much holiday!
return self.load_season(season) | Check if there's some seasonal holiday going on, set up appropriate
Shibe picture and load holiday words.
Note: if there are two or more holidays defined for a certain date,
the first one takes precedence. | Below is the instruction that describes the task:
### Input:
Check if there's some seasonal holiday going on, set up appropriate
Shibe picture and load holiday words.
Note: if there are two or more holidays defined for a certain date,
the first one takes precedence.
### Response:
def setup_seasonal(self):
"""
Check if there's some seasonal holiday going on, set up appropriate
Shibe picture and load holiday words.
Note: if there are two or more holidays defined for a certain date,
the first one takes precedence.
"""
# If we've specified a season, just run that one
if self.ns.season:
return self.load_season(self.ns.season)
# If we've specified another doge or no doge at all, it does not make
# sense to use seasons.
if self.ns.doge_path is not None and not self.ns.no_shibe:
return
now = datetime.datetime.now()
for season, data in wow.SEASONS.items():
start, end = data['dates']
start_dt = datetime.datetime(now.year, start[0], start[1])
# Be sane if the holiday season spans over New Year's day.
end_dt = datetime.datetime(
now.year + (start[0] > end[0] and 1 or 0), end[0], end[1])
if start_dt <= now <= end_dt:
# Wow, much holiday!
return self.load_season(season) |
def unsubscribe(request, message_id, dispatch_id, hashed, redirect_to=None):
"""Handles unsubscribe request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
"""
return _generic_view(
'handle_unsubscribe_request', sig_unsubscribe_failed,
request, message_id, dispatch_id, hashed, redirect_to=redirect_to
) | Handles unsubscribe request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return: | Below is the instruction that describes the task:
### Input:
Handles unsubscribe request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
### Response:
def unsubscribe(request, message_id, dispatch_id, hashed, redirect_to=None):
"""Handles unsubscribe request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
"""
return _generic_view(
'handle_unsubscribe_request', sig_unsubscribe_failed,
request, message_id, dispatch_id, hashed, redirect_to=redirect_to
) |
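A minimal URL-wiring sketch for the Django view above; the route pattern and names are assumptions:
from django.urls import path
urlpatterns = [
    path('messaging/unsubscribe/<int:message_id>/<int:dispatch_id>/<str:hashed>/',
         unsubscribe, name='messaging_unsubscribe'),
]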
def _request(self, method, *relative_path_parts, **kwargs):
"""Sends an HTTP request to the REST API and receives the requested data.
Additionally sets up pagination cursors.
:param str method: HTTP method name
:param relative_path_parts: the relative paths for the request URI
:param kwargs: argument keywords
:returns: requested data
:raises APIError: for non-2xx responses
"""
uri = self._create_api_uri(*relative_path_parts)
response = get(uri, params=self._get_params(**kwargs))
self.is_initial = False
self.before_cursor = response.headers.get('cb-before', None)
self.after_cursor = response.headers.get('cb-after', None)
return self._handle_response(response).json() | Sends an HTTP request to the REST API and receives the requested data.
Additionally sets up pagination cursors.
:param str method: HTTP method name
:param relative_path_parts: the relative paths for the request URI
:param kwargs: argument keywords
:returns: requested data
:raises APIError: for non-2xx responses | Below is the instruction that describes the task:
### Input:
Sends an HTTP request to the REST API and receives the requested data.
Additionally sets up pagination cursors.
:param str method: HTTP method name
:param relative_path_parts: the relative paths for the request URI
:param kwargs: argument keywords
:returns: requested data
:raises APIError: for non-2xx responses
### Response:
def _request(self, method, *relative_path_parts, **kwargs):
"""Sends an HTTP request to the REST API and receives the requested data.
Additionally sets up pagination cursors.
:param str method: HTTP method name
:param relative_path_parts: the relative paths for the request URI
:param kwargs: argument keywords
:returns: requested data
:raises APIError: for non-2xx responses
"""
uri = self._create_api_uri(*relative_path_parts)
response = get(uri, params=self._get_params(**kwargs))
self.is_initial = False
self.before_cursor = response.headers.get('cb-before', None)
self.after_cursor = response.headers.get('cb-after', None)
return self._handle_response(response).json() |
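A minimal usage sketch for _request above, assuming it lives on a paginated REST client (the client class is hypothetical). Note that the method argument is accepted but never used: the helper always issues a GET via requests' get:
client = PublicAPI()  # hypothetical client class
trades = client._request('get', 'products', 'BTC-USD', 'trades', limit=100)
# client.before_cursor / client.after_cursor now hold the pagination cursors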
def add_leverage(self):
""" Adds leverage term to the model
Returns
----------
None (changes instance attributes)
"""
if self.leverage is True:
pass
else:
self.leverage = True
self.z_no += 1
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3))
self.latent_variables.add_z('Skewness', fam.Flat(transform='exp'), fam.Normal(0, 3))
self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
self.latent_variables.add_z('Returns Constant', fam.Normal(0,3,transform=None), fam.Normal(0, 3))
self.latent_variables.add_z('GARCH-M', fam.Normal(0,3,transform=None), fam.Normal(0, 3))
self.latent_variables.z_list[-3].start = 2.0 | Adds leverage term to the model
Returns
----------
None (changes instance attributes) | Below is the instruction that describes the task:
### Input:
Adds leverage term to the model
Returns
----------
None (changes instance attributes)
### Response:
def add_leverage(self):
""" Adds leverage term to the model
Returns
----------
None (changes instance attributes)
"""
if self.leverage is True:
pass
else:
self.leverage = True
self.z_no += 1
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.z_list.pop()
self.latent_variables.add_z('Leverage Term', fam.Flat(transform=None), fam.Normal(0, 3))
self.latent_variables.add_z('Skewness', fam.Flat(transform='exp'), fam.Normal(0, 3))
self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
self.latent_variables.add_z('Returns Constant', fam.Normal(0,3,transform=None), fam.Normal(0, 3))
self.latent_variables.add_z('GARCH-M', fam.Normal(0,3,transform=None), fam.Normal(0, 3))
self.latent_variables.z_list[-3].start = 2.0 |
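A minimal usage sketch for add_leverage above, assuming a GARCH-in-mean style model instance exposing it (the constructor is hypothetical):
model = EGARCHM(data=returns, p=1, q=1)  # hypothetical constructor
model.add_leverage()  # re-registers the trailing latent variables plus 'Leverage Term'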
def handle_command_line(options):
"""
act upon command options
"""
options = merge(options, constants.DEFAULT_OPTIONS)
engine = plugins.ENGINES.get_engine(
options[constants.LABEL_TEMPLATE_TYPE],
options[constants.LABEL_TMPL_DIRS],
options[constants.LABEL_CONFIG_DIR],
)
if options[constants.LABEL_TEMPLATE] is None:
if options[constants.POSITIONAL_LABEL_TEMPLATE] is None:
raise exceptions.NoTemplate(constants.ERROR_NO_TEMPLATE)
else:
engine.render_string_to_file(
options[constants.POSITIONAL_LABEL_TEMPLATE],
options[constants.LABEL_CONFIG],
options[constants.LABEL_OUTPUT],
)
else:
engine.render_to_file(
options[constants.LABEL_TEMPLATE],
options[constants.LABEL_CONFIG],
options[constants.LABEL_OUTPUT],
)
engine.report()
HASH_STORE.save_hashes()
exit_code = reporter.convert_to_shell_exit_code(
engine.number_of_templated_files()
)
return exit_code | act upon command options | Below is the instruction that describes the task:
### Input:
act upon command options
### Response:
def handle_command_line(options):
"""
act upon command options
"""
options = merge(options, constants.DEFAULT_OPTIONS)
engine = plugins.ENGINES.get_engine(
options[constants.LABEL_TEMPLATE_TYPE],
options[constants.LABEL_TMPL_DIRS],
options[constants.LABEL_CONFIG_DIR],
)
if options[constants.LABEL_TEMPLATE] is None:
if options[constants.POSITIONAL_LABEL_TEMPLATE] is None:
raise exceptions.NoTemplate(constants.ERROR_NO_TEMPLATE)
else:
engine.render_string_to_file(
options[constants.POSITIONAL_LABEL_TEMPLATE],
options[constants.LABEL_CONFIG],
options[constants.LABEL_OUTPUT],
)
else:
engine.render_to_file(
options[constants.LABEL_TEMPLATE],
options[constants.LABEL_CONFIG],
options[constants.LABEL_OUTPUT],
)
engine.report()
HASH_STORE.save_hashes()
exit_code = reporter.convert_to_shell_exit_code(
engine.number_of_templated_files()
)
return exit_code |
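A minimal usage sketch for handle_command_line above, assuming an options dict keyed by the constants labels it reads (the literal keys shown are assumptions):
import sys
options = {
    'template': 'report.template',  # assumed value of constants.LABEL_TEMPLATE
    'configuration': 'data.yml',    # assumed value of constants.LABEL_CONFIG
    'output': 'report.txt',         # assumed value of constants.LABEL_OUTPUT
}
sys.exit(handle_command_line(options))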
def GetLayerFromFeatureService(self, fs, layerName="", returnURLOnly=False):
"""Obtains a layer from a feature service by feature service reference.
Args:
fs (FeatureService): The feature service from which to obtain the layer.
layerName (str): The name of the layer. Defaults to ``""``.
returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the layer is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.
"""
layers = None
table = None
layer = None
sublayer = None
try:
layers = fs.layers
if (layers is None or len(layers) == 0) and fs.url is not None:
fs = arcrest.ags.FeatureService(
url=fs.url)
layers = fs.layers
if layers is not None:
for layer in layers:
if layer.name == layerName:
if returnURLOnly:
return fs.url + '/' + str(layer.id)
else:
return layer
elif not layer.subLayers is None:
for sublayer in layer.subLayers:
if sublayer == layerName:
return sublayer
if fs.tables is not None:
for table in fs.tables:
if table.name == layerName:
if returnURLOnly:
return fs.url + '/' + str(table.id)
else:
return table
return None
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "GetLayerFromFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
layers = None
table = None
layer = None
sublayer = None
del layers
del table
del layer
del sublayer
gc.collect() | Obtains a layer from a feature service by feature service reference.
Args:
fs (FeatureService): The feature service from which to obtain the layer.
layerName (str): The name of the layer. Defaults to ``""``.
returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the layer is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. | Below is the instruction that describes the task:
### Input:
Obtains a layer from a feature service by feature service reference.
Args:
fs (FeatureService): The feature service from which to obtain the layer.
layerName (str): The name of the layer. Defaults to ``""``.
returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the layer is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.
### Response:
def GetLayerFromFeatureService(self, fs, layerName="", returnURLOnly=False):
"""Obtains a layer from a feature service by feature service reference.
Args:
fs (FeatureService): The feature service from which to obtain the layer.
layerName (str): The name of the layer. Defaults to ``""``.
returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the layer is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.
"""
layers = None
table = None
layer = None
sublayer = None
try:
layers = fs.layers
if (layers is None or len(layers) == 0) and fs.url is not None:
fs = arcrest.ags.FeatureService(
url=fs.url)
layers = fs.layers
if layers is not None:
for layer in layers:
if layer.name == layerName:
if returnURLOnly:
return fs.url + '/' + str(layer.id)
else:
return layer
elif not layer.subLayers is None:
for sublayer in layer.subLayers:
if sublayer == layerName:
return sublayer
if fs.tables is not None:
for table in fs.tables:
if table.name == layerName:
if returnURLOnly:
return fs.url + '/' + str(table.id)
else:
return table
return None
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "GetLayerFromFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
layers = None
table = None
layer = None
sublayer = None
del layers
del table
del layer
del sublayer
gc.collect() |
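A minimal usage sketch for GetLayerFromFeatureService above, assuming an ArcREST feature-service object and a helper instance exposing the method (the constructor arguments are assumptions):
fs = arcrest.agol.FeatureService(url=service_url, securityHandler=handler)
layer_url = helper.GetLayerFromFeatureService(fs, layerName='Parcels',
                                              returnURLOnly=True)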
def get_model_creation_kwargs(model_obj):
"""
Get a dictionary of the keyword arguments needed to create the passed model
object using `pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
model_kwargs : dict.
Contains the keyword arguments and the required values that are needed
to initialize a replica of `model_obj`.
"""
# Extract the model abbreviation for this model
model_abbrev = get_model_abbrev(model_obj)
# Create a dictionary to store the keyword arguments needed to initialize
# the new model object.
model_kwargs = {"model_type": model_abbrev,
"names": model_obj.name_spec,
"intercept_names": model_obj.intercept_names,
"intercept_ref_pos": model_obj.intercept_ref_position,
"shape_names": model_obj.shape_names,
"shape_ref_pos": model_obj.shape_ref_position,
"nest_spec": model_obj.nest_spec,
"mixing_vars": model_obj.mixing_vars,
"mixing_id_col": model_obj.mixing_id_col}
return model_kwargs | Get a dictionary of the keyword arguments needed to create the passed model
object using `pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
model_kwargs : dict.
Contains the keyword arguments and the required values that are needed
to initialize a replica of `model_obj`. | Below is the instruction that describes the task:
### Input:
Get a dictionary of the keyword arguments needed to create the passed model
object using `pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
model_kwargs : dict.
Contains the keyword arguments and the required values that are needed
to initialize a replica of `model_obj`.
### Response:
def get_model_creation_kwargs(model_obj):
"""
Get a dictionary of the keyword arguments needed to create the passed model
object using `pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
model_kwargs : dict.
Contains the keyword arguments and the required values that are needed
to initialize a replica of `model_obj`.
"""
# Extract the model abbreviation for this model
model_abbrev = get_model_abbrev(model_obj)
# Create a dictionary to store the keyword arguments needed to initialize
# the new model object.
model_kwargs = {"model_type": model_abbrev,
"names": model_obj.name_spec,
"intercept_names": model_obj.intercept_names,
"intercept_ref_pos": model_obj.intercept_ref_position,
"shape_names": model_obj.shape_names,
"shape_ref_pos": model_obj.shape_ref_position,
"nest_spec": model_obj.nest_spec,
"mixing_vars": model_obj.mixing_vars,
"mixing_id_col": model_obj.mixing_id_col}
return model_kwargs |
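A minimal round-trip sketch for get_model_creation_kwargs above: rebuilding a replica of a fitted model from the extracted keyword arguments (the data frame and id columns are assumptions):
kwargs = get_model_creation_kwargs(model_obj)
replica = pylogit.create_choice_model(data=df,
                                      alt_id_col='alt_id',
                                      obs_id_col='obs_id',
                                      choice_col='choice',
                                      specification=model_obj.specification,
                                      **kwargs)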
def clean_expired_user_attempts(attempt_time: datetime = None) -> int:
"""
Clean expired user attempts from the database.
"""
if settings.AXES_COOLOFF_TIME is None:
log.debug('AXES: Skipping clean for expired access attempts because no AXES_COOLOFF_TIME is configured')
return 0
threshold = get_cool_off_threshold(attempt_time)
count, _ = AccessAttempt.objects.filter(attempt_time__lt=threshold).delete()
log.info('AXES: Cleaned up %s expired access attempts from database that were older than %s', count, threshold)
return count | Clean expired user attempts from the database. | Below is the instruction that describes the task:
### Input:
Clean expired user attempts from the database.
### Response:
def clean_expired_user_attempts(attempt_time: datetime = None) -> int:
"""
Clean expired user attempts from the database.
"""
if settings.AXES_COOLOFF_TIME is None:
log.debug('AXES: Skipping clean for expired access attempts because no AXES_COOLOFF_TIME is configured')
return 0
threshold = get_cool_off_threshold(attempt_time)
count, _ = AccessAttempt.objects.filter(attempt_time__lt=threshold).delete()
log.info('AXES: Cleaned up %s expired access attempts from database that were older than %s', count, threshold)
return count |
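A minimal usage sketch for clean_expired_user_attempts above, e.g. from a periodic maintenance job (the scheduling mechanism is out of scope here):
removed = clean_expired_user_attempts()  # attempt_time=None lets the helper pick the reference time
log.debug('pruned %d expired access attempts', removed)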
def clean_username(self):
"""
Validate that the username is unique and not listed
in the ``defaults.ACCOUNTS_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username=self.cleaned_data["username"])
except get_user_model().DoesNotExist:
pass
else:
raise forms.ValidationError(
self.error_messages['duplicate_username'])
if self.cleaned_data['username'].lower() \
in defaults.ACCOUNTS_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_(u'This username is not allowed.'))
return self.cleaned_data['username'] | Validate that the username is unique and not listed
in the ``defaults.ACCOUNTS_FORBIDDEN_USERNAMES`` list. | Below is the instruction that describes the task:
### Input:
Validate that the username is unique and not listed
in the ``defaults.ACCOUNTS_FORBIDDEN_USERNAMES`` list.
### Response:
def clean_username(self):
"""
Validate that the username is unique and not listed
in the ``defaults.ACCOUNTS_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username=self.cleaned_data["username"])
except get_user_model().DoesNotExist:
pass
else:
raise forms.ValidationError(
self.error_messages['duplicate_username'])
if self.cleaned_data['username'].lower() \
in defaults.ACCOUNTS_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_(u'This username is not allowed.'))
return self.cleaned_data['username'] |
def local_time_to_online(dt=None):
"""Converts datetime object to a UTC timestamp for AGOL.
Args:
dt (datetime): The :py:class:`datetime.datetime` object to convert. Defaults to ``None``, i.e., :py:func:`datetime.datetime.now`.
Returns:
float: A UTC timestamp as understood by AGOL (time in ms since Unix epoch * 1000)
Examples:
>>> arcresthelper.common.local_time_to_online() # PST
1457167261000.0
>>> dt = datetime.datetime(1993, 3, 5, 12, 35, 15) # PST
>>> arcresthelper.common.local_time_to_online(dt)
731392515000.0
See Also:
:py:func:`online_time_to_string` for converting a UTC timestamp
"""
is_dst = None
utc_offset = None
try:
if dt is None:
dt = datetime.datetime.now()
is_dst = time.daylight > 0 and time.localtime().tm_isdst > 0
utc_offset = (time.altzone if is_dst else time.timezone)
return (time.mktime(dt.timetuple()) * 1000) + (utc_offset * 1000)
except:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "local_time_to_online",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
is_dst = None
utc_offset = None
del is_dst
del utc_offset | Converts datetime object to a UTC timestamp for AGOL.
Args:
dt (datetime): The :py:class:`datetime.datetime` object to convert. Defaults to ``None``, i.e., :py:func:`datetime.datetime.now`.
Returns:
float: A UTC timestamp as understood by AGOL (time in ms since Unix epoch * 1000)
Examples:
>>> arcresthelper.common.local_time_to_online() # PST
1457167261000.0
>>> dt = datetime.datetime(1993, 3, 5, 12, 35, 15) # PST
>>> arcresthelper.common.local_time_to_online(dt)
731392515000.0
See Also:
:py:func:`online_time_to_string` for converting a UTC timestamp | Below is the instruction that describes the task:
### Input:
Converts datetime object to a UTC timestamp for AGOL.
Args:
dt (datetime): The :py:class:`datetime.datetime` object to convert. Defaults to ``None``, i.e., :py:func:`datetime.datetime.now`.
Returns:
float: A UTC timestamp as understood by AGOL (time in ms since Unix epoch * 1000)
Examples:
>>> arcresthelper.common.local_time_to_online() # PST
1457167261000.0
>>> dt = datetime.datetime(1993, 3, 5, 12, 35, 15) # PST
>>> arcresthelper.common.local_time_to_online(dt)
731392515000.0
See Also:
:py:func:`online_time_to_string` for converting a UTC timestamp
### Response:
def local_time_to_online(dt=None):
"""Converts datetime object to a UTC timestamp for AGOL.
Args:
dt (datetime): The :py:class:`datetime.datetime` object to convert. Defaults to ``None``, i.e., :py:func:`datetime.datetime.now`.
Returns:
float: A UTC timestamp as understood by AGOL (time in ms since Unix epoch * 1000)
Examples:
>>> arcresthelper.common.local_time_to_online() # PST
1457167261000.0
>>> dt = datetime.datetime(1993, 3, 5, 12, 35, 15) # PST
>>> arcresthelper.common.local_time_to_online(dt)
731392515000.0
See Also:
:py:func:`online_time_to_string` for converting a UTC timestamp
"""
is_dst = None
utc_offset = None
try:
if dt is None:
dt = datetime.datetime.now()
is_dst = time.daylight > 0 and time.localtime().tm_isdst > 0
utc_offset = (time.altzone if is_dst else time.timezone)
return (time.mktime(dt.timetuple()) * 1000) + (utc_offset * 1000)
except:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "local_time_to_online",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
is_dst = None
utc_offset = None
del is_dst
del utc_offset |
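A worked sketch of the arithmetic local_time_to_online performs, assuming a fixed UTC offset of +28800 seconds (PST, standard time):
import datetime, time
dt = datetime.datetime(1993, 3, 5, 12, 35, 15)
local_ms = time.mktime(dt.timetuple()) * 1000  # epoch seconds for the local tuple, in ms
agol_ts = local_ms + 28800 * 1000              # plus the zone's UTC offset in ms, as in the code above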
def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
time_ms = int(time_ms.total_seconds() * 1000)
return self.execute_command('PSETEX', name, time_ms, value) | Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object | Below is the instruction that describes the task:
### Input:
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
### Response:
def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
time_ms = int(time_ms.total_seconds() * 1000)
return self.execute_command('PSETEX', name, time_ms, value) |
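A minimal usage sketch for psetex above with redis-py (the connection parameters are assumptions):
import datetime
import redis
r = redis.Redis(host='localhost', port=6379)
r.psetex('session:42', datetime.timedelta(seconds=30), 'payload')  # 30 000 ms TTL
r.psetex('token:7', 1500, 'abc')                                   # plain integer milliseconds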
def configure(self, name=None, rules=None, query=None, **options):
"""Configure the alert."""
self.name = name
if not name:
raise AssertionError("Alert's name should be defined and not empty.")
if not rules:
raise AssertionError("%s: Alert's rules is invalid" % name)
self.rules = [parse_rule(rule) for rule in rules]
self.rules = list(sorted(self.rules, key=lambda r: LEVELS.get(r.get('level'), 99)))
assert query, "%s: Alert's query is invalid" % self.name
self.query = query
interval_raw = options.get('interval', self.reactor.options['interval'])
self.interval = TimeUnit.from_interval(interval_raw)
time_window_raw = options.get(
'time_window',
self.reactor.options.get('time_window', interval_raw),
)
self.time_window = TimeUnit.from_interval(time_window_raw)
until_raw = options.get('until', self.reactor.options['until'])
self.until = TimeUnit.from_interval(until_raw)
# Adjust the start time to cater for `until`
self.from_time = self.time_window + self.until
self._format = options.get('format', self.reactor.options['format'])
self.request_timeout = options.get(
'request_timeout', self.reactor.options['request_timeout'])
self.connect_timeout = options.get(
'connect_timeout', self.reactor.options['connect_timeout'])
interval_ms = self.interval.convert_to(units.MILLISECOND)
history_size_raw = options.get('history_size', self.reactor.options['history_size'])
history_size_unit = TimeUnit.from_interval(history_size_raw)
history_size_ms = history_size_unit.convert_to(MILLISECOND)
self.history_size = int(math.ceil(history_size_ms / interval_ms))
self.no_data = options.get('no_data', self.reactor.options['no_data'])
self.loading_error = options.get('loading_error', self.reactor.options['loading_error'])
if self.reactor.options.get('debug'):
self.callback = ioloop.PeriodicCallback(self.load, 5000)
else:
self.callback = ioloop.PeriodicCallback(self.load, interval_ms) | Configure the alert. | Below is the instruction that describes the task:
### Input:
Configure the alert.
### Response:
def configure(self, name=None, rules=None, query=None, **options):
"""Configure the alert."""
self.name = name
if not name:
raise AssertionError("Alert's name should be defined and not empty.")
if not rules:
raise AssertionError("%s: Alert's rules is invalid" % name)
self.rules = [parse_rule(rule) for rule in rules]
self.rules = list(sorted(self.rules, key=lambda r: LEVELS.get(r.get('level'), 99)))
assert query, "%s: Alert's query is invalid" % self.name
self.query = query
interval_raw = options.get('interval', self.reactor.options['interval'])
self.interval = TimeUnit.from_interval(interval_raw)
time_window_raw = options.get(
'time_window',
self.reactor.options.get('time_window', interval_raw),
)
self.time_window = TimeUnit.from_interval(time_window_raw)
until_raw = options.get('until', self.reactor.options['until'])
self.until = TimeUnit.from_interval(until_raw)
# Adjust the start time to cater for `until`
self.from_time = self.time_window + self.until
self._format = options.get('format', self.reactor.options['format'])
self.request_timeout = options.get(
'request_timeout', self.reactor.options['request_timeout'])
self.connect_timeout = options.get(
'connect_timeout', self.reactor.options['connect_timeout'])
interval_ms = self.interval.convert_to(units.MILLISECOND)
history_size_raw = options.get('history_size', self.reactor.options['history_size'])
history_size_unit = TimeUnit.from_interval(history_size_raw)
history_size_ms = history_size_unit.convert_to(MILLISECOND)
self.history_size = int(math.ceil(history_size_ms / interval_ms))
self.no_data = options.get('no_data', self.reactor.options['no_data'])
self.loading_error = options.get('loading_error', self.reactor.options['loading_error'])
if self.reactor.options.get('debug'):
self.callback = ioloop.PeriodicCallback(self.load, 5000)
else:
self.callback = ioloop.PeriodicCallback(self.load, interval_ms) |
def set_secure_boot_mode(self, secure_boot_enable):
"""Enable/Disable secure boot on the server.
:param secure_boot_enable: True, if secure boot needs to be
enabled for next boot, else False.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('SecureBootEnable',
secure_boot_enable)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg) | Enable/Disable secure boot on the server.
:param secure_boot_enable: True, if secure boot needs to be
enabled for next boot, else False.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | Below is the instruction that describes the task:
### Input:
Enable/Disable secure boot on the server.
:param secure_boot_enable: True, if secure boot needs to be
enabled for next boot, else False.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
### Response:
def set_secure_boot_mode(self, secure_boot_enable):
"""Enable/Disable secure boot on the server.
:param secure_boot_enable: True, if secure boot needs to be
enabled for next boot, else False.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('SecureBootEnable',
secure_boot_enable)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg) |
def build(client, repository_tag, docker_file, tag=None, use_cache=False):
"""
Build a docker image
"""
if not isinstance(client, docker.Client):
raise TypeError("client needs to be of type docker.Client.")
if not isinstance(docker_file, six.string_types) or not os.path.exists(docker_file):
# TODO: need to add path stuff for git and http etc.
raise Exception("docker file path doesn't exist: {0}".format(docker_file))
if not isinstance(repository_tag, six.string_types):
raise TypeError('repository must be a string')
if not tag:
tag = 'latest'
if not isinstance(use_cache, bool):
raise TypeError("use_cache must be a bool. {0} was passed.".format(use_cache))
no_cache = not use_cache
if ':' not in repository_tag:
repository_tag = "{0}:{1}".format(repository_tag, tag)
file_obj = None
try:
if os.path.isfile(docker_file):
path = os.getcwd()
docker_file = "./{0}".format(os.path.relpath(docker_file))
# TODO: support using file_obj in the future. Needed for post pre hooks and the injector.
# with open(docker_file) as Dockerfile:
# testing = Dockerfile.read()
# file_obj = BytesIO(testing.encode('utf-8'))
response = client.build(
path=path,
nocache=no_cache,
# custom_context=True,
dockerfile=docker_file,
# fileobj=file_obj,
tag=repository_tag,
rm=True,
stream=True
)
else:
response = client.build(path=docker_file, tag=repository_tag, rm=True, nocache=no_cache, stream=True)
except Exception as e:
raise e
finally:
if file_obj:
file_obj.close()
parse_stream(response)
client.close()
return Image(client, repository_tag) | Build a docker image | Below is the instruction that describes the task:
### Input:
Build a docker image
### Response:
def build(client, repository_tag, docker_file, tag=None, use_cache=False):
"""
Build a docker image
"""
if not isinstance(client, docker.Client):
raise TypeError("client needs to be of type docker.Client.")
if not isinstance(docker_file, six.string_types) or not os.path.exists(docker_file):
# TODO: need to add path stuff for git and http etc.
raise Exception("docker file path doesn't exist: {0}".format(docker_file))
if not isinstance(repository_tag, six.string_types):
raise TypeError('repository must be a string')
if not tag:
tag = 'latest'
if not isinstance(use_cache, bool):
raise TypeError("use_cache must be a bool. {0} was passed.".format(use_cache))
no_cache = not use_cache
if ':' not in repository_tag:
repository_tag = "{0}:{1}".format(repository_tag, tag)
file_obj = None
try:
if os.path.isfile(docker_file):
path = os.getcwd()
docker_file = "./{0}".format(os.path.relpath(docker_file))
# TODO: support using file_obj in the future. Needed for post pre hooks and the injector.
# with open(docker_file) as Dockerfile:
# testing = Dockerfile.read()
# file_obj = BytesIO(testing.encode('utf-8'))
response = client.build(
path=path,
nocache=no_cache,
# custom_context=True,
dockerfile=docker_file,
# fileobj=file_obj,
tag=repository_tag,
rm=True,
stream=True
)
else:
response = client.build(path=docker_file, tag=repository_tag, rm=True, nocache=no_cache, stream=True)
except Exception as e:
raise e
finally:
if file_obj:
file_obj.close()
parse_stream(response)
client.close()
return Image(client, repository_tag) |
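A minimal usage sketch for build above, assuming a legacy docker-py docker.Client over the local socket (the socket URL, repository, and file paths are assumptions):
import docker
client = docker.Client(base_url='unix://var/run/docker.sock')
image = build(client, 'myorg/myapp', './Dockerfile', tag='dev', use_cache=True)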
def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result) | Call Java Function | Below is the instruction that describes the task:
### Input:
Call Java Function
### Response:
def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result) |
def extra_space_exists(str1: str, str2: str) -> bool: # noqa
"""
Return True if a space shouldn't exist between two items
"""
ls1, ls2 = len(str1), len(str2)
if str1.isdigit():
# 10 SM
if str2 in ['SM', '0SM']:
return True
# 12 /10
if ls2 > 2 and str2[0] == '/' and str2[1:].isdigit():
return True
if str2.isdigit():
# OVC 040
if str1 in CLOUD_LIST:
return True
# 12/ 10
if ls1 > 2 and str1.endswith('/') and str1[:-1].isdigit():
return True
# 12/1 0
if ls2 == 1 and ls1 > 3 and str1[:2].isdigit() and '/' in str1 and str1[3:].isdigit():
return True
# Q 1001
if str1 in ['Q', 'A']:
return True
# 36010G20 KT
if str2 == 'KT' and str1[-1].isdigit() \
and (str1[:5].isdigit() or (str1.startswith('VRB') and str1[3:5].isdigit())):
return True
# 36010K T
if str2 == 'T' and ls1 >= 6 \
and (str1[:5].isdigit() or (str1.startswith('VRB') and str1[3:5].isdigit())) and str1[-1] == 'K':
return True
# OVC022 CB
if str2 in CLOUD_TRANSLATIONS and str2 not in CLOUD_LIST and ls1 >= 3 and str1[:3] in CLOUD_LIST:
return True
# FM 122400
if str1 in ['FM', 'TL'] and (str2.isdigit() or (str2.endswith('Z') and str2[:-1].isdigit())):
return True
# TX 20/10
if str1 in ['TX', 'TN'] and str2.find('/') != -1:
return True
return False | Return True if a space shouldn't exist between two items | Below is the instruction that describes the task:
### Input:
Return True if a space shouldn't exist between two items
### Response:
def extra_space_exists(str1: str, str2: str) -> bool: # noqa
"""
Return True if a space shouldn't exist between two items
"""
ls1, ls2 = len(str1), len(str2)
if str1.isdigit():
# 10 SM
if str2 in ['SM', '0SM']:
return True
# 12 /10
if ls2 > 2 and str2[0] == '/' and str2[1:].isdigit():
return True
if str2.isdigit():
# OVC 040
if str1 in CLOUD_LIST:
return True
# 12/ 10
if ls1 > 2 and str1.endswith('/') and str1[:-1].isdigit():
return True
# 12/1 0
if ls2 == 1 and ls1 > 3 and str1[:2].isdigit() and '/' in str1 and str1[3:].isdigit():
return True
# Q 1001
if str1 in ['Q', 'A']:
return True
# 36010G20 KT
if str2 == 'KT' and str1[-1].isdigit() \
and (str1[:5].isdigit() or (str1.startswith('VRB') and str1[3:5].isdigit())):
return True
# 36010K T
if str2 == 'T' and ls1 >= 6 \
and (str1[:5].isdigit() or (str1.startswith('VRB') and str1[3:5].isdigit())) and str1[-1] == 'K':
return True
# OVC022 CB
if str2 in CLOUD_TRANSLATIONS and str2 not in CLOUD_LIST and ls1 >= 3 and str1[:3] in CLOUD_LIST:
return True
# FM 122400
if str1 in ['FM', 'TL'] and (str2.isdigit() or (str2.endswith('Z') and str2[:-1].isdigit())):
return True
# TX 20/10
if str1 in ['TX', 'TN'] and str2.find('/') != -1:
return True
return False |
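A few illustrative calls for extra_space_exists above, chosen to hit the commented branches (the False case assumes '012345Z' is absent from CLOUD_TRANSLATIONS):
extra_space_exists('10', 'SM')         # True  -- '10 SM' visibility split
extra_space_exists('36010G20', 'KT')   # True  -- wind group split before 'KT'
extra_space_exists('FM', '122400')     # True  -- 'FM 122400' change indicator
extra_space_exists('KMCO', '012345Z')  # False -- a legitimate space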
def entity(self, entity_type, identifier=None):
"""Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(i.e., ``user``, ``group``, etc.)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
"""
entity = _ACLEntity(entity_type=entity_type, identifier=identifier)
if self.has_entity(entity):
entity = self.get_entity(entity)
else:
self.add_entity(entity)
return entity | Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(i.e., ``user``, ``group``, etc.)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity. | Below is the instruction that describes the task:
### Input:
Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(i.e., ``user``, ``group``, etc.)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
### Response:
def entity(self, entity_type, identifier=None):
"""Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(i.e., ``user``, ``group``, etc.)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
"""
entity = _ACLEntity(entity_type=entity_type, identifier=identifier)
if self.has_entity(entity):
entity = self.get_entity(entity)
else:
self.add_entity(entity)
return entity |
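A minimal usage sketch for the entity factory above, assuming an ACL container in the google-cloud-storage style (the acl object is hypothetical):
reader = acl.entity('user', 'person@example.com')  # created and registered
again = acl.entity('user', 'person@example.com')   # the registered entity comes back
assert reader is again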
def since(self, ts):
"""
Query the oplog for items since ts and then return
"""
spec = {'ts': {'$gt': ts}}
cursor = self.query(spec)
while True:
# todo: trap InvalidDocument errors:
# except bson.errors.InvalidDocument as e:
# logging.info(repr(e))
for doc in cursor:
yield doc
if not cursor.alive:
break
time.sleep(1) | Query the oplog for items since ts and then return | Below is the instruction that describes the task:
### Input:
Query the oplog for items since ts and then return
### Response:
def since(self, ts):
"""
Query the oplog for items since ts and then return
"""
spec = {'ts': {'$gt': ts}}
cursor = self.query(spec)
while True:
# todo: trap InvalidDocument errors:
# except bson.errors.InvalidDocument as e:
# logging.info(repr(e))
for doc in cursor:
yield doc
if not cursor.alive:
break
time.sleep(1) |
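A minimal usage sketch for since above, assuming a wrapper around MongoDB's local.oplog.rs collection exposing it (the wrapper class and consumer are hypothetical):
tail = Oplog(client.local['oplog.rs'])  # hypothetical wrapper
for op in tail.since(last_seen_ts):     # generator; polls once per second
    apply_op(op)                        # hypothetical consumer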
def _matcher(self, other):
"""
QueryContainer < MoleculeContainer
QueryContainer < QueryContainer[more general]
QueryContainer < QueryCGRContainer[more general]
"""
if isinstance(other, MoleculeContainer):
return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x)
elif isinstance(other, (QueryContainer, QueryCGRContainer)):
return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
raise TypeError('only query-molecule, query-query or query-cgr_query possible') | QueryContainer < MoleculeContainer
QueryContainer < QueryContainer[more general]
QueryContainer < QueryCGRContainer[more general] | Below is the instruction that describes the task:
### Input:
QueryContainer < MoleculeContainer
QueryContainer < QueryContainer[more general]
QueryContainer < QueryCGRContainer[more general]
### Response:
def _matcher(self, other):
"""
QueryContainer < MoleculeContainer
QueryContainer < QueryContainer[more general]
QueryContainer < QueryCGRContainer[more general]
"""
if isinstance(other, MoleculeContainer):
return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x)
elif isinstance(other, (QueryContainer, QueryCGRContainer)):
return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
raise TypeError('only query-molecule, query-query or query-cgr_query possible') |
def new(path='.', template=None):
"""Creates a new project
"""
path = abspath(path.rstrip(sep))
template = template or DEFAULT_TEMPLATE_URL
render_skeleton(
template, path,
include_this=['.gitignore'],
filter_this=[
'~*', '*.py[co]',
'__pycache__', '__pycache__/*',
'.git', '.git/*',
'.hg', '.hg/*',
'.svn', '.svn/*',
]
)
print(HELP_MSG % (path,)) | Creates a new project | Below is the instruction that describes the task:
### Input:
Creates a new project
### Response:
def new(path='.', template=None):
"""Creates a new project
"""
path = abspath(path.rstrip(sep))
template = template or DEFAULT_TEMPLATE_URL
render_skeleton(
template, path,
include_this=['.gitignore'],
filter_this=[
'~*', '*.py[co]',
'__pycache__', '__pycache__/*',
'.git', '.git/*',
'.hg', '.hg/*',
'.svn', '.svn/*',
]
)
print(HELP_MSG % (path,)) |
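A minimal usage sketch for new above (the custom template source is an assumption; omitting it falls back to DEFAULT_TEMPLATE_URL):
new('./myblog')                                         # scaffold with the default skeleton
new('./myblog', template='gh:example/custom-skeleton')  # hypothetical template source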