def virtual_machine_get(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Retrieves information about the model view or the instance view of a
virtual machine.
:param name: The name of the virtual machine.
:param resource_group: The resource group name assigned to the
virtual machine.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machine_get testvm testgroup
'''
expand = kwargs.get('expand')
compconn = __utils__['azurearm.get_client']('compute', **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.get(
resource_group_name=resource_group,
vm_name=name,
expand=expand
)
result = vm.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
result = {'error': str(exc)}
return result
def is_not_modified(
self, response_headers: Headers, request_headers: Headers
) -> bool:
"""
Given the request and response headers, return `True` if an HTTP
"Not Modified" response could be returned instead.
"""
try:
if_none_match = request_headers["if-none-match"]
etag = response_headers["etag"]
if if_none_match == etag:
return True
except KeyError:
pass
try:
if_modified_since = parsedate(request_headers["if-modified-since"])
last_modified = parsedate(response_headers["last-modified"])
if (
if_modified_since is not None
and last_modified is not None
and if_modified_since >= last_modified
):
return True
except KeyError:
pass
return False
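# A minimal, self-contained sketch of the same conditional-request check, using
# plain dicts in place of the Headers type; names here are illustrative only.
from email.utils import parsedate

def _is_not_modified_demo(response_headers, request_headers):
    # ETag check: an exact If-None-Match match means the client copy is current.
    if request_headers.get("if-none-match") and \
            request_headers.get("if-none-match") == response_headers.get("etag"):
        return True
    # Date check: parsedate() returns comparable time tuples, or None on bad input.
    ims = parsedate(request_headers.get("if-modified-since", ""))
    lm = parsedate(response_headers.get("last-modified", ""))
    return ims is not None and lm is not None and ims >= lm

print(_is_not_modified_demo(
    {"etag": '"abc"', "last-modified": "Wed, 21 Oct 2015 07:28:00 GMT"},
    {"if-none-match": '"abc"'},
))  # True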
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
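# The function above reads its base rate from a module-level ``args`` object
# (typically an argparse namespace in the training script). The step-decay
# schedule it applies is just this arithmetic:
base_lr = 0.1  # illustrative stand-in for args.lr
for epoch in (0, 29, 30, 60, 90):
    print(epoch, base_lr * (0.1 ** (epoch // 30)))
# -> 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89, 0.0001 from epoch 90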
def main():
"""Cli entrypoint.
"""
if len(sys.argv) == 2:
fname = sys.argv[1]
data = json.load(open(fname, 'rb'))
else:
data = json.loads(sys.stdin.read())
print(pydeps2reqs(data))
def parse_graph_section(config_obj, section, outdir_default, indir_default):
"""
Parse the GRAPH section of the config to extract useful values
:param config_obj: ConfigParser object
:param section: Section name
:param outdir_default: Default output directory passed in args
:param indir_default: Default input directory passed in args
:return: List of options extracted from the GRAPH section
"""
graph_timezone = None
graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY
crossplots = []
if config_obj.has_option(section, 'graphing_library'):
graphing_library = config_obj.get(section, 'graphing_library')
if config_obj.has_option(section, 'graphs'):
graphs_string = config_obj.get(section, 'graphs')
crossplots = graphs_string.split()
# Supporting both outdir and output_dir
if config_obj.has_option(section, 'outdir'):
outdir_default = config_obj.get(section, 'outdir')
if config_obj.has_option(section, 'output_dir'):
outdir_default = config_obj.get(section, 'output_dir')
if config_obj.has_option(section, 'input_dir'):
indir_default = config_obj.get(section, 'input_dir')
if config_obj.has_option(section, 'graph_timezone'):
graph_timezone = config_obj.get(section, 'graph_timezone')
if graph_timezone not in ("UTC", "PST", "PDT"):
logger.warn('Unsupported timezone ' + graph_timezone + ' specified in option graph_timezone. Will use UTC instead')
graph_timezone = "UTC"
return graphing_library, crossplots, outdir_default, indir_default, graph_timezone
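# A config sketch this parser would accept; the option names come from the code
# above, the values are invented, and configparser is used here only to show how
# the whitespace-separated ``graphs`` value becomes crossplot groups.
import configparser

_cfg = configparser.ConfigParser()
_cfg.read_string("""
[GRAPH]
graphing_library = matplotlib
graphs = cpu.user,cpu.system mem.used
outdir = /tmp/report_out
graph_timezone = PST
""")
print(_cfg.get('GRAPH', 'graphs').split())  # ['cpu.user,cpu.system', 'mem.used']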
def _get_ref_info_helper(cls, repo, ref_path):
"""Return: (str(sha), str(target_ref_path)) if available, the sha the file at
rela_path points to, or None. target_ref_path is the reference we
point to, or None"""
tokens = None
repodir = _git_dir(repo, ref_path)
try:
with open(osp.join(repodir, ref_path), 'rt') as fp:
value = fp.read().rstrip()
# Don't only split on spaces, but on whitespace, which allows to parse lines like
# 60b64ef992065e2600bfef6187a97f92398a9144 branch 'master' of git-server:/path/to/repo
tokens = value.split()
assert(len(tokens) != 0)
except (OSError, IOError):
# Probably we are just packed, find our entry in the packed refs file
# NOTE: We are not a symbolic ref if we are in a packed file, as these
# are excluded explicitly
for sha, path in cls._iter_packed_refs(repo):
if path != ref_path:
continue
# sha will be used
tokens = sha, path
break
# END for each packed ref
# END handle packed refs
if tokens is None:
raise ValueError("Reference at %r does not exist" % ref_path)
# is it a reference ?
if tokens[0] == 'ref:':
return (None, tokens[1])
# its a commit
if repo.re_hexsha_only.match(tokens[0]):
return (tokens[0], None)
raise ValueError("Failed to parse reference information from %r" % ref_path)
def get_related(page):
"""
Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list.
"""
related = []
entry = Entry.get_for_model(page)
if entry:
related = entry.related
return related
def load_config_file(self):
"""Parse configuration file and get config values."""
config_parser = SafeConfigParser()
config_parser.read(self.CONFIG_FILE)
if config_parser.has_section('handlers'):
self._config['handlers_package'] = config_parser.get('handlers', 'package')
if config_parser.has_section('auth'):
self._config['consumer_key'] = config_parser.get('auth', 'consumer_key')
self._config['consumer_secret'] = config_parser.get('auth', 'consumer_secret')
self._config['token_key'] = config_parser.get('auth', 'token_key')
self._config['token_secret'] = config_parser.get('auth', 'token_secret')
if config_parser.has_section('stream'):
self._config['user_stream'] = config_parser.get('stream', 'user_stream').lower() == 'true'
else:
self._config['user_stream'] = False
if config_parser.has_option('general', 'min_seconds_between_errors'):
self._config['min_seconds_between_errors'] = config_parser.get('general', 'min_seconds_between_errors')
if config_parser.has_option('general', 'sleep_seconds_on_consecutive_errors'):
self._config['sleep_seconds_on_consecutive_errors'] = config_parser.get(
'general', 'sleep_seconds_on_consecutive_errors')
def search(self, pkg_name=None, search_carts=False, query='/content/units/rpm/search/'):
"""
search for a package stored in a pulp repo
`pkg_name` - substring in the name of the package
`search_carts` - whether or not to return carts that include
the listed package
"""
# this data block is... yeah. searching in pulp v2 is painful
#
# https://pulp-dev-guide.readthedocs.org/en/latest/rest-api/content/retrieval.html#search-for-units
# https://pulp-dev-guide.readthedocs.org/en/latest/rest-api/conventions/criteria.html#search-criteria
#
# those are the API docs for searching
data = {
'criteria': {
'filters': {'filename': {'$regex': ".*%s.*" % pkg_name}},
'sort': [['name', 'ascending']],
'fields': ['name', 'description', 'version', 'release', 'arch', 'filename']
},
'include_repos': 'true'
}
repos = []
juicer.utils.Log.log_info('Packages:')
for env in self.args.environment:
juicer.utils.Log.log_debug("Querying %s server" % env)
_r = self.connectors[env].post(query, data)
if not _r.status_code == Constants.PULP_POST_OK:
juicer.utils.Log.log_debug("Expected PULP_POST_OK, got %s", _r.status_code)
_r.raise_for_status()
juicer.utils.Log.log_info('%s:' % str.upper(env))
pkg_list = juicer.utils.load_json_str(_r.content)
for package in pkg_list:
# if the package is in a repo, show a link to the package in said repo
# otherwise, show nothing
if len(package['repository_memberships']) > 0:
target = package['repository_memberships'][0]
_r = self.connectors[env].get('/repositories/%s/' % target)
if not _r.status_code == Constants.PULP_GET_OK:
raise JuicerPulpError("%s was not found as a repoid. A %s status code was returned" %
(target, _r.status_code))
repo = juicer.utils.load_json_str(_r.content)['display_name']
repos.append(repo)
link = juicer.utils.remote_url(self.connectors[env], env, repo, package['filename'])
else:
link = ''
juicer.utils.Log.log_info('%s\t%s\t%s\t%s' % (package['name'], package['version'], package['release'], link))
if search_carts:
# if the package is in a cart, show the cart name
juicer.utils.Log.log_info('\nCarts:')
for env in self.args.environment:
carts = juicer.utils.search_carts(env, pkg_name, repos)
for cart in carts:
juicer.utils.Log.log_info(cart['_id'])
def create_task_from_xml(name,
location='\\',
xml_text=None,
xml_path=None,
user_name='System',
password=None):
r'''
Create a task based on XML. Source can be a file or a string of XML.
:param str name: The name of the task. This will be displayed in the task
scheduler.
:param str location: A string value representing the location in which to
create the task. Default is '\\' which is the root for the task
scheduler (C:\Windows\System32\tasks).
:param str xml_text: A string of xml representing the task to be created.
This will be overridden by `xml_path` if passed.
:param str xml_path: The path to an XML file on the local system containing
the xml that defines the task. This will override `xml_text`
:param str user_name: The user account under which to run the task. To
specify the 'System' account, use 'System'. The password will be
ignored.
:param str password: The password to use for authentication. This should set
the task to run whether the user is logged in or not, but is currently
not working.
:return: True if successful, False if unsuccessful, A string with the error message if there is an error
:rtype: bool
:raises: CommandExecutionError
CLI Example:
.. code-block:: bash
salt 'minion-id' task.create_task_from_xml <task_name> xml_path=C:\task.xml
'''
# Check for existing task
if name in list_tasks(location):
# Connect to an existing task definition
return '{0} already exists'.format(name)
if not xml_text and not xml_path:
raise ArgumentValueError('Must specify either xml_text or xml_path')
# Create the task service object
with salt.utils.winapi.Com():
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()
# Load xml from file, overrides xml_text
if xml_path:
with open(xml_path, 'r') as xml_file:
xml_text = xml_file.read()
# Get the folder to list folders from
task_folder = task_service.GetFolder(location)
# Determine logon type
if user_name:
if user_name.lower() == 'system':
logon_type = TASK_LOGON_SERVICE_ACCOUNT
user_name = 'SYSTEM'
password = None
else:
if password:
logon_type = TASK_LOGON_PASSWORD
else:
logon_type = TASK_LOGON_INTERACTIVE_TOKEN
else:
password = None
logon_type = TASK_LOGON_NONE
# Save the task
try:
task_folder.RegisterTask(name,
xml_text,
TASK_CREATE,
user_name,
password,
logon_type)
except pythoncom.com_error as error:
hr, msg, exc, arg = error.args # pylint: disable=W0633
error_code = hex(exc[5] + 2**32)
failure_code = error_code
fc = {'0x80041319L': 'Required element or attribute missing',
'0x80041318L': 'Value incorrectly formatted or out of range',
'0x80020005L': 'Access denied',
'0x80041309L': "A task's trigger is not found",
'0x8004130aL': "One or more of the properties required to run this task have not been set",
'0x8004130cL': "The Task Scheduler service is not installed on this computer",
'0x8004130dL': "The task object could not be opened",
'0x8004130eL': "The object is either an invalid task object or is not a task object",
'0x8004130fL': "No account information could be found in the Task Scheduler security database for the task indicated",
'0x80041310L': "Unable to establish existence of the account specified",
'0x80041311L': "Corruption was detected in the Task Scheduler security database; the database has been reset",
'0x80041313L': "The task object version is either unsupported or invalid",
'0x80041314L': "The task has been configured with an unsupported combination of account settings and run time options",
'0x80041315L': "The Task Scheduler Service is not running",
'0x80041316L': "The task XML contains an unexpected node",
'0x80041317L': "The task XML contains an element or attribute from an unexpected namespace",
'0x8004131aL': "The task XML is malformed",
'0x0004131cL': "The task is registered, but may fail to start. Batch logon privilege needs to be enabled for the task principal",
'0x8004131dL': "The task XML contains too many nodes of the same type",
}
try:
failure_code = fc[error_code]
except KeyError:
failure_code = 'Unknown Failure: {0}'.format(error_code)
finally:
log.debug('Failed to create task: %s', failure_code)
raise CommandExecutionError(failure_code)
# Verify creation
return name in list_tasks(location)
async def warn_user(channel, user):
"""
Gives a user a warning, and bans them if they are over the maximum warnings
Args:
channel: The channel to send the warning message in
user: The user to give the warning to
"""
data = datatools.get_data()
server_id = channel.server.id
if "warnings_max" not in data["discord"]["servers"][server_id][_data.modulename]:
data["discord"]["servers"][server_id][_data.modulename]["warnings_max"] = 3
if "warnings" not in data["discord"]["servers"][server_id][_data.modulename]:
data["discord"]["servers"][server_id][_data.modulename]["warnings"] = {}
if user.id in data["discord"]["servers"][server_id][_data.modulename]["warnings"]:
data["discord"]["servers"][server_id][_data.modulename]["warnings"][user.id] += 1
else:
data["discord"]["servers"][server_id][_data.modulename]["warnings"][user.id] = 1
datatools.write_data(data)
warnings = data["discord"]["servers"][server_id][_data.modulename]["warnings"][user.id]
max_warnings = data["discord"]["servers"][server_id][_data.modulename]["warnings_max"]
await client.send_typing(channel)
embed = ui_embed.user_warning(channel, user, warnings, max_warnings)
await embed.send()
if warnings >= max_warnings:
await ban_user(channel, user)
def use_plenary_proficiency_view(self):
"""Pass through to provider ProficiencyLookupSession.use_plenary_proficiency_view"""
self._object_views['proficiency'] = PLENARY
# self._get_provider_session('proficiency_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_proficiency_view()
except AttributeError:
pass
def operator_complexity(self):
"""Operator complexity of this multigrid hierarchy.
Defined as:
Number of nonzeros in the matrix on all levels /
Number of nonzeros in the matrix on the finest level
"""
return sum([level.A.nnz for level in self.levels]) /\
float(self.levels[0].A.nnz)
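# Worked example with hypothetical nonzero counts per level (finest level first):
# operator complexity is the total nnz divided by the finest-level nnz.
nnz_per_level = [10000, 3300, 1100]
print(sum(nnz_per_level) / float(nnz_per_level[0]))  # 1.44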
def _generate_hex_for_uris(self, uris):
"""Given uris, generate and return hex version of it
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
"""
return sha256((":".join(uris) + str(time())).encode()).hexdigest()
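# Standalone illustration of the hashing above: the URIs are joined with ':' and
# salted with the current timestamp, so the digest changes between calls.
from hashlib import sha256
from time import time

uris = ["https://a.example/x", "https://b.example/y"]
digest = sha256((":".join(uris) + str(time())).encode()).hexdigest()
print(len(digest))  # 64 hexadecimal characters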
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
def genty_dataset(*args, **kwargs):
"""Decorator defining data sets to provide to a test.
Inspired by http://sebastian-bergmann.de/archives/
702-Data-Providers-in-PHPUnit-3.2.html
The canonical way to call @genty_dataset, with each argument each
representing a data set to be injected in the test method call:
@genty_dataset(
('a1', 'b1'),
('a2', 'b2'),
)
def test_some_function(a, b)
...
If the test function takes only one parameter, you can replace the tuples
by a single value. So instead of the more verbose:
@genty_dataset(
('c1',),
('c2',),
)
def test_some_other_function(c)
...
One can write:
@genty_dataset('c1', 'c2')
def test_some_other_function(c)
...
For each set of arguments, a suffix identifying that argument set is
built by concatenating the string representation of the arguments
together. You can control the test names for each data set by passing
the data sets as keyword args, where the keyword is the desired suffix.
For example:
@genty_dataset(
('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named 'test_function_for_a1_and_b1', while
@genty_dataset(
happy_path=('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named test_function_for_happy_path. These are just
parameters to a method call, so one can have unnamed args first
followed by keyword args
@genty_dataset(
('x', 'y'),
('p', 'q'),
Monday=('a1', 'b1'),
Tuesday=('t1', 't2'),
)
def test_function(a, b)
...
Finally, datasets can be chained. Useful for example if there are
distinct sets of params that make sense (cleaner, more readable, or
semantically nicer) if kept separate. A fabricated example:
@genty_dataset(
*([i for i in range(10)] + [(i, i) for i in range(10)])
)
def test_some_other_function(param1, param2=None)
...
-- vs --
@genty_dataset(*[i for i in range(10)])
@genty_dataset(*[(i, i) for i in range(10)])
def test_some_other_function(param1, param2=None)
...
If the names of datasets conflict across chained genty_datasets, the
key&value pair from the outer (first) decorator will override the
data from the inner.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
datasets = _build_datasets(*args, **kwargs)
def wrap(test_method):
# Save the datasets in the test method. This data will be consumed
# by the @genty decorator.
if not hasattr(test_method, 'genty_datasets'):
test_method.genty_datasets = OrderedDict()
test_method.genty_datasets.update(datasets)
return test_method
return wrap
def console_hline(
con: tcod.console.Console,
x: int,
y: int,
l: int,
flag: int = BKGND_DEFAULT,
) -> None:
"""Draw a horizontal line on the console.
This always uses the character 196, the horizontal line character.
.. deprecated:: 8.5
Use :any:`Console.hline` instead.
"""
lib.TCOD_console_hline(_console(con), x, y, l, flag)
def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a keypoint using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
x, y, a, s = keypoint
x1, y1, x2, y2 = crop_coords
cropped_keypoint = [x - x1, y - y1, a, s]
return cropped_keypoint
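# Worked example of the shift above: the keypoint is translated by the crop's
# (x1, y1) corner, while angle and scale pass through unchanged.
x, y, a, s = (50, 40, 0.0, 1.0)
x1, y1, x2, y2 = (30, 25, 130, 125)
print([x - x1, y - y1, a, s])  # [20, 15, 0.0, 1.0]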
def AddEventSource(self, event_source):
"""Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
"""
self._RaiseIfNotWritable()
self._AddAttributeContainer(
self._CONTAINER_TYPE_EVENT_SOURCE, event_source)
def get_random_user(self):
"""
Gets a random user from the provider
:returns: Dictionary
"""
c = self.db.cursor()
c.execute('''SELECT username, password, fullname FROM users
WHERE rowid >= (abs(random()) % (SELECT max(rowid) FROM users))
LIMIT 1''')
r = c.fetchone()
return {"username": r[0], "password": r[1], "fullname": r[2]}
def update_employee(emp_id, key=None, value=None, items=None):
'''
Update one or more items for this employee. Specifying an empty value will
clear it for that employee.
CLI Examples:
salt myminion bamboohr.update_employee 1138 nickname Curly
salt myminion bamboohr.update_employee 1138 nickname ''
salt myminion bamboohr.update_employee 1138 items='{"nickname": "Curly"}'
salt myminion bamboohr.update_employee 1138 items='{"nickname": ""}'
'''
if items is None:
if key is None or value is None:
return {'Error': 'At least one key/value pair is required'}
items = {key: value}
elif isinstance(items, six.string_types):
items = salt.utils.yaml.safe_load(items)
xml_items = ''
for pair in items:
xml_items += '<field id="{0}">{1}</field>'.format(pair, items[pair])
xml_items = '<employee>{0}</employee>'.format(xml_items)
status, result = _query(
action='employees',
command=emp_id,
data=xml_items,
method='POST',
)
return show_employee(emp_id, ','.join(items.keys()))
def delete(self, using=None, **kwargs):
"""
Deletes the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.delete`` unchanged.
"""
return self._get_connection(using).indices.delete(index=self._name, **kwargs)
def bitsCmp(self, other, op, evalFn=None):
"""
:attention: If other is Bool signal convert this to bool (not ideal,
due VHDL event operator)
"""
other = toHVal(other)
t = self._dtype
ot = other._dtype
iamVal = isinstance(self, Value)
otherIsVal = isinstance(other, Value)
if evalFn is None:
evalFn = op._evalFn
if iamVal and otherIsVal:
if ot == BOOL:
self = self._auto_cast(BOOL)
elif t == ot:
pass
elif isinstance(ot, Integer):
other = other._auto_cast(t)
else:
raise TypeError("Values of types (%r, %r) are not comparable" % (
self._dtype, other._dtype))
return bitsCmp__val(self, other, op, evalFn)
else:
if ot == BOOL:
self = self._auto_cast(BOOL)
elif t == ot:
pass
elif isinstance(ot, Integer):
other = other._auto_cast(self._dtype)
else:
raise TypeError("Values of types (%r, %r) are not comparable" % (
self._dtype, other._dtype))
# try to reduce useless cmp
res = None
if otherIsVal and other._isFullVld():
res = bitsCmp_detect_useless_cmp(self, other, op)
elif iamVal and self._isFullVld():
res = bitsCmp_detect_useless_cmp(other, self, CMP_OP_REVERSE[op])
if res is None:
pass
elif isinstance(res, Value):
return res
else:
assert res == AllOps.EQ, res
op = res
return Operator.withRes(op, [self, other], BOOL)
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
for elem in data:
id = utils.lowerFirst(elem.attrib['id'])
if id in self._settings:
self._settings[id]._loadData(elem)
continue
self._settings[id] = Setting(self._server, elem, self._initpath)
def get_name(self,item):
"""
Return the name for this item registered with this NameSelector.
If no name has previously been registered, then generate a new
one.
"""
if not isinstance(item,ford.sourceform.FortranBase):
raise Exception('{} is not of a type derived from FortranBase'.format(str(item)))
if item in self._items:
return self._items[item]
else:
if item.get_dir() not in self._counts:
self._counts[item.get_dir()] = {}
if item.name in self._counts[item.get_dir()]:
num = self._counts[item.get_dir()][item.name] + 1
else:
num = 1
self._counts[item.get_dir()][item.name] = num
name = item.name.lower().replace('<','lt')
# name is already lower
name = name.replace('>','gt')
name = name.replace('/','SLASH')
if name == '': name = '__unnamed__'
if num > 1:
name = name + '~' + str(num)
self._items[item] = name
return name
def getDignities(self):
""" Returns the dignities belonging to this object. """
info = self.getInfo()
dignities = [dign for (dign, objID) in info.items()
if objID == self.obj.id]
return dignities
def guess_file_compression(file_path, magic_dict=None):
"""Guesses the compression of an input file.
This function guesses the compression of a given file by checking for
a binary signature at the beginning of the file. These signatures are
stored in the :py:data:`MAGIC_DICT` dictionary. The supported compression
formats are gzip, bzip2 and zip. If none of the signatures in this
dictionary are found at the beginning of the file, it returns ``None``.
Parameters
----------
file_path : str
Path to input file.
magic_dict : dict, optional
Dictionary containing the signatures of the compression types. The
key should be the binary signature and the value should be the
compression format. If left ``None``, it falls back to
:py:data:`MAGIC_DICT`.
Returns
-------
file_type : str or None
If a compression type is detected, returns a string with the format.
If not, returns ``None``.
"""
if not magic_dict:
magic_dict = MAGIC_DICT
max_len = max(len(x) for x in magic_dict)
with open(file_path, "rb") as f:
file_start = f.read(max_len)
logger.debug("Binary signature start: {}".format(file_start))
for magic, file_type in magic_dict.items():
if file_start.startswith(magic):
return file_type
return None
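# MAGIC_DICT itself is not shown above; the standard signatures for the three
# supported formats, plus a self-contained version of the same startswith() check,
# are sketched here (assumed layout, not the module's actual constant).
import bz2
import gzip

MAGIC = {b"\x1f\x8b": "gz", b"BZh": "bz2", b"PK\x03\x04": "zip"}

def _sniff(path, magic=MAGIC):
    with open(path, "rb") as handle:
        start = handle.read(max(len(sig) for sig in magic))
    return next((fmt for sig, fmt in magic.items() if start.startswith(sig)), None)

with gzip.open("demo.gz", "wb") as handle:
    handle.write(b"payload")
with bz2.open("demo.bz2", "wb") as handle:
    handle.write(b"payload")
print(_sniff("demo.gz"), _sniff("demo.bz2"))  # gz bz2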
def ImportContractAddr(wallet, contract_hash, pubkey_script_hash):
"""
Args:
wallet (Wallet): a UserWallet instance
contract_hash (UInt160): hash of the contract to import
pubkey_script_hash (UInt160):
Returns:
neo.SmartContract.Contract.Contract
"""
contract = Blockchain.Default().GetContract(contract_hash)
if not contract or not pubkey_script_hash:
print("Could not find contract")
return
redeem_script = contract.Code.Script.hex()
# there has to be at least 1 param, and the first one needs to be a signature param
param_list = bytearray(b'\x00')
# if there's more than one param
# we set the first parameter to be the signature param
if len(contract.Code.ParameterList) > 1:
param_list = bytearray(contract.Code.ParameterList)
param_list[0] = 0
verification_contract = Contract.Create(redeem_script, param_list, pubkey_script_hash)
address = verification_contract.Address
wallet.AddContract(verification_contract)
print(f"Added contract address {address} to wallet")
return verification_contract
def notify_items(**kwargs):
"""
Signal endpoint that actually sends knocks whenever an instance is created / saved
"""
instance = kwargs.get('instance')
created = kwargs.get('created', False)
if hasattr(instance, 'send_knock') and active_knocks(instance):
try:
# This is a stupid generic interface for multilanguage models (hvad / parler)
if hasattr(instance, 'get_available_languages'):
langs = instance.get_available_languages()
else:
langs = [get_language()]
for lang in langs:
with override(lang):
instance.send_knock(created)
return True
except AttributeError: # pragma: no cover
pass
return False
def create_namespaced_secret(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_secret # noqa: E501
create a Secret # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_secret(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Secret body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Secret
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_secret_with_http_info(namespace, body, **kwargs) # noqa: E501
else:
(data) = self.create_namespaced_secret_with_http_info(namespace, body, **kwargs) # noqa: E501
return data
def _main(instaloader: Instaloader, targetlist: List[str],
username: Optional[str] = None, password: Optional[str] = None,
sessionfile: Optional[str] = None,
download_profile_pic: bool = True, download_posts=True,
download_stories: bool = False, download_highlights: bool = False, download_tagged: bool = False,
fast_update: bool = False,
max_count: Optional[int] = None, post_filter_str: Optional[str] = None,
storyitem_filter_str: Optional[str] = None) -> None:
"""Download set of profiles, hashtags etc. and handle logging in and session files if desired."""
# Parse and generate filter function
post_filter = None
if post_filter_str is not None:
post_filter = filterstr_to_filterfunc(post_filter_str, Post)
instaloader.context.log('Only download posts with property "{}".'.format(post_filter_str))
storyitem_filter = None
if storyitem_filter_str is not None:
storyitem_filter = filterstr_to_filterfunc(storyitem_filter_str, StoryItem)
instaloader.context.log('Only download storyitems with property "{}".'.format(storyitem_filter_str))
# Login, if desired
if username is not None:
try:
instaloader.load_session_from_file(username, sessionfile)
except FileNotFoundError as err:
if sessionfile is not None:
print(err, file=sys.stderr)
instaloader.context.log("Session file does not exist yet - Logging in.")
if not instaloader.context.is_logged_in or username != instaloader.test_login():
if password is not None:
try:
instaloader.login(username, password)
except TwoFactorAuthRequiredException:
while True:
try:
code = input("Enter 2FA verification code: ")
instaloader.two_factor_login(code)
break
except BadCredentialsException:
pass
else:
instaloader.interactive_login(username)
instaloader.context.log("Logged in as %s." % username)
# Try block for KeyboardInterrupt (save session on ^C)
profiles = set()
anonymous_retry_profiles = set()
try:
# Generate set of profiles, already downloading non-profile targets
for target in targetlist:
if (target.endswith('.json') or target.endswith('.json.xz')) and os.path.isfile(target):
with instaloader.context.error_catcher(target):
structure = load_structure_from_file(instaloader.context, target)
if isinstance(structure, Post):
if post_filter is not None and not post_filter(structure):
instaloader.context.log("<{} ({}) skipped>".format(structure, target), flush=True)
continue
instaloader.context.log("Downloading {} ({})".format(structure, target))
instaloader.download_post(structure, os.path.dirname(target))
elif isinstance(structure, StoryItem):
if storyitem_filter is not None and not storyitem_filter(structure):
instaloader.context.log("<{} ({}) skipped>".format(structure, target), flush=True)
continue
instaloader.context.log("Attempting to download {} ({})".format(structure, target))
instaloader.download_storyitem(structure, os.path.dirname(target))
elif isinstance(structure, Profile):
raise InvalidArgumentException("Profile JSON are ignored. Pass \"{}\" to download that profile"
.format(structure.username))
else:
raise InvalidArgumentException("{} JSON file not supported as target"
.format(structure.__class__.__name__))
continue
# strip '/' characters to be more shell-autocompletion-friendly
target = target.rstrip('/')
with instaloader.context.error_catcher(target):
if target[0] == '@':
instaloader.context.log("Retrieving followees of %s..." % target[1:])
profile = Profile.from_username(instaloader.context, target[1:])
for followee in profile.get_followees():
instaloader.save_profile_id(followee)
profiles.add(followee)
elif target[0] == '#':
instaloader.download_hashtag(hashtag=target[1:], max_count=max_count, fast_update=fast_update,
post_filter=post_filter)
elif target[0] == '-':
instaloader.download_post(Post.from_shortcode(instaloader.context, target[1:]), target)
elif target[0] == "%":
instaloader.download_location(location=target[1:], max_count=max_count, fast_update=fast_update,
post_filter=post_filter)
elif target == ":feed":
instaloader.download_feed_posts(fast_update=fast_update, max_count=max_count,
post_filter=post_filter)
elif target == ":stories":
instaloader.download_stories(fast_update=fast_update, storyitem_filter=storyitem_filter)
elif target == ":saved":
instaloader.download_saved_posts(fast_update=fast_update, max_count=max_count,
post_filter=post_filter)
else:
try:
profile = instaloader.check_profile_id(target)
if instaloader.context.is_logged_in and profile.has_blocked_viewer:
if download_profile_pic or ((download_posts or download_tagged) and not profile.is_private):
raise ProfileNotExistsException("{} blocked you; the profile will be downloaded anonymously."
.format(target))
else:
instaloader.context.error("{} blocked you.".format(target))
else:
profiles.add(profile)
except ProfileNotExistsException as err:
# Not only our profile.has_blocked_viewer condition raises ProfileNotExistsException,
# check_profile_id() also does, since access to blocked profile may be responded with 404.
if instaloader.context.is_logged_in and (download_profile_pic or download_posts or
download_tagged):
instaloader.context.log(err)
instaloader.context.log("Trying again anonymously, helps in case you are just blocked.")
with instaloader.anonymous_copy() as anonymous_loader:
with instaloader.context.error_catcher():
anonymous_retry_profiles.add(anonymous_loader.check_profile_id(target))
instaloader.context.error("Warning: {} will be downloaded anonymously (\"{}\")."
.format(target, err))
else:
raise
if len(profiles) > 1:
instaloader.context.log("Downloading {} profiles: {}".format(len(profiles),
' '.join([p.username for p in profiles])))
if profiles and download_profile_pic and not instaloader.context.is_logged_in:
instaloader.context.error("Warning: Use --login to download HD version of profile pictures.")
instaloader.download_profiles(profiles,
download_profile_pic, download_posts, download_tagged, download_highlights,
download_stories, fast_update, post_filter, storyitem_filter)
if anonymous_retry_profiles:
instaloader.context.log("Downloading anonymously: {}"
.format(' '.join([p.username for p in anonymous_retry_profiles])))
with instaloader.anonymous_copy() as anonymous_loader:
anonymous_loader.download_profiles(anonymous_retry_profiles,
download_profile_pic, download_posts, download_tagged,
fast_update=fast_update, post_filter=post_filter)
except KeyboardInterrupt:
print("\nInterrupted by user.", file=sys.stderr)
# Save session if it is useful
if instaloader.context.is_logged_in:
instaloader.save_session_to_file(sessionfile)
# User might be confused if Instaloader does nothing
if not targetlist:
if instaloader.context.is_logged_in:
# Instaloader did at least save a session file
instaloader.context.log("No targets were specified, thus nothing has been downloaded.")
else:
# Instaloader did not do anything
instaloader.context.log("usage:" + usage_string())
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
names_filepath = _get_names_filepath(data_dir, feature_name)
if tf.io.gfile.exists(names_filepath):
self.names = _load_names_from_file(names_filepath)
def dict_entry_has_key(line, key):
"""Return True if `line` is a dict entry that uses `key`.
Return False for multiline cases where the line should not be removed by
itself.
"""
if '#' in line:
return False
result = re.match(r'\s*(.*)\s*:\s*(.*),\s*$', line)
if not result:
return False
try:
candidate_key = ast.literal_eval(result.group(1))
except (SyntaxError, ValueError):
return False
if multiline_statement(result.group(2)):
return False
return candidate_key == key
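# Quick check of the regular expression and literal_eval step above (the
# multiline_statement() helper belongs to the surrounding module and is not shown).
import ast
import re

line = "    'color': 'red',"
match = re.match(r'\s*(.*)\s*:\s*(.*),\s*$', line)
print(ast.literal_eval(match.group(1)) == 'color')  # True
print(ast.literal_eval(match.group(1)) == 'size')   # False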
def get_installed_daps_detailed():
'''Returns a dictionary with all installed daps and their versions and locations
The first version and location in each dap's list is the preferred one.'''
daps = {}
for loc in _data_dirs():
s = get_installed_daps(loc)
for dap in s:
if dap not in daps:
daps[dap] = []
daps[dap].append({'version': get_installed_version_of(dap, loc), 'location': loc})
return daps
def to_bytes(value, encoding='utf-8'):
"""Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task in Python 2
because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value))
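# Usage sketch, assuming ``six`` is importable as in the module above: text values
# are encoded with the given codec, byte strings pass through untouched.
print(to_bytes(u'h\xe9llo'))        # b'h\xc3\xa9llo'
print(to_bytes(b'already bytes'))   # b'already bytes'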
def build_re_pattern_from_intervals(intervals: IntervalListType) -> BuiltInReType:
"""
Convert intervals to regular expression pattern.
:param intervals: Unicode codepoint intervals.
"""
inner = [f'{chr(lb)}-{chr(ub)}' for lb, ub in intervals]
joined_inner = ''.join(inner)
pattern = f'[{joined_inner}]+'
return re.compile(pattern, re.UNICODE)
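# The same construction inlined so it runs without the module's type aliases
# (IntervalListType and BuiltInReType are only annotations in the original).
import re

intervals = [(0x30, 0x39), (0x61, 0x7A)]  # ASCII digits and lowercase letters
pattern = re.compile('[' + ''.join(f'{chr(lb)}-{chr(ub)}' for lb, ub in intervals) + ']+', re.UNICODE)
print(pattern.findall("abc 123 XYZ"))  # ['abc', '123']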
def onUserError(self, fail, message):
"""
Handle user errors
"""
self.log.error(fail)
self.log.error(message)
def _validate(self):
"""
The purpose of this method is to verify that the user has set sensible
values for the training program before rendering. The user will still
be able to render, but error messages will be printed. This method:
* Validates that the average intensity is in the range [65, 85].
* Validates that the number of repetitions is in the range [15, 45].
* Validates that 'reps_to_intensity_func' maps to [0, 100].
* Validates that 'reps_to_intensity_func' is a decreasing function.
* Validates that the exercises do not grow more than 2.5% per week.
Apart from these sanity checks, the user is on his own.
"""
# Validate the intensity
if max([s * self.intensity for s in self._intensity_scalers]) > 85:
warnings.warn('\nWARNING: Average intensity is > 85.')
if min([s * self.intensity for s in self._intensity_scalers]) < 65:
warnings.warn('\nWARNING: Average intensity is < 65.')
# Validate the repetitions
if max([s * self.reps_per_exercise for s in self._rep_scalers]) > 45:
warnings.warn('\nWARNING: Number of repetitions > 45.')
if min([s * self.reps_per_exercise for s in self._rep_scalers]) < 15:
warnings.warn('\nWARNING: Number of repetitions < 15.')
# Validate the 'reps_to_intensity_func'
for x1, x2 in zip(range(1, 20), range(2, 21)):
y1 = self.reps_to_intensity_func(x1)
y2 = self.reps_to_intensity_func(x2)
if y1 < y2:
warnings.warn("\n'reps_to_intensity_func' is not decreasing.")
if any(self.reps_to_intensity_func(x) > 100 for x in range(1, 20)):
warnings.warn("\n'reps_to_intensity_func' maps to > 100.")
if any(self.reps_to_intensity_func(x) < 0 for x in range(1, 20)):
warnings.warn("\n'reps_to_intensity_func' maps to < 0.")
# Validate the exercises
for day in self.days:
for dynamic_ex in day.dynamic_exercises:
start, end = dynamic_ex.start_weight, dynamic_ex.final_weight
percentage_growth = (end / start) ** (1 / self.duration)
percentage_growth = dynamic_ex.weekly_growth(self.duration)
if percentage_growth > 4:
msg = '\n"{}" grows with {}% each week.'.format(
dynamic_ex.name, percentage_growth)
warnings.warn(msg)
def add_field(self, name, script, lang=None, params=None, ignore_failure=False):
"""
Add a field to script_fields
"""
data = {}
if lang:
data["lang"] = lang
if script:
data['script'] = script
else:
raise ScriptFieldsError("Script is required for script_fields definition")
if params:
if isinstance(params, dict):
if len(params):
data['params'] = params
else:
raise ScriptFieldsError("Parameters should be a valid dictionary")
if ignore_failure:
data['ignore_failure'] = ignore_failure
self.fields[name] = data
def _request(self, path, key, data, method, key_is_cik, extra_headers={}):
"""Generically shared HTTP request method.
Args:
path: The API endpoint to interact with.
key: A string for the key used by the device for the API. Either a CIK or token.
data: A string for the pre-encoded data to be sent with this request.
method: A string denoting the HTTP verb to use for the request (e.g. 'GET', 'POST')
key_is_cik: Whether or not the device key used is a CIK or token.
extra_headers: A dictionary of extra headers to include with the request.
Returns:
A ProvisionResponse containing the result of the HTTP request.
"""
if method == 'GET':
if len(data) > 0:
url = path + '?' + data
else:
url = path
body = None
else:
url = path
body = data
headers = {}
if key_is_cik:
headers['X-Exosite-CIK'] = key
else:
headers['X-Exosite-Token'] = key
if method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
headers['Accept'] = 'text/plain, text/csv, application/x-www-form-urlencoded'
headers.update(extra_headers)
body, response = self._onephttp.request(method,
url,
body,
headers)
pr = ProvisionResponse(body, response)
if self._raise_api_exceptions and not pr.isok:
raise ProvisionException(pr)
return pr
def updateState(self, slicedArray, rtiInfo, separateFields):
""" Sets the slicedArray and rtiInfo and other members. This will reset the model.
Will be called from the tableInspector._drawContents.
"""
self.beginResetModel()
try:
# The sliced array can be a masked array or a (regular) numpy array.
# The table works fine with masked arrays, no need to replace the masked values.
self._slicedArray = slicedArray
if slicedArray is None:
self._nRows = 0
self._nCols = 0
self._fieldNames = []
else:
self._nRows, self._nCols = self._slicedArray.shape
if self._slicedArray.data.dtype.names:
self._fieldNames = self._slicedArray.data.dtype.names
else:
self._fieldNames = []
self._rtiInfo = rtiInfo
self._separateFields = separateFields
# Don't put numbers in the header if the record is of structured type, fields are
# placed in separate cells and the fake dimension is selected (combo index 0)
if self._separateFields and self._fieldNames:
if self._rtiInfo['x-dim'] == FAKE_DIM_NAME:
self._separateFieldOrientation = Qt.Horizontal
self._numbersInHeader = False
elif self._rtiInfo['y-dim'] == FAKE_DIM_NAME:
self._separateFieldOrientation = Qt.Vertical
self._numbersInHeader = False
else:
self._separateFieldOrientation = Qt.Horizontal
self._numbersInHeader = True
else:
self._separateFieldOrientation = None
self._numbersInHeader = True
finally:
self.endResetModel()
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def in_(self, qfield, *values):
''' Check to see that the value of ``qfield`` is one of ``values``
:param qfield: Instances of :class:`ommongo.query_expression.QueryExpression`
:param values: Values should be python values which ``qfield`` \
understands
'''
# TODO: make sure that this field represents a list
qfield = resolve_name(self.type, qfield)
self.filter(QueryExpression({ qfield : { '$in' : [qfield.wrap_value(value) for value in values]}}))
return self
def _set_residual_probability(p: np.ndarray) -> np.ndarray:
"""Turns any use of `RESIDUAL_CHOICE` into a residual probability.
Parameters
----------
p :
Array where each row is a set of probability weights and potentially
a `RESIDUAL_CHOICE` placeholder.
Returns
-------
np.ndarray
Array where each row is a set of normalized probability weights.
"""
residual_mask = p == RESIDUAL_CHOICE
if residual_mask.any(): # I.E. if we have any placeholders.
if np.any(np.sum(residual_mask, axis=1) - 1):
raise RandomnessError(
'More than one residual choice supplied for a single set of weights. Weights: {}.'.format(p))
p[residual_mask] = 0
residual_p = 1 - np.sum(p, axis=1) # Probabilities sum to 1.
if np.any(residual_p < 0): # We got un-normalized probability weights.
raise RandomnessError(
'Residual choice supplied with weights that summed to more than 1. Weights: {}.'.format(p))
p[residual_mask] = residual_p
return p
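# Worked example of the residual substitution, with an illustrative sentinel standing
# in for the module's RESIDUAL_CHOICE constant; each row here carries exactly one
# placeholder, so the row-wise residuals line up with the masked slots.
import numpy as np

SENTINEL = -1.0
p = np.array([[0.2, 0.3, SENTINEL],
              [0.1, SENTINEL, 0.4]])
mask = p == SENTINEL
p[mask] = 0
p[mask] = 1 - p.sum(axis=1)  # each placeholder absorbs the leftover probability mass
print(p)  # [[0.2 0.3 0.5]
          #  [0.1 0.5 0.4]]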
def get_metadata_parser(metadata_container, **metadata_defaults):
"""
Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file.
:return: a new instance of a parser corresponding to the standard represented by metadata_container
:see: get_parsed_content(metadata_content) for more on types of content that can be parsed
"""
parser_type = None
if isinstance(metadata_container, MetadataParser):
parser_type = type(metadata_container)
elif isinstance(metadata_container, type):
parser_type = metadata_container
metadata_container = metadata_container().update(**metadata_defaults)
xml_root, xml_tree = get_parsed_content(metadata_container)
# The get_parsed_content method ensures only these roots will be returned
parser = None
if parser_type is not None:
parser = parser_type(xml_tree, **metadata_defaults)
elif xml_root in ISO_ROOTS:
parser = IsoParser(xml_tree, **metadata_defaults)
else:
has_arcgis_data = any(element_exists(xml_tree, e) for e in ARCGIS_NODES)
if xml_root == FGDC_ROOT and not has_arcgis_data:
parser = FgdcParser(xml_tree, **metadata_defaults)
elif xml_root in ARCGIS_ROOTS:
parser = ArcGISParser(xml_tree, **metadata_defaults)
return parser
def _proc_loop(proc_id, alive, queue, fn):
"""Thread loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
"""
print("proc {} started".format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and not put_success:
try:
queue.put(data, timeout=0.5)
put_success = True
except QFullExcept:
# print("Queue Full")
pass
except KeyboardInterrupt:
print("W: interrupt received, stopping process {} ...".format(proc_id))
print("Closing process {}".format(proc_id))
queue.close()
def add_view(self, *args, **kwargs):
"""
Add a new view
Parameters
----------
uid: string
The uid of new view
width: int
The width of this of view on a 12 unit grid
height: int
The height of the this view. The height is proportional
to the height of all the views present.
x: int
The position of this view on the grid
y: int
The position of this view on the grid
initialXDomain: [int, int]
The initial x range of the view
initialYDomain: [int, int]
The initial y range of the view
"""
new_view = View(*args, **kwargs)
for view in self.views:
if view.uid == new_view.uid:
raise ValueError("View with this uid already exists")
self.views += [new_view]
return new_view
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
# only add an id if the cell has a style
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def from_db_value(cls, value, *_) -> Optional[LocalizedValue]:
"""Turns the specified database value into its Python
equivalent.
Arguments:
value:
The value that is stored in the database and
needs to be converted to its Python equivalent.
Returns:
A :see:LocalizedValue instance containing the
data extracted from the database.
"""
if not value:
if getattr(settings, 'LOCALIZED_FIELDS_EXPERIMENTAL', False):
return None
else:
return cls.attr_class()
# we can get a list if an aggregation expression was used..
# if we the expression was flattened when only one key was selected
# then we don't wrap each value in a localized value, otherwise we do
if isinstance(value, list):
result = []
for inner_val in value:
if isinstance(inner_val, dict):
if inner_val is None:
result.append(None)
else:
result.append(cls.attr_class(inner_val))
else:
result.append(inner_val)
return result
# this is for when you select an individual key, it will be string,
# not a dictionary, we'll give it to you as a flat value, not as a
# localized value instance
if not isinstance(value, dict):
return value
return cls.attr_class(value)
def create(self, resource, timeout=-1):
"""
Creates a scope.
Args:
resource (dict): Object to create.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Created scope.
"""
return self._client.create(resource, timeout=timeout, default_values=self.DEFAULT_VALUES)
def visit_Slice(self, node: ast.Slice) -> slice:
"""Visit ``lower``, ``upper`` and ``step`` and recompute the node as a ``slice``."""
lower = None # type: Optional[int]
if node.lower is not None:
lower = self.visit(node=node.lower)
upper = None # type: Optional[int]
if node.upper is not None:
upper = self.visit(node=node.upper)
step = None # type: Optional[int]
if node.step is not None:
step = self.visit(node=node.step)
result = slice(lower, upper, step)
self.recomputed_values[node] = result
return result
def findall_window_ids(pattern):
"""
CommandLine:
wmctrl -l
python -m utool.util_ubuntu XCtrl.findall_window_ids gvim --src
python -m utool.util_ubuntu XCtrl.findall_window_ids gvim --src
python -m utool.util_ubuntu XCtrl.findall_window_ids joncrall --src
xprop -id
wmctrl -l | awk '{print $1}' | xprop -id
0x00a00007 | grep "WM_CLASS(STRING)"
"""
import utool as ut
cmdkw = dict(verbose=False, quiet=True, silence=True)
command = "wmctrl -lx | grep '%s' | awk '{print $1}'" % (pattern,)
# print(command)
winid_list = ut.cmd(command, **cmdkw)[0].strip().split('\n')
winid_list = [h for h in winid_list if h]
winid_list = [int(h, 16) for h in winid_list]
return winid_list
def kallisto(fastq, out_dir, cb_histogram, cb_cutoff):
''' Convert fastqtransformed file to output format compatible with
kallisto.
'''
parser_re = re.compile('(.*):CELL_(?P<CB>.*):UMI_(?P<UMI>.*)\\n(.*)\\n\\+\\n(.*)\\n')
if fastq.endswith('gz'):
fastq_fh = gzip.GzipFile(fileobj=open(fastq))
elif fastq == "-":
fastq_fh = sys.stdin
else:
fastq_fh = open(fastq)
cb_depth_set = get_cb_depth_set(cb_histogram, cb_cutoff)
cb_set = set()
cb_batch = collections.defaultdict(list)
parsed = 0
for read in stream_fastq(fastq_fh):
match = parser_re.search(read).groupdict()
umi = match['UMI']
cb = match['CB']
if cb_depth_set and cb not in cb_depth_set:
continue
parsed += 1
cb_set.add(cb)
cb_batch[cb].append((read, umi))
# write in batches to avoid opening up file handles repeatedly
if not parsed % 10000000:
for cb, chunk in cb_batch.items():
write_kallisto_chunk(out_dir, cb, chunk)
cb_batch = collections.defaultdict(list)
for cb, chunk in cb_batch.items():
write_kallisto_chunk(out_dir, cb, chunk)
with open(os.path.join(out_dir, "barcodes.batch"), "w") as out_handle:
out_handle.write("#id umi-file file-1\n")
batchformat = "{cb} {cb}.umi {cb}.fq\n"
for cb in cb_set:
out_handle.write(batchformat.format(**locals()))
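# With the named-group syntax written as (?P<CB>...), the pattern extracts the cell
# barcode and UMI from one transformed FASTQ record like so:
import re

demo_re = re.compile('(.*):CELL_(?P<CB>.*):UMI_(?P<UMI>.*)\\n(.*)\\n\\+\\n(.*)\\n')
record = "@r1:CELL_ACGT:UMI_TTAA\nACGTACGT\n+\nFFFFFFFF\n"
groups = demo_re.search(record).groupdict()
print(groups['CB'], groups['UMI'])  # ACGT TTAA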
def _to_snippet(self, cfg_node=None, addr=None, size=None, thumb=False, jumpkind=None, base_state=None):
"""
Convert a CFGNode instance to a CodeNode object.
:param angr.analyses.CFGNode cfg_node: The CFGNode instance.
:param int addr: Address of the node. Only used when `cfg_node` is None.
:param bool thumb: Whether this is in THUMB mode or not. Only used for ARM code and when `cfg_node` is None.
:param str or None jumpkind: Jumpkind of this node.
:param SimState or None base_state: The state where BlockNode should be created from.
:return: A converted CodeNode instance.
:rtype: CodeNode
"""
if cfg_node is not None:
addr = cfg_node.addr
size = cfg_node.size
thumb = cfg_node.thumb
else:
addr = addr
size = size
thumb = thumb
if addr is None:
raise ValueError('_to_snippet(): Either cfg_node or addr must be provided.')
if self.project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
hooker = self.project._sim_procedures[addr]
size = hooker.kwargs.get('length', 0)
return HookNode(addr, size, type(hooker))
if cfg_node is not None:
return BlockNode(addr, size, thumb=thumb, bytestr=cfg_node.byte_string) # pylint: disable=no-member
else:
return self.project.factory.snippet(addr, size=size, jumpkind=jumpkind, thumb=thumb,
backup_state=base_state)
def step_it_should_fail_with(context):
'''
EXAMPLE:
...
when I run "behave ..."
then it should fail with:
"""
TEXT
"""
'''
assert context.text is not None, "ENSURE: multiline text is provided."
step_command_output_should_contain(context)
assert_that(context.command_result.returncode, is_not(equal_to(0)))
def preview(ident):
'''Preview an harvesting for a given source'''
source = get_source(ident)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest()
def _set_start_time(self, v, load=False):
"""
Setter method for start_time, mapped from YANG variable /keychain/key/accept_lifetime/start_time (time-format-start)
If this variable is read-only (config: false) in the
source YANG file, then _set_start_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_start_time() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\\|(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\\d{4})', 'length': [u'0..32']}), is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'enter start time in the format HH:MM:SS|MM/DD/YYYY', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='time-format-start', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """start_time must be of a type compatible with time-format-start""",
'defined-type': "brocade-keychain:time-format-start",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\\|(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\\d{4})', 'length': [u'0..32']}), is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'enter start time in the format HH:MM:SS|MM/DD/YYYY', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='time-format-start', is_config=True)""",
})
self.__start_time = t
if hasattr(self, '_set'):
self._set() | 0.004796 |
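
The setter above rejects any value that does not match the YANG pattern for a combined time-and-date string. A quick standalone check of that same regular expression (only the pattern is copied from the snippet; the sample values are made up):

import re

# Pattern copied from the restriction dict above: a clock time, a literal '|',
# then a calendar date.
TIME_FORMAT_START = re.compile(
    r'(([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]\|'
    r'(0[1-9]|1[012])/(0[1-9]|[12][0-9]|3[01])/\d{4})'
)

print(bool(TIME_FORMAT_START.fullmatch("23:59:00|12/31/2019")))  # True
print(bool(TIME_FORMAT_START.fullmatch("25:00:00|12/31/2019")))  # False (invalid hour)
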
def default_package(self):
"""
::
GET /:login/packages
:Returns: the default package for this datacenter
:rtype: :py:class:`dict` or ``None``
Requests all the packages in this datacenter, filters for the default,
and returns the corresponding dict, if a default has been defined.
"""
packages = [pk for pk in self.packages()
if pk.get('default') == 'true']
if packages:
return packages[0]
else:
return None | 0.013913 |
def load_hdf(cls, filename, path='', name=None):
"""
A class method to load a saved StarModel from an HDF5 file.
File must have been created by a call to :func:`StarModel.save_hdf`.
:param filename:
H5 file to load.
:param path: (optional)
Path within HDF file.
:return:
:class:`StarModel` object.
"""
store = pd.HDFStore(filename)
try:
samples = store['{}/samples'.format(path)]
attrs = store.get_storer('{}/samples'.format(path)).attrs
except:
store.close()
raise
properties = attrs.properties
maxAV = attrs.maxAV
max_distance = attrs.max_distance
min_logg = attrs.min_logg
ic_type = attrs.ic_type
use_emcee = attrs.use_emcee
basename = attrs._mnest_basename
if name is None:
try:
name = attrs.name
except:
name = ''
store.close()
#ic = ic_type() don't need to initialize anymore
mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance,
use_emcee=use_emcee, name=name,
**properties)
mod._samples = samples
mod._mnest_basename = basename
return mod | 0.005212 |
def load_file(self):
"""
Loads SAR format logfile in ASCII format (sarXX).
:return: ``True`` if loading and parsing of file went fine, \
``False`` if it failed (at any point)
"""
# We first split file into pieces
searchunks = self._split_file()
if searchunks:
# And then we parse pieces into meaningful data
usage = self._parse_file(searchunks)
if 'CPU' in usage:
return False
self._sarinfo = usage
del usage
return True
else:
return False | 0.0032 |
def create_continuous_query(self, name, select, database=None,
resample_opts=None):
r"""Create a continuous query for a database.
:param name: the name of continuous query to create
:type name: str
:param select: select statement for the continuous query
:type select: str
:param database: the database for which the continuous query is
created. Defaults to current client's database
:type database: str
:param resample_opts: resample options
:type resample_opts: str
:Example:
::
>> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \
... 'FROM "cpu" GROUP BY time(1m)'
>> client.create_continuous_query(
... 'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m'
... )
>> client.get_list_continuous_queries()
[
{
'db_name': [
{
'name': 'cpu_mean',
'query': 'CREATE CONTINUOUS QUERY "cpu_mean" '
'ON "db_name" '
'RESAMPLE EVERY 10s FOR 2m '
'BEGIN SELECT mean("value") '
'INTO "cpu_mean" FROM "cpu" '
'GROUP BY time(1m) END'
}
]
}
]
"""
query_string = (
"CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END"
).format(quote_ident(name), quote_ident(database or self._database),
' RESAMPLE ' + resample_opts if resample_opts else '', select)
self.query(query_string) | 0.001629 |
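
The method does little more than assemble a CREATE CONTINUOUS QUERY statement and submit it through self.query(). A rough standalone sketch of the same string assembly; quote_ident here is a simplified stand-in for the client's real identifier-quoting helper:

def quote_ident(value):
    # Simplified stand-in: wrap an identifier in double quotes, escaping embedded quotes.
    return '"{}"'.format(value.replace('"', '\\"'))

def build_cq(name, select, database, resample_opts=None):
    return "CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END".format(
        quote_ident(name),
        quote_ident(database),
        ' RESAMPLE ' + resample_opts if resample_opts else '',
        select,
    )

select_clause = 'SELECT mean("value") INTO "cpu_mean" FROM "cpu" GROUP BY time(1m)'
print(build_cq("cpu_mean", select_clause, "db_name", "EVERY 10s FOR 2m"))
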
def _add_attribute_values(self, value, att_mappings, indices):
"""Add an attribute value to the given vertices.
:param int value: Attribute value.
:param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes.
:param list indices: Indices of the vertices.
"""
for i in indices:
att_mappings[i].append(value) | 0.007538 |
def encrypt(self):
"""
We perform no encryption, we just encode the value as base64 and then
decode it in decrypt().
"""
value = self.parameters.get("Plaintext")
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'}) | 0.007614 |
def _reatach(self):
"""Reinsert the hidden items."""
for item, p, idx in self._detached:
# The item may have been deleted.
if self.treeview.exists(item) and self.treeview.exists(p):
self.treeview.move(item, p, idx)
self._detached = [] | 0.006711 |
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
"""
Generate unique coordinates using coord and symmetry positions
and also their corresponding magnetic moments, if supplied.
"""
coords = []
if magmoms_in:
magmoms = []
if len(magmoms_in) != len(coords_in):
raise ValueError
for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if isinstance(op, MagSymmOp):
# Up to this point, magmoms have been defined relative
# to crystal axis. Now convert to Cartesian and into
# a Magmom object.
magmom = Magmom.from_moment_relative_to_crystal_axes(
op.operate_magmom(tmp_magmom),
lattice=lattice
)
else:
magmom = Magmom(tmp_magmom)
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
magmoms.append(magmom)
return coords, magmoms
else:
for tmp_coord in coords_in:
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
return coords, [Magmom(0)] * len(coords) | 0.001064 |
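
Each symmetry-generated fractional coordinate is wrapped into [0, 1) and kept only if no periodic image of it is already in the list. A small numpy sketch of that periodic-boundary deduplication, with a simplified stand-in for in_coord_list_pbc (the tolerance is illustrative):

import numpy as np

def in_coord_list_pbc(coords, coord, atol=1e-3):
    # True if `coord` matches any stored coordinate up to a lattice translation.
    if not coords:
        return False
    diff = np.array(coords) - np.array(coord)
    diff -= np.round(diff)                       # minimum-image difference
    return bool(np.any(np.all(np.abs(diff) < atol, axis=1)))

def unique_pbc(raw_coords, atol=1e-3):
    unique = []
    for c in raw_coords:
        c = np.array(c) - np.floor(c)            # wrap into [0, 1)
        if not in_coord_list_pbc(unique, c, atol):
            unique.append(c)
    return unique

print(len(unique_pbc([[0.25, 0.25, 0.25], [1.25, 0.25, 0.25], [0.75, 0.25, 0.25]])))  # 2
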
def _scroll_up(self, cli):
" Scroll window up. "
info = self.render_info
if info.vertical_scroll > 0:
# TODO: not entirely correct yet in case of line wrapping and long lines.
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli)
self.vertical_scroll -= 1 | 0.009877 |
def prepare_to_run_task(context, claim_task):
"""Given a `claim_task` json dict, prepare the `context` and `work_dir`.
Set `context.claim_task`, and write a `work_dir/current_task_info.json`
Args:
context (scriptworker.context.Context): the scriptworker context.
claim_task (dict): the claim_task dict.
Returns:
dict: the contents of `current_task_info.json`
"""
current_task_info = {}
context.claim_task = claim_task
current_task_info['taskId'] = get_task_id(claim_task)
current_task_info['runId'] = get_run_id(claim_task)
log.info("Going to run taskId {taskId} runId {runId}!".format(
**current_task_info
))
context.write_json(
os.path.join(context.config['work_dir'], 'current_task_info.json'),
current_task_info, "Writing current task info to {path}..."
)
return current_task_info | 0.001121 |
def plot_transit_model(self, show=True, fold=None, ax=None):
'''
        Plot the light curve de-trended with a joint instrumental + transit
model with the best fit transit model overlaid. The transit model
should be specified using the :py:obj:`transit_model` attribute
and should be an instance or list of instances of
:py:class:`everest.transit.TransitModel`.
:param bool show: Show the plot, or return the `fig, ax` instances? \
Default `True`
:param str fold: The name of the planet/transit model on which to \
fold. If only one model is present, can be set to \
:py:obj:`True`. Default :py:obj:`False` \
(does not fold the data).
:param ax: A `matplotlib` axis instance to use for plotting. \
Default :py:obj:`None`
'''
if self.transit_model is None:
raise ValueError("No transit model provided!")
if self.transit_depth is None:
self.compute()
if fold is not None:
if (fold is True and len(self.transit_model) > 1) or \
(type(fold) is not str):
raise Exception(
"Kwarg `fold` should be the name of the transit " +
"model on which to fold the data.")
if fold is True:
# We are folding on the first index of `self.transit_model`
fold = 0
elif type(fold) is str:
# Figure out the index of the transit model on which to fold
fold = np.argmax(
[fold == tm.name for tm in self.transit_model])
log.info('Plotting the transit model folded ' +
'on transit model index %d...' % fold)
else:
log.info('Plotting the transit model...')
# Set up axes
if ax is None:
if fold is not None:
fig, ax = pl.subplots(1, figsize=(8, 5))
else:
fig, ax = pl.subplots(1, figsize=(13, 6))
fig.canvas.set_window_title('EVEREST Light curve')
else:
fig = pl.gcf()
# Set up some stuff
if self.cadence == 'sc':
ms = 2
else:
ms = 4
# Fold?
if fold is not None:
times = self.transit_model[fold].params.get('times', None)
if times is not None:
time = self.time - \
[times[np.argmin(np.abs(ti - times))] for ti in self.time]
t0 = times[0]
else:
t0 = self.transit_model[fold].params.get('t0', 0.)
period = self.transit_model[fold].params.get('per', 10.)
time = (self.time - t0 - period / 2.) % period - period / 2.
dur = 0.01 * \
len(np.where(self.transit_model[fold](
np.linspace(t0 - 0.5, t0 + 0.5, 100)) < 0)[0])
else:
time = self.time
ax.plot(self.apply_mask(time), self.apply_mask(self.flux),
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.outmask], self.flux[self.outmask],
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.transitmask], self.flux[self.transitmask],
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
# Plot the transit + GP model
med = np.nanmedian(self.apply_mask(self.flux))
transit_model = \
med * np.sum([depth * tm(self.time)
for tm, depth in zip(self.transit_model,
self.transit_depth)], axis=0)
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
y, _ = gp.predict(self.apply_mask(
self.flux - transit_model) - med, self.time)
if fold is not None:
flux = (self.flux - y) / med
ax.plot(self.apply_mask(time), self.apply_mask(flux),
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.outmask], flux[self.outmask], ls='none',
marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.transitmask], flux[self.transitmask],
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
hires_time = np.linspace(-5 * dur, 5 * dur, 1000)
hires_transit_model = 1 + \
self.transit_depth[fold] * \
self.transit_model[fold](hires_time + t0)
ax.plot(hires_time, hires_transit_model, 'r-', lw=1, alpha=1)
else:
flux = self.flux
y += med
y += transit_model
ax.plot(time, y, 'r-', lw=1, alpha=1)
# Plot the bad data points
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
bmask = [i for i in self.badmask if i not in self.nanmask]
ax.plot(time[bmask], flux[bmask], 'r.', markersize=ms, alpha=0.25)
# Appearance
ax.set_ylabel('EVEREST Flux', fontsize=18)
ax.margins(0.01, 0.1)
if fold is not None:
ax.set_xlabel('Time From Transit Center (days)', fontsize=18)
ax.set_xlim(-3 * dur, 3 * dur)
else:
ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS, fontsize=18)
for brkpt in self.breakpoints[:-1]:
ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
ax.get_yaxis().set_major_formatter(Formatter.Flux)
# Get y lims that bound most of the flux
if fold is not None:
lo = np.min(hires_transit_model)
pad = 1.5 * (1 - lo)
ylim = (lo - pad, 1 + pad)
else:
f = np.delete(flux, bnmask)
N = int(0.995 * len(f))
hi, lo = f[np.argsort(f)][[N, -N]]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
# Indicate off-axis outliers
for i in np.where(flux < ylim[0])[0]:
if i in bmask:
color = "#ffcccc"
else:
color = "#ccccff"
ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
xytext=(0, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color,
alpha=0.5))
for i in np.where(flux > ylim[1])[0]:
if i in bmask:
color = "#ffcccc"
else:
color = "#ccccff"
ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
xytext=(0, -15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color,
alpha=0.5))
if show:
pl.show()
pl.close()
else:
return fig, ax | 0.00028 |
def display_breakdown(scores, outfile=None):
"""Writes the point breakdown to `outfile` given a dictionary of scores.
`outfile` should be a string. If `outfile` is None, write to stdout.
RETURNS:
dict; 'Total' -> finalized score (float)
"""
total = 0
outfile = open(outfile, 'w') if outfile else sys.stdout
format.print_line('-')
print('Point breakdown', file=outfile)
for name, (score, max_score) in scores.items():
print(' {}: {}/{}'.format(name, score, max_score), file=outfile)
total += score
print(file=outfile)
print('Score:', file=outfile)
print(' Total: {}'.format(total), file=outfile)
return {'Total': total} | 0.001429 |
def search_vip_request(self, search):
"""
Method to list vip request
param search: search
"""
uri = 'api/v3/vip-request/?%s' % urllib.urlencode({'search': search})
return super(ApiVipRequest, self).get(uri) | 0.007813 |
def _FormatMessage(self, event):
"""Formats the message.
Args:
event (EventObject): event.
Returns:
str: message field.
Raises:
NoFormatterFound: if no event formatter can be found to match the data
type in the event.
"""
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
return message | 0.005445 |
def _update_simulation_end_from_lsm(self):
"""
Update simulation end time from LSM
"""
te = self.l2g.xd.lsm.datetime[-1]
simulation_end = te.replace(tzinfo=utc) \
.astimezone(tz=self.tz) \
.replace(tzinfo=None)
if self.simulation_end is None:
self.simulation_end = simulation_end
elif self.simulation_end > simulation_end:
self.simulation_end = simulation_end
self._update_card("END_TIME",
self.simulation_end
.strftime("%Y %m %d %H %M")) | 0.00314 |
def stepper_request_library_version(self):
"""
Request the stepper library version from the Arduino.
To retrieve the version after this command is called, call
get_stepper_version
"""
data = [self.STEPPER_LIBRARY_VERSION]
self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data) | 0.008523 |
def get_complexity_factor(self, output='average', temp=300, doping_levels=False,
Lambda=0.5):
"""
        Fermi surface complexity factor, calculated as explained in Ref.
        Gibbs, Z. M. et al., Effective mass and Fermi surface complexity factor
        from ab initio band structure calculations.
        npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the complexity factor calculated using the average
of the three diagonal components of the seebeck and conductivity tensors.
                    'tensor' returns the complexity factor with respect to the three
                    diagonal components of the Seebeck and conductivity tensors.
doping_levels: False means that the complexity factor is calculated
for every value of the chemical potential
True means that the complexity factor is calculated
for every value of the doping levels for both n and p types
temp: temperature of calculated seebeck and conductivity.
Lambda: fitting parameter used to model the scattering (0.5 means constant
relaxation time).
Returns:
a list of values for the complexity factor w.r.t the chemical potential,
if doping_levels is set at False;
a dict with n an p keys that contain a list of values for the complexity factor
w.r.t the doping levels, if doping_levels is set at True;
if 'tensor' is selected, each element of the lists is a list containing
the three components of the complexity factor.
"""
if doping_levels:
cmplx_fact = {}
for dt in ('n','p'):
sbk_mass = self.get_seebeck_eff_mass(output, temp, True, Lambda)[dt]
cond_mass = self.get_average_eff_mass(output=output, doping_levels=True)[dt][temp]
if output == 'average':
cmplx_fact[dt] = [ (m_s/abs(m_c))**1.5 for m_s,m_c in zip(sbk_mass,cond_mass)]
elif output == 'tensor':
cmplx_fact[dt] = []
for i in range(len(sbk_mass)):
cmplx_fact[dt].append([])
for j in range(3):
cmplx_fact[dt][-1].append((sbk_mass[i][j]/abs(cond_mass[i][j][j]))**1.5)
else:
sbk_mass = self.get_seebeck_eff_mass(output, temp, False, Lambda)
cond_mass = self.get_average_eff_mass(output=output, doping_levels=False)[temp]
if output == 'average':
cmplx_fact = [ (m_s/abs(m_c))**1.5 for m_s,m_c in zip(sbk_mass,cond_mass)]
elif output == 'tensor':
cmplx_fact = []
for i in range(len(sbk_mass)):
cmplx_fact.append([])
for j in range(3):
cmplx_fact[-1].append((sbk_mass[i][j]/abs(cond_mass[i][j][j]))**1.5)
return cmplx_fact | 0.011339 |
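
For the 'average' output the method reduces to evaluating (m_S* / |m_c*|)^1.5 point by point. A tiny numeric illustration of that formula with made-up effective masses:

seebeck_masses = [1.2, 1.5, 2.0]       # hypothetical Seebeck effective masses
conduction_masses = [0.4, 0.5, -0.8]   # hypothetical conductivity effective masses

complexity = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(seebeck_masses, conduction_masses)]
print(complexity)  # approximately [5.196, 5.196, 3.953]
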
def _update_secrets(self):
'''update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base.
'''
self.secrets = read_client_secrets()
if self.secrets is not None:
if "registry" in self.secrets:
if "base" in self.secrets['registry']:
self.base = self.secrets['registry']['base']
self._update_base() | 0.0053 |
def get_home(self, home_id=None):
"""
Get the data about a home
"""
now = datetime.datetime.utcnow()
if self.home and now < self.home_refresh_at:
return self.home
if not self._do_auth():
raise RuntimeError("Unable to login")
if home_id is None:
home_id = self.home_id
url = self.api_base_url + "Home/GetHomeById"
params = {
"homeId": home_id
}
headers = {
"Accept": "application/json",
'Authorization':
'bearer ' + self.login_data['token']['accessToken']
}
response = requests.get(
url, params=params, headers=headers, timeout=10)
if response.status_code != 200:
raise RuntimeError(
"{} response code when getting home".format(
response.status_code))
home = response.json()
if self.cache_home:
self.home = home
self.home_refresh_at = (datetime.datetime.utcnow()
+ datetime.timedelta(minutes=5))
return home | 0.001724 |
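
The method caches the fetched home and records a refresh deadline five minutes in the future. The same expiry-based caching pattern in isolation (the TTL and the fetch callable are placeholders):

import datetime

class CachedFetcher:
    def __init__(self, fetch, ttl_minutes=5):
        self._fetch = fetch                      # callable producing fresh data
        self._ttl = datetime.timedelta(minutes=ttl_minutes)
        self._value = None
        self._refresh_at = None

    def get(self):
        now = datetime.datetime.utcnow()
        if self._value is not None and now < self._refresh_at:
            return self._value                   # still fresh
        self._value = self._fetch()
        self._refresh_at = now + self._ttl
        return self._value

fetcher = CachedFetcher(lambda: {"id": 1})
print(fetcher.get())   # fetched
print(fetcher.get())   # served from cache
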
def set_base_step_parameters(self, filename, bp_step, parameters='all', step_range=True, helical=False):
"""To read and store base-step (Shift, Slide, Rise, Tilt, Roll and Twist) and
helical base-step (X-disp, Y-disp, h-Rise, Inclination, Tip and h-Twist) parameters from an input file
Parameters
----------
filename : str
Input file, which is generated from do_x3dna. e.g. L-BPS_g.dat or L-BPH_g.dat.
bp_step : 1D list or array
base-steps to analyze
Example: ::
bp_step = [6] # step_range = False
bp_step = [4,15] # step_range = True
bp_step = range(4,15) # step_range = False
bp_step = np.arange(4,15) # step_range = False
bp_step = [2,5,6,7,9,12,18] # step_range = False
parameters : str or 1D list
Either ``All`` to extract all parameter available in input file
or list of either base-step or helical base-step parameter as follows:
If helical = ``False``:
* ``shift``
* ``slide``
* ``rise``
* ``tilt``
* ``roll``
* ``twist``
If helical = ``True``:
* ``x-disp``
* ``y-disp``
* ``h-rise``
* ``inclination``
* ``tip``
* ``h-twist``
step_range : bool
            ``Default=True``: As shown above, if ``True``, bp_step is taken as a range; otherwise it is a list or numpy array.
helical : bool
If ``True``, parameters in input file will be considered as helical base-steps parameters
If ``False``, parameters will be considered as base-steps parameters.
"""
if not (isinstance(bp_step, list) or isinstance(bp_step, np.ndarray)):
raise AssertionError(
"type %s is not list or np.ndarray" % type(bp_step))
if not (isinstance(parameters, list) or isinstance(parameters, np.ndarray)):
if parameters == 'all' and not helical:
parameters = ['shift', 'slide', 'rise', 'tilt', 'roll', 'twist']
elif parameters == 'all' and helical:
parameters = ['x-disp', 'y-disp', 'h-rise', 'inclination', 'tip', 'h-twist']
else:
raise ValueError(" ERROR: {0} is not accepted parameters!!! It should be either \"all\" or list of parameter names.".format(parameters) )
targetParameters = None
targetParametersReverse = None
if helical:
targetParameters = { 1:'x-disp', 2:'y-disp', 3:'h-rise', 4:'inclination', 5:'tip', 6:'h-twist' }
else:
targetParameters = { 1:'shift', 2:'slide', 3:'rise', 4:'tilt', 5:'roll', 6:'twist' }
targetParametersReverse = dict((v,k) for k,v in targetParameters.items())
# Check if requested parameters found within input file
gotParametersInputFile = checkParametersInputFile(filename)
if gotParametersInputFile is None:
raise IOError(' Something wrong in input file {0}.\n Cannot read parameters.\n File should be an output from do_x3dna.'.format(filename))
for parameter in parameters:
if parameter not in gotParametersInputFile:
raise ValueError(' Parameter {0} not found in input file. \n This file contains following parameters: \n {1}'.format(parameter, gotParametersInputFile))
InputParamIndex = []
for parameter in parameters:
if parameter in targetParameters.values():
InputParamIndex.append( targetParametersReverse[parameter] )
else:
print('\nWARNING: base pair parameters \"{0}\" not accepted. Skipping it !!\n' .format(parameter))
if not InputParamIndex:
raise ValueError("No acceptable base-pair parameters found!!!")
data, time = read_param_file(filename, InputParamIndex, bp_step, step_range, startBP=self.startBP)
self._set_time(time)
bpIndex, OutParamIndex = get_idx_of_bp_parameters(bp_step, InputParamIndex, step_range, startBP=self.startBP)
for i in range(len(data)):
for j in range(len(data[i])):
bp_num = str( bpIndex[i]+self.startBP )
param = targetParameters[OutParamIndex[j]+1]
self._set_data(data[i][j], 'bps', bp_num, param, scaleoffset=2) | 0.010855 |
def check_battery(self):
"""
        Implements the battery condition check. For now it just tries to read the standard battery
        entries in /sys.
"""
self.charging = False if \
subprocess.getoutput("cat /sys/class/power_supply/BAT0/status") == 'Discharging' \
else True
percent = subprocess.getoutput("cat /sys/class/power_supply/BAT0/capacity")
if not self.charging:
for val in self.dischlist:
if int(percent) <= int(val):
self.indicator.set_icon(self.dischformat.format(value=val))
break
else:
for val in self.chlist:
if int(percent) <= int(val):
self.indicator.set_icon(self.chformat.format(value=val))
break
return True | 0.005967 |
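
The check shells out to read two sysfs files. A stripped-down version that reads the same files with the standard library instead of subprocess.getoutput (the BAT0 paths only exist on Linux machines with such a battery):

from pathlib import Path

BAT = Path("/sys/class/power_supply/BAT0")

def read_battery():
    status = (BAT / "status").read_text().strip()
    percent = int((BAT / "capacity").read_text().strip())
    return status != "Discharging", percent      # (charging?, percentage)

if BAT.exists():
    print(read_battery())
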
def compute_features(self):
"""Actual implementation of the features.
Returns
-------
tonnetz: np.array(N, F)
The features, each row representing a feature vector for a give
time frame/beat.
"""
pcp = PCP(self.file_struct, self.feat_type, self.sr, self.hop_length,
self.n_bins, self.norm, self.f_min, self.n_octaves).features
tonnetz = librosa.feature.tonnetz(chroma=pcp.T).T
return tonnetz | 0.004024 |
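
librosa derives tonnetz features from a chroma representation, which is what the PCP feature supplies above. A minimal sketch of the same call outside this feature class (assumes librosa is installed; 'audio.wav' is a hypothetical input file):

import librosa

y, sr = librosa.load("audio.wav")                  # hypothetical input file
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
tonnetz = librosa.feature.tonnetz(chroma=chroma)   # shape (6, n_frames)
print(tonnetz.T.shape)                             # transposed to (n_frames, 6) as above
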
def compile(self,
container: Container,
verbose: bool = False
) -> CompilationOutcome:
"""
Attempts to compile the program inside a given container.
Params:
verbose: specifies whether to print the stdout and stderr produced
by the compilation command to the stdout. If `True`, then the
stdout and stderr will be printed.
Returns:
a summary of the outcome of the compilation attempt.
"""
# TODO use container name
bug = self.__installation.bugs[container.bug]
return bug.compiler.compile(self, container, verbose=verbose) | 0.007246 |
def _wiki_articles(shard_id, wikis_dir=None):
"""Generates WikipediaArticles from GCS that are part of shard shard_id."""
if not wikis_dir:
wikis_dir = WIKI_CONTENT_DIR
with tf.Graph().as_default():
dataset = tf.data.TFRecordDataset(
cc_utils.readahead(
os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)),
buffer_size=16 * 1000 * 1000)
def _parse_example(ex_ser):
"""Parse serialized Example containing Wikipedia article content."""
features = {
"url": tf.VarLenFeature(tf.string),
"title": tf.VarLenFeature(tf.string),
"section_titles": tf.VarLenFeature(tf.string),
"section_texts": tf.VarLenFeature(tf.string),
}
ex = tf.parse_single_example(ex_ser, features)
for k in ex.keys():
ex[k] = ex[k].values
ex["url"] = ex["url"][0]
ex["title"] = ex["title"][0]
return ex
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
except tf.errors.OutOfRangeError:
break
sections = [
WikipediaSection(title=text_encoder.to_unicode(title),
text=text_encoder.to_unicode(text))
for title, text in zip(ex["section_titles"], ex["section_texts"])
]
yield WikipediaArticle(
url=text_encoder.to_unicode(ex["url"]),
title=text_encoder.to_unicode(ex["title"]),
sections=sections) | 0.008516 |
def filter(self, filter_arguments):
"""
Takes a dictionary of filter parameters.
Return a list of objects based on a list of parameters.
"""
results = self._get_content()
# Filter based on a dictionary of search parameters
if isinstance(filter_arguments, dict):
for item, content in iteritems(self._get_content()):
for key, value in iteritems(filter_arguments):
keys = key.split('.')
value = filter_arguments[key]
if not self._contains_value({item: content}, keys, value):
del results[item]
# Filter based on an input string that should match database key
if isinstance(filter_arguments, str):
if filter_arguments in results:
return [{filter_arguments: results[filter_arguments]}]
else:
return []
return results | 0.002077 |
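
The dictionary branch splits each filter key on '.' so that a key such as 'meta.owner' reaches into nested values. A self-contained sketch of that dotted-path lookup (the helper name and sample data are invented for illustration):

def get_by_path(data, dotted_key):
    # Walk a nested dict following "outer.inner" style keys.
    node = data
    for part in dotted_key.split("."):
        if not isinstance(node, dict) or part not in node:
            return None
        node = node[part]
    return node

records = {
    "vm1": {"meta": {"owner": "alice"}},
    "vm2": {"meta": {"owner": "bob"}},
}
matches = {k: v for k, v in records.items() if get_by_path(v, "meta.owner") == "alice"}
print(matches)  # {'vm1': {'meta': {'owner': 'alice'}}}
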
def fmt_confusion_matrix(hyps: Sequence[Sequence[str]],
refs: Sequence[Sequence[str]],
label_set: Set[str] = None,
max_width: int = 25) -> str:
""" Formats a confusion matrix over substitutions, ignoring insertions
and deletions. """
if not label_set:
# Then determine the label set by reading
raise NotImplementedError()
alignments = [min_edit_distance_align(ref, hyp)
for hyp, ref in zip(hyps, refs)]
arrow_counter = Counter() # type: Dict[Tuple[str, str], int]
for alignment in alignments:
arrow_counter.update(alignment)
ref_total = Counter() # type: Dict[str, int]
for alignment in alignments:
ref_total.update([arrow[0] for arrow in alignment])
labels = [label for label, count
in sorted(ref_total.items(), key=lambda x: x[1], reverse=True)
if label != ""][:max_width]
format_pieces = []
fmt = "{:3} "*(len(labels)+1)
format_pieces.append(fmt.format(" ", *labels))
fmt = "{:3} " + ("{:<3} " * (len(labels)))
for ref in labels:
# TODO
ref_results = [arrow_counter[(ref, hyp)] for hyp in labels]
format_pieces.append(fmt.format(ref, *ref_results))
return "\n".join(format_pieces) | 0.002261 |
def remove_dhcp_server(self, server):
"""Removes the DHCP server settings
in server of type :class:`IDHCPServer`
DHCP server settings to be removed
raises :class:`OleErrorInvalidarg`
Host network interface @a name already exists.
"""
if not isinstance(server, IDHCPServer):
raise TypeError("server can only be an instance of type IDHCPServer")
self._call("removeDHCPServer",
in_p=[server]) | 0.009901 |
def query_all_issues(after):
"""Hits the github API for all closed issues after the given date, returns the data."""
page = count(1)
data = []
while True:
page_data = query_issues(next(page), after)
if not page_data:
break
data.extend(page_data)
return data | 0.00639 |
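
The loop pages through the API until an empty page comes back, with itertools.count supplying the page numbers. The same pattern with a stubbed fetch function in place of the real query_issues call:

from itertools import count

PAGES = {1: ["issue-1", "issue-2"], 2: ["issue-3"]}   # fake API responses

def fetch_page(page_number):
    return PAGES.get(page_number, [])                 # an empty list ends the loop

def fetch_all():
    data = []
    page = count(1)
    while True:
        page_data = fetch_page(next(page))
        if not page_data:
            break
        data.extend(page_data)
    return data

print(fetch_all())  # ['issue-1', 'issue-2', 'issue-3']
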
def item_enclosure_mime_type(self, item):
"""
Guess the enclosure's mimetype.
Note: this method is only called if item_enclosure_url
has returned something.
"""
mime_type, encoding = guess_type(self.cached_enclosure_url)
if mime_type:
return mime_type
return 'image/jpeg' | 0.005764 |
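
mimetypes.guess_type infers the type purely from the URL's extension, which is why the method falls back to 'image/jpeg' when nothing can be guessed:

from mimetypes import guess_type

print(guess_type("https://example.com/podcast.mp3"))   # ('audio/mpeg', None)
print(guess_type("https://example.com/cover.png"))     # ('image/png', None)
print(guess_type("https://example.com/no-extension"))  # (None, None) -> fallback applies
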
def make_json_response(rv):
""" Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
"""
# Tuple of (response, status, headers)
rv, status, headers = normalize_response_value(rv)
# JsonResponse
if isinstance(rv, JsonResponse):
return rv
# Data
return JsonResponse(rv, status, headers) | 0.004739 |
def _frozen_pool_single_run(kwargs):
"""Single run wrapper for the frozen pool, makes a single run and passes kwargs"""
idx = kwargs.pop('idx')
frozen_kwargs = _frozen_pool_single_run.kwargs
frozen_kwargs.update(kwargs) # in case of `run_map`
# we need to update job's args and kwargs
traj = frozen_kwargs['traj']
traj.f_set_crun(idx)
return _sigint_handling_single_run(frozen_kwargs) | 0.004796 |
def create_user(self, params):
"""Register a new user account."""
receivers = create_user.send(
sender=__name__,
request=this.request,
params=params,
)
if len(receivers) == 0:
raise NotImplementedError(
'Handler for `create_user` not registered.'
)
user = receivers[0][1]
user = auth.authenticate(
username=user.get_username(), password=params['password'],
)
self.do_login(user)
return get_user_token(
user=user, purpose=HashPurpose.RESUME_LOGIN,
minutes_valid=HASH_MINUTES_VALID[HashPurpose.RESUME_LOGIN],
) | 0.002857 |
def login(request):
"""View to check the persona assertion and remember the user"""
email = verify_login(request)
request.response.headers.extend(remember(request, email))
return {'redirect': request.POST.get('came_from', '/'), 'success': True} | 0.003846 |
def log_url (self, url_data):
"""Write csv formatted url check info."""
row = []
if self.has_part("urlname"):
row.append(url_data.base_url)
if self.has_part("parentname"):
row.append(url_data.parent_url)
if self.has_part("baseref"):
row.append(url_data.base_ref)
if self.has_part("result"):
row.append(url_data.result)
if self.has_part("warningstring"):
row.append(self.linesep.join(x[1] for x in url_data.warnings))
if self.has_part("infostring"):
row.append(self.linesep.join(url_data.info))
if self.has_part("valid"):
row.append(url_data.valid)
if self.has_part("url"):
row.append(url_data.url)
if self.has_part("line"):
row.append(url_data.line)
if self.has_part("column"):
row.append(url_data.column)
if self.has_part("name"):
row.append(url_data.name)
if self.has_part("dltime"):
row.append(url_data.dltime)
if self.has_part("dlsize"):
row.append(url_data.size)
if self.has_part("checktime"):
row.append(url_data.checktime)
if self.has_part("cached"):
row.append(0)
if self.has_part("level"):
row.append(url_data.level)
if self.has_part("modified"):
row.append(self.format_modified(url_data.modified))
self.writerow(map(strformat.unicode_safe, row))
self.flush() | 0.001948 |
def get_qualifier_id(self):
"""Gets the ``Qualifier Id`` for this authorization.
return: (osid.id.Id) - the qualifier ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
if not bool(self._my_map['qualifierId']):
raise errors.IllegalState('qualifier empty')
return Id(self._my_map['qualifierId']) | 0.004386 |
def _should_sign_response_header(header_name):
"""
:type header_name: str
:rtype: bool
"""
if header_name == _HEADER_SERVER_SIGNATURE:
return False
if re.match(_PATTERN_HEADER_PREFIX_BUNQ, header_name):
return True
return False | 0.003636 |
def from_string(cls, value):
"""Return single instance parsed from given accept header string."""
match = cls.pattern.search(value)
if match is None:
raise ValueError('"%s" is not a valid media type' % value)
try:
return cls(match.group('mime_type'), float(match.group('weight') or 1))
except ValueError:
return cls(value) | 0.007538 |
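
The class method relies on a regex with named groups for the media type and its q-weight; the pattern itself is defined elsewhere on the class and is not shown, so the one below is only an illustrative approximation of how such parsing works:

import re

ACCEPT_ITEM = re.compile(r'(?P<mime_type>[\w.+*-]+/[\w.+*-]+)\s*(?:;\s*q=(?P<weight>[\d.]+))?')

def parse(value):
    match = ACCEPT_ITEM.search(value)
    if match is None:
        raise ValueError('"%s" is not a valid media type' % value)
    return match.group("mime_type"), float(match.group("weight") or 1)

print(parse("text/html;q=0.8"))    # ('text/html', 0.8)
print(parse("application/json"))   # ('application/json', 1.0)
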
async def run_with_interrupt(task, *events, loop=None):
"""
Awaits a task while allowing it to be interrupted by one or more
`asyncio.Event`s.
If the task finishes without the events becoming set, the results of the
    task will be returned. If any of the events becomes set, the task will be
    cancelled and ``None`` will be returned.
:param task: Task to run
:param events: One or more `asyncio.Event`s which, if set, will interrupt
`task` and cause it to be cancelled.
:param loop: Optional event loop to use other than the default.
"""
loop = loop or asyncio.get_event_loop()
task = asyncio.ensure_future(task, loop=loop)
event_tasks = [loop.create_task(event.wait()) for event in events]
done, pending = await asyncio.wait([task] + event_tasks,
loop=loop,
return_when=asyncio.FIRST_COMPLETED)
for f in pending:
f.cancel() # cancel unfinished tasks
for f in done:
f.exception() # prevent "exception was not retrieved" errors
if task in done:
return task.result() # may raise exception
else:
return None | 0.000845 |
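
A compact demonstration of the same race between a task and an interrupt event, using asyncio.wait with FIRST_COMPLETED; it uses the modern asyncio API without explicit loop arguments, and the sleep durations are arbitrary:

import asyncio

async def worker():
    await asyncio.sleep(10)
    return "finished"

async def main():
    stop = asyncio.Event()
    task = asyncio.ensure_future(worker())
    waiter = asyncio.ensure_future(stop.wait())
    asyncio.get_running_loop().call_later(0.1, stop.set)   # trip the interrupt soon
    done, pending = await asyncio.wait({task, waiter}, return_when=asyncio.FIRST_COMPLETED)
    for f in pending:
        f.cancel()                 # cancel whichever side did not finish
    print("interrupted" if task not in done else task.result())

asyncio.run(main())
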
def wait_for_instance(
vm_=None,
data=None,
ip_address=None,
display_ssh_output=True,
call=None,
):
'''
Wait for an instance upon creation from the EC2 API, to become available
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The wait_for_instance action must be called with -a or --action.'
)
if vm_ is None:
vm_ = {}
if data is None:
data = {}
ssh_gateway_config = vm_.get(
'gateway', get_ssh_gateway_config(vm_)
)
__utils__['cloud.fire_event'](
'event',
'waiting for ssh',
'salt/cloud/{0}/waiting_for_ssh'.format(vm_['name']),
args={'ip_address': ip_address},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
ssh_connect_timeout = config.get_cloud_config_value(
'ssh_connect_timeout', vm_, __opts__, 900 # 15 minutes
)
ssh_port = config.get_cloud_config_value(
'ssh_port', vm_, __opts__, 22
)
if config.get_cloud_config_value('win_installer', vm_, __opts__):
username = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
win_passwd = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
win_deploy_auth_retries = config.get_cloud_config_value(
'win_deploy_auth_retries', vm_, __opts__, default=10
)
win_deploy_auth_retry_delay = config.get_cloud_config_value(
'win_deploy_auth_retry_delay', vm_, __opts__, default=1
)
use_winrm = config.get_cloud_config_value(
'use_winrm', vm_, __opts__, default=False
)
winrm_verify_ssl = config.get_cloud_config_value(
'winrm_verify_ssl', vm_, __opts__, default=True
)
if win_passwd and win_passwd == 'auto':
log.debug('Waiting for auto-generated Windows EC2 password')
while True:
password_data = get_password_data(
name=vm_['name'],
kwargs={
'key_file': vm_['private_key'],
},
call='action',
)
win_passwd = password_data.get('password', None)
if win_passwd is None:
log.debug(password_data)
# This wait is so high, because the password is unlikely to
# be generated for at least 4 minutes
time.sleep(60)
else:
logging_data = password_data
logging_data['password'] = 'XXX-REDACTED-XXX'
logging_data['passwordData'] = 'XXX-REDACTED-XXX'
log.debug(logging_data)
vm_['win_password'] = win_passwd
break
# SMB used whether psexec or winrm
if not salt.utils.cloud.wait_for_port(ip_address,
port=445,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to connect to remote windows host'
)
# If not using winrm keep same psexec behavior
if not use_winrm:
log.debug('Trying to authenticate via SMB using psexec')
if not salt.utils.cloud.validate_windows_cred(ip_address,
username,
win_passwd,
retries=win_deploy_auth_retries,
retry_delay=win_deploy_auth_retry_delay):
raise SaltCloudSystemExit(
'Failed to authenticate against remote windows host (smb)'
)
# If using winrm
else:
# Default HTTPS port can be changed in cloud configuration
winrm_port = config.get_cloud_config_value(
'winrm_port', vm_, __opts__, default=5986
)
# Wait for winrm port to be available
if not salt.utils.cloud.wait_for_port(ip_address,
port=winrm_port,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to connect to remote windows host (winrm)'
)
log.debug('Trying to authenticate via Winrm using pywinrm')
if not salt.utils.cloud.wait_for_winrm(ip_address,
winrm_port,
username,
win_passwd,
timeout=ssh_connect_timeout,
verify=winrm_verify_ssl):
raise SaltCloudSystemExit(
'Failed to authenticate against remote windows host'
)
elif salt.utils.cloud.wait_for_port(ip_address,
port=ssh_port,
timeout=ssh_connect_timeout,
gateway=ssh_gateway_config
):
# If a known_hosts_file is configured, this instance will not be
# accessible until it has a host key. Since this is provided on
# supported instances by cloud-init, and viewable to us only from the
# console output (which may take several minutes to become available,
# we have some more waiting to do here.
known_hosts_file = config.get_cloud_config_value(
'known_hosts_file', vm_, __opts__, default=None
)
if known_hosts_file:
console = {}
while 'output_decoded' not in console:
console = get_console_output(
instance_id=vm_['instance_id'],
call='action',
location=get_location(vm_)
)
pprint.pprint(console)
time.sleep(5)
output = salt.utils.stringutils.to_unicode(console['output_decoded'])
comps = output.split('-----BEGIN SSH HOST KEY KEYS-----')
if len(comps) < 2:
# Fail; there are no host keys
return False
comps = comps[1].split('-----END SSH HOST KEY KEYS-----')
keys = ''
for line in comps[0].splitlines():
if not line:
continue
keys += '\n{0} {1}'.format(ip_address, line)
with salt.utils.files.fopen(known_hosts_file, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(keys))
fp_.close()
for user in vm_['usernames']:
if salt.utils.cloud.wait_for_passwd(
host=ip_address,
port=ssh_port,
username=user,
ssh_timeout=config.get_cloud_config_value(
'wait_for_passwd_timeout', vm_, __opts__, default=1 * 60
),
key_filename=vm_['key_filename'],
display_ssh_output=display_ssh_output,
gateway=ssh_gateway_config,
maxtries=config.get_cloud_config_value(
'wait_for_passwd_maxtries', vm_, __opts__, default=15
),
known_hosts_file=config.get_cloud_config_value(
'known_hosts_file', vm_, __opts__,
default='/dev/null'
),
):
__opts__['ssh_username'] = user
vm_['ssh_username'] = user
break
else:
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
else:
raise SaltCloudSystemExit(
'Failed to connect to remote ssh'
)
if 'reactor' in vm_ and vm_['reactor'] is True:
__utils__['cloud.fire_event'](
'event',
'ssh is available',
'salt/cloud/{0}/ssh_ready_reactor'.format(vm_['name']),
args={'ip_address': ip_address},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return vm_ | 0.001374 |
def remove_group_from_favorites(self, id):
"""
Remove group from favorites.
Remove a group from the current user's favorites.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""the ID or SIS ID of the group to remove"""
path["id"] = id
self.logger.debug("DELETE /api/v1/users/self/favorites/groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/users/self/favorites/groups/{id}".format(**path), data=data, params=params, single_item=True) | 0.006051 |
def name():
"Get/view the name for the well known ID of a Projection"
if self.wkid in projected:
return projected[self.wkid]
elif self.wkid in geographic:
return geographic[self.wkid]
else:
raise KeyError("Not a known WKID.") | 0.006826 |
def get_chain(self, use_login=False, use_fork=False):
"""
Return the :class:`mitogen.parent.CallChain` to use for executing
function calls.
:param bool use_login:
If :data:`True`, always return the chain for the login account
rather than any active become user.
:param bool use_fork:
If :data:`True`, return the chain for the fork parent.
:returns mitogen.parent.CallChain:
"""
self._connect()
if use_login:
return self.login_context.default_call_chain
# See FORK_SUPPORTED comments in target.py.
if use_fork and self.init_child_result['fork_context'] is not None:
return self.init_child_result['fork_context'].default_call_chain
return self.chain | 0.002488 |
def handle_single_request(self, request_object):
"""
Handles a single request object and returns the correct result as follows:
- A valid response object if it is a regular request (with ID)
        - ``None`` if it was a notification (if None is returned, a response object with
          "received" body was already sent to the client).
:param request_object: A :py:class:`gemstone.core.structs.JsonRpcRequest` object
representing a Request object
:return: A :py:class:`gemstone.core.structs.JsonRpcResponse` object representing a
Response object or None if no response is expected (it was a notification)
"""
# don't handle responses?
if isinstance(request_object, JsonRpcResponse):
return request_object
error = None
result = None
id_ = request_object.id
# validate method name
if request_object.method not in self.methods:
resp = GenericResponse.METHOD_NOT_FOUND
resp.id = id_
return resp
# check for private access
method = self.methods[request_object.method]
if isinstance(request_object.params, (list, tuple)):
self.call_method_from_all_plugins("on_method_call", request_object)
else:
self.call_method_from_all_plugins("on_method_call", request_object)
if self._method_is_private(method):
if not self.get_current_user():
resp = GenericResponse.ACCESS_DENIED
resp.id = id_
return resp
method = self.prepare_method_call(method, request_object.params)
# before request hook
_method_duration = time.time()
try:
result = yield self.call_method(method)
except Exception as e:
# catch all exceptions generated by method
# and handle in a special manner only the TypeError
if isinstance(e, TypeError):
# TODO: find a proper way to check that the function got the wrong
# parameters (with **kwargs)
if "got an unexpected keyword argument" in e.args[0]:
resp = GenericResponse.INVALID_PARAMS
resp.id = id_
return resp
# TODO: find a proper way to check that the function got the wrong
# parameters (with *args)
elif "takes" in e.args[0] and "positional argument" in e.args[0] and "were given" in \
e.args[0]:
resp = GenericResponse.INVALID_PARAMS
resp.id = id_
return resp
elif "missing" in e.args[0] and "required positional argument" in e.args[0]:
resp = GenericResponse.INVALID_PARAMS
resp.id = id_
return resp
# generic handling for any exception (even TypeError) that
# is not generated because of bad parameters
self.call_method_from_all_plugins("on_internal_error", e)
err = GenericResponse.INTERNAL_ERROR
err.id = id_
err.error["data"] = {
"class": type(e).__name__,
"info": str(e)
}
return err
to_return_resp = JsonRpcResponse(result=result, error=error, id=id_)
return to_return_resp | 0.003164 |
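
Behind the JsonRpcResponse wrapper, a success reply and an error reply are ordinary JSON-RPC 2.0 objects. A minimal sketch of what is ultimately serialised back to the client; the field layout follows the JSON-RPC 2.0 specification rather than this library's internals:

import json

def success(result, id_):
    return {"jsonrpc": "2.0", "result": result, "id": id_}

def error(code, message, id_, data=None):
    err = {"code": code, "message": message}
    if data is not None:
        err["data"] = data
    return {"jsonrpc": "2.0", "error": err, "id": id_}

print(json.dumps(success(42, 1)))
print(json.dumps(error(-32601, "Method not found", 2)))
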