text (stringlengths 78-104k) | score (float64, 0-0.18) |
---|---|
def ctxtUseOptions(self, options):
"""Applies the options to the parser context """
ret = libxml2mod.xmlCtxtUseOptions(self._o, options)
return ret | 0.011696 |
def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict):
"""Create axes in the figure."""
xlabel = kwargs.pop("xlabel", hist.axis_names[0])
ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None)
vega["axes"] = [
{"orient": "bottom", "scale": "xscale", "title": xlabel},
{"orient": "left", "scale": "yscale", "title": ylabel}
] | 0.00495 |
def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
"""
self.session.send(self._query_socket, "history_request", content={})
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history'] | 0.004619 |
def authenticate_multi(self, msg: Dict, signatures: Dict[str, str],
threshold: Optional[int] = None):
"""
    :param msg: the message whose signatures are to be verified
    :param signatures: A mapping from identifiers to signatures.
    :param threshold: The number of successful signature verifications
        required. By default all signatures are required to be verified.
    :return: returns the identifiers whose signatures were matched and
        correct; a SigningException is raised if the threshold is not met
""" | 0.007519 |
def __read_device(self):
"""Read the state of the gamepad."""
state = XinputState()
res = self.manager.xinput.XInputGetState(
self.__device_number, ctypes.byref(state))
if res == XINPUT_ERROR_SUCCESS:
return state
if res != XINPUT_ERROR_DEVICE_NOT_CONNECTED:
raise RuntimeError(
"Unknown error %d attempting to get state of device %d" % (
res, self.__device_number))
# else (device is not connected)
return None | 0.003711 |
def copyData(self, source):
"""
Subclasses may override this method.
If so, they should call the super.
"""
for attr in self.copyAttributes:
selfValue = getattr(self, attr)
sourceValue = getattr(source, attr)
if isinstance(selfValue, BaseObject):
selfValue.copyData(sourceValue)
else:
setattr(self, attr, sourceValue) | 0.004577 |
def transacted(func):
"""
Return a callable which will invoke C{func} in a transaction using the
C{store} attribute of the first parameter passed to it. Typically this is
used to create Item methods which are automatically run in a transaction.
The attributes of the returned callable will resemble those of C{func} as
closely as L{twisted.python.util.mergeFunctionMetadata} can make them.
"""
def transactionified(item, *a, **kw):
return item.store.transact(func, item, *a, **kw)
return mergeFunctionMetadata(func, transactionified) | 0.001727 |
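A minimal usage sketch of the `transacted` decorator above, assuming twisted is installed (for `mergeFunctionMetadata`); `FakeStore` and `Account` are hypothetical stand-ins whose `store.transact` simply invokes the function, whereas a real Axiom store would wrap it in a database transaction:

class FakeStore(object):
    def transact(self, func, *args, **kwargs):
        # A real store would begin/commit a transaction around this call.
        return func(*args, **kwargs)

class Account(object):
    def __init__(self, balance):
        self.store = FakeStore()
        self.balance = balance

    @transacted
    def deposit(self, amount):
        self.balance += amount
        return self.balance

account = Account(balance=10)
print(account.deposit(5))  # 15, routed through account.store.transact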
def local_machine():
"""Option to do something on local machine."""
common_conf()
env.machine = 'local'
env.pg_admin_role = settings.LOCAL_PG_ADMIN_ROLE
env.db_backup_dir = settings.DJANGO_PROJECT_ROOT
env.media_backup_dir = settings.DJANGO_PROJECT_ROOT
# Not sure what this is good for. Not used in our fabfile.
# env.media_root = settings.DJANGO_MEDIA_ROOT
# env.local_db_password = settings.DJANGO_DB_PASSWORD
env.db_role = settings.DATABASES['default']['USER']
env.db_name = settings.DATABASES['default']['NAME'] | 0.001779 |
def get_resolved_config(
egrc_path,
examples_dir,
custom_dir,
use_color,
pager_cmd,
squeeze,
debug=True,
):
"""
Create a Config namedtuple. Passed in values will override defaults.
This function is responsible for producing a Config that is correct for the
    passed in arguments. In general, it prefers command line options first,
then values from the egrc, and finally defaults.
examples_dir and custom_dir when returned from this function are fully
expanded.
"""
# Call this with the passed in values, NOT the resolved values. We are
# informing the caller only if values passed in at the command line are
# invalid. If you pass a path to a nonexistent egrc, for example, it's
# helpful to know. If you don't have an egrc, and thus one isn't found
# later at the default location, we don't want to notify them.
inform_if_paths_invalid(egrc_path, examples_dir, custom_dir)
# Expand the paths so we can use them with impunity later.
examples_dir = get_expanded_path(examples_dir)
custom_dir = get_expanded_path(custom_dir)
# The general rule is: caller-defined, egrc-defined, defaults. We'll try
# and get all three then use get_priority to choose the right one.
egrc_config = get_egrc_config(egrc_path)
resolved_examples_dir = get_priority(
examples_dir,
egrc_config.examples_dir,
DEFAULT_EXAMPLES_DIR
)
resolved_examples_dir = get_expanded_path(resolved_examples_dir)
resolved_custom_dir = get_priority(
custom_dir,
egrc_config.custom_dir,
DEFAULT_CUSTOM_DIR
)
resolved_custom_dir = get_expanded_path(resolved_custom_dir)
resolved_use_color = get_priority(
use_color,
egrc_config.use_color,
DEFAULT_USE_COLOR
)
resolved_pager_cmd = get_priority(
pager_cmd,
egrc_config.pager_cmd,
DEFAULT_PAGER_CMD
)
# There is no command line option for this, so in this case we will use the
# priority: egrc, environment, DEFAULT.
environment_editor_cmd = get_editor_cmd_from_environment()
resolved_editor_cmd = get_priority(
egrc_config.editor_cmd,
environment_editor_cmd,
DEFAULT_EDITOR_CMD
)
color_config = None
if resolved_use_color:
default_color_config = get_default_color_config()
color_config = merge_color_configs(
egrc_config.color_config,
default_color_config
)
resolved_squeeze = get_priority(
squeeze,
egrc_config.squeeze,
DEFAULT_SQUEEZE
)
# Pass in None, as subs can't be specified at the command line.
resolved_subs = get_priority(
None,
egrc_config.subs,
get_default_subs()
)
result = Config(
examples_dir=resolved_examples_dir,
custom_dir=resolved_custom_dir,
color_config=color_config,
use_color=resolved_use_color,
pager_cmd=resolved_pager_cmd,
editor_cmd=resolved_editor_cmd,
squeeze=resolved_squeeze,
subs=resolved_subs,
)
return result | 0.000318 |
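The resolution above repeatedly calls a `get_priority` helper that is not shown in this snippet. A minimal sketch of the behaviour the surrounding code implies ("first non-None value wins"); this is an assumption for illustration, not eg's actual implementation:

def get_priority(first, second, third):
    """Return the first of the three values that is not None (assumed behaviour)."""
    if first is not None:
        return first
    if second is not None:
        return second
    return third

# Command-line value beats egrc value beats default:
assert get_priority(None, '~/.eg/custom', '/default/custom') == '~/.eg/custom'
assert get_priority(None, None, '/default/custom') == '/default/custom'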
def _create(self):
"""Create new callback resampler."""
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle | 0.004357 |
def psetex(self, key, time, value):
"""
Set the value of ``key`` to ``value`` that expires in ``time``
milliseconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
return self.set(key, value, px=time) | 0.00722 |
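A small usage sketch, assuming the redis-py package and a locally running Redis server; both calls below are equivalent and expire the key after 1500 milliseconds:

from datetime import timedelta

import redis  # assumes the redis-py package is installed

r = redis.Redis()  # assumes a server on localhost:6379
r.psetex('session:42', 1500, 'payload')
r.psetex('session:42', timedelta(milliseconds=1500), 'payload')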
def _get_provider_session(self, session_name):
"""Returns the requested provider session.
Instantiates a new one if the named session is not already known.
"""
agent_key = self._get_agent_key()
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
session_class = getattr(self._provider_manager, 'get_' + session_name + '_for_family')
if self._proxy is None:
if 'notification_session' in session_name:
# Is there something else we should do about the receiver field?
session = session_class('fake receiver', self._catalog.get_id())
else:
session = session_class(self._catalog.get_id())
else:
if 'notification_session' in session_name:
# Is there something else we should do about the receiver field?
session = session_class('fake receiver', self._catalog.get_id(), self._proxy)
else:
session = session_class(self._catalog.get_id(), self._proxy)
self._set_family_view(session)
self._set_object_view(session)
self._set_operable_view(session)
self._set_containable_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session | 0.005253 |
def _expand_variables(self, config, hostname ):
"""
Return a dict of config options with expanded substitutions
for a given hostname.
Please refer to man ssh_config(5) for the parameters that
are replaced.
@param config: the config for the hostname
    @type config: dict
@param hostname: the hostname that the config belongs to
@type hostname: str
"""
if 'hostname' in config:
config['hostname'] = config['hostname'].replace('%h',hostname)
else:
config['hostname'] = hostname
if 'port' in config:
port = config['port']
else:
port = SSH_PORT
user = os.getenv('USER')
if 'user' in config:
remoteuser = config['user']
else:
remoteuser = user
host = socket.gethostname().split('.')[0]
fqdn = socket.getfqdn()
homedir = os.path.expanduser('~')
replacements = {'controlpath' :
[
('%h', config['hostname']),
('%l', fqdn),
('%L', host),
('%n', hostname),
('%p', port),
('%r', remoteuser),
('%u', user)
],
'identityfile' :
[
('~', homedir),
('%d', homedir),
('%h', config['hostname']),
('%l', fqdn),
('%u', user),
('%r', remoteuser)
]
}
for k in config:
if k in replacements:
for find, replace in replacements[k]:
config[k] = config[k].replace(find, str(replace))
return config | 0.004355 |
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result] | 0.004 |
def gpib_command(library, session, data):
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
    :param data: data to write.
:type data: bytes
:return: Number of written bytes, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
return_count = ViUInt32()
# [ViSession, ViBuf, ViUInt32, ViPUInt32]
ret = library.viGpibCommand(session, data, len(data), byref(return_count))
return return_count.value, ret | 0.001548 |
def get(name, defval=None):
'''
Return an object from the embedded synapse data folder.
Example:
        for tld in synapse.data.get('iana.tlds'):
dostuff(tld)
NOTE: Files are named synapse/data/<name>.mpk
'''
with s_datfile.openDatFile('synapse.data/%s.mpk' % name) as fd:
return s_msgpack.un(fd.read()) | 0.002849 |
def _get_csrf_token(self):
"""Return the CSRF Token of easyname login form."""
from bs4 import BeautifulSoup
home_response = self.session.get(self.URLS['login'])
self._log('Home', home_response)
assert home_response.status_code == 200, \
'Could not load Easyname login page.'
html = BeautifulSoup(home_response.content, 'html.parser')
self._log('Home', html)
csrf_token_field = html.find('input', {'id': 'loginxtoken'})
assert csrf_token_field is not None, 'Could not find login token.'
return csrf_token_field['value'] | 0.003268 |
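The token extraction above is plain BeautifulSoup usage; a standalone sketch against a tiny inline page (assuming the bs4 package), independent of the Easyname session handling:

from bs4 import BeautifulSoup

page = BeautifulSoup(
    '<form><input id="loginxtoken" value="abc123"></form>',
    'html.parser',
)
field = page.find('input', {'id': 'loginxtoken'})
assert field is not None, 'Could not find login token.'
print(field['value'])  # abc123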
def interp_qa_v1(self):
"""Calculate the lake outflow based on linear interpolation.
Required control parameters:
|N|
|llake_control.Q|
Required derived parameters:
|llake_derived.TOY|
|llake_derived.VQ|
Required aide sequence:
|llake_aides.VQ|
Calculated aide sequence:
|llake_aides.QA|
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01','2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep()
Next, for the sake of brevity, define a test function:
>>> def test(*vqs):
... for vq in vqs:
... aides.vq(vq)
... model.interp_qa_v1()
... print(repr(aides.vq), repr(aides.qa))
The following three relationships between the auxiliary term `vq` and
the tabulated discharge `q` are taken as examples. Each one is valid
for one of the first three days in January and is defined via five
nodes:
>>> n(5)
>>> derived.toy.update()
>>> derived.vq(_1_1_6=[0., 1., 2., 2., 3.],
... _1_2_6=[0., 1., 2., 2., 3.],
... _1_3_6=[0., 1., 2., 3., 4.])
>>> q(_1_1_6=[0., 0., 0., 0., 0.],
... _1_2_6=[0., 2., 5., 6., 9.],
... _1_3_6=[0., 2., 1., 3., 2.])
In the first example, discharge does not depend on the actual value
of the auxiliary term and is always zero:
>>> model.idx_sim = pub.timegrids.init['2000.01.01']
>>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
vq(0.0) qa(0.0)
vq(0.75) qa(0.0)
vq(1.0) qa(0.0)
vq(1.333333) qa(0.0)
vq(2.0) qa(0.0)
vq(2.333333) qa(0.0)
vq(3.0) qa(0.0)
vq(3.333333) qa(0.0)
    The second example demonstrates that relationships are allowed to
contain jumps, which is the case for the (`vq`,`q`) pairs (2,6) and
(2,7). Also it demonstrates that when the highest `vq` value is
exceeded linear extrapolation based on the two highest (`vq`,`q`)
pairs is performed:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
>>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
vq(0.0) qa(0.0)
vq(0.75) qa(1.5)
vq(1.0) qa(2.0)
vq(1.333333) qa(3.0)
vq(2.0) qa(5.0)
vq(2.333333) qa(7.0)
vq(3.0) qa(9.0)
vq(3.333333) qa(10.0)
The third example shows that the relationships do not need to be
    arranged monotonically increasing. Particularly for the extrapolation
range, this could result in negative values of `qa`, which is avoided
by setting it to zero in such cases:
>>> model.idx_sim = pub.timegrids.init['2000.01.03']
>>> test(.5, 1.5, 2.5, 3.5, 4.5, 10.)
vq(0.5) qa(1.0)
vq(1.5) qa(1.5)
vq(2.5) qa(2.0)
vq(3.5) qa(2.5)
vq(4.5) qa(1.5)
vq(10.0) qa(0.0)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
aid = self.sequences.aides.fastaccess
idx = der.toy[self.idx_sim]
for jdx in range(1, con.n):
if der.vq[idx, jdx] >= aid.vq:
break
aid.qa = ((aid.vq-der.vq[idx, jdx-1]) *
(con.q[idx, jdx]-con.q[idx, jdx-1]) /
(der.vq[idx, jdx]-der.vq[idx, jdx-1]) +
con.q[idx, jdx-1])
aid.qa = max(aid.qa, 0.) | 0.000272 |
def write_shas_to_shastore(sha_dict):
"""
Writes a sha1 dictionary stored in memory to
the .shastore file
"""
if sys.version_info[0] < 3:
fn_open = open
else:
fn_open = io.open
with fn_open(".shastore", "w") as fh:
fh.write("---\n")
fh.write('sake version: {}\n'.format(constants.VERSION))
if sha_dict:
fh.write(yaml.dump(sha_dict))
fh.write("...") | 0.002294 |
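A minimal call sketch for the writer above, assuming sake's `constants`, `yaml`, and `io` imports are in scope; the sha mapping shown is hypothetical:

# Hypothetical sha1 mapping; the real structure is whatever sake tracks.
shas = {'Makefile': 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'}
write_shas_to_shastore(shas)
# .shastore now holds "---", a "sake version: ..." line,
# the YAML dump of `shas`, and the closing "..."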
def _mount_devicemapper(self, identifier):
"""
Devicemapper mount backend.
"""
info = self.client.info()
        # cid is the container_id of the temp container
cid = self._identifier_as_cid(identifier)
cinfo = self.client.inspect_container(cid)
dm_dev_name, dm_dev_id, dm_dev_size = '', '', ''
dm_pool = info['DriverStatus'][0][1]
try:
dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
except:
# TODO: deprecated when GraphDriver patch makes it upstream
dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
dm_dev_name = dm_pool.replace('pool', cid)
        # grab list of devices
dmsetupLs = dmsetupWrap.getDmsetupLs()
if dmsetupLs == -1:
raise MountError('Error: dmsetup returned non zero error ')
# ENSURE device exists!
if dm_dev_name not in dmsetupLs:
# IF device doesn't exist yet we create it!
Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
dm_pool)
# check that device is shown in /dev/mapper, if not we can use the
# major minor numbers in /dev/block
mapperDir = os.path.join('/dev/mapper', dm_dev_name)
if os.path.exists(mapperDir):
dm_dev_path = mapperDir
else:
# get new dmsetupLs after device has been created!
dmsetupLs = dmsetupWrap.getDmsetupLs()
            # test if device exists in dmsetup ls; if so, get its major:minor from /dev/block
majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs)
blockDir = os.path.join('/dev/block', majorMinor)
            # FIXME: could be due to VirtualBox, but occasionally the block device
            # will not be created by the time we check for it below, so we
            # wait briefly to let it appear
import time
time.sleep(0.1)
if os.path.exists(blockDir):
dm_dev_path = blockDir
else:
raise MountError('Error: Block device found in dmsetup ls '
'but not in /dev/mapper/ or /dev/block')
options = ['ro', 'nosuid', 'nodev']
# XFS should get nouuid
fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding())
        if fstype.upper() == 'XFS' and 'nouuid' not in options:
            options.append('nouuid')
try:
Mount.mount_path(dm_dev_path, self.mountpoint)
except MountError as de:
self._cleanup_container(cinfo)
Mount.remove_thin_device(dm_dev_name)
raise de
# return the temp container ID so we can unmount later
return cid | 0.001664 |
def is_value_in(constants_group, value):
"""
Checks whether value can be found in the given constants group, which in
turn, should be a Django-like choices tuple.
"""
for const_value, label in constants_group:
if const_value == value:
return True
return False | 0.0033 |
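A quick usage sketch with a Django-style choices tuple; `STATUS_CHOICES` is made up for illustration:

STATUS_CHOICES = (
    ('draft', 'Draft'),
    ('published', 'Published'),
    ('archived', 'Archived'),
)

assert is_value_in(STATUS_CHOICES, 'published') is True
assert is_value_in(STATUS_CHOICES, 'deleted') is False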
def login(self, email=None, password=None):
"""
Interactive login using the `cloudgenix.API` object. This function is more robust and handles SAML and MSP accounts.
        Expects interactive capability. If this is not available, use `cloudgenix.API.post.login` directly.
**Parameters:**:
- **email**: Email to log in for, will prompt if not entered.
- **password**: Password to log in with, will prompt if not entered. Ignored for SAML v2.0 users.
**Returns:** Bool. In addition the function will mutate the `cloudgenix.API` constructor items as needed.
"""
# if email not given in function, or if first login fails, prompt.
if email is None:
# If user is not set, pull from cache. If not in cache, prompt.
if self._parent_class.email:
email = self._parent_class.email
else:
email = compat_input("login: ")
if password is None:
# if pass not given on function, or if first login fails, prompt.
if self._parent_class._password:
password = self._parent_class._password
else:
password = getpass.getpass()
# Try and login
# For SAML 2.0 support, set the Referer URL prior to logging in.
# add referer header to the session.
self._parent_class.add_headers({'Referer': "{}/v2.0/api/login".format(self._parent_class.controller)})
# call the login API.
response = self._parent_class.post.login({"email": email, "password": password})
if response.cgx_status:
# Check for SAML 2.0 login
if not response.cgx_content.get('x_auth_token'):
urlpath = response.cgx_content.get("urlpath", "")
request_id = response.cgx_content.get("requestId", "")
if urlpath and request_id:
# SAML 2.0
print('SAML 2.0: To finish login open the following link in a browser\n\n{0}\n\n'.format(urlpath))
found_auth_token = False
for i in range(20):
print('Waiting for {0} seconds for authentication...'.format((20 - i) * 5))
saml_response = self.check_sso_login(email, request_id)
if saml_response.cgx_status and saml_response.cgx_content.get('x_auth_token'):
found_auth_token = True
break
# wait before retry.
time.sleep(5)
if not found_auth_token:
print("Login time expired! Please re-login.\n")
# log response when debug
try:
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response, indent=4))
except (TypeError, ValueError):
# not JSON response, don't pretty print log.
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", str(response))
# print login error
print('Login failed, please try again', response)
# Flush command-line entered login info if failure.
self._parent_class.email = None
self._parent_class.password = None
return False
api_logger.info('Login successful:')
# if we got here, we either got an x_auth_token in the original login, or
# we got an auth_token cookie set via SAML. Figure out which.
auth_token = response.cgx_content.get('x_auth_token')
if auth_token:
# token in the original login (not saml) means region parsing has not been done.
# do now, and recheck if cookie needs set.
auth_region = self._parent_class.parse_region(response)
self._parent_class.update_region_to_controller(auth_region)
self._parent_class.reparse_login_cookie_after_region_update(response)
# debug info if needed
api_logger.debug("AUTH_TOKEN=%s", response.cgx_content.get('x_auth_token'))
# Step 2: Get operator profile for tenant ID and other info.
if self.interactive_update_profile_vars():
# pull tenant detail
if self._parent_class.tenant_id:
# add tenant values to API() object
if self.interactive_tenant_update_vars():
# Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
if self._parent_class.is_esp:
# ESP/MSP!
choose_status, chosen_client_id = self.interactive_client_choice()
if choose_status:
# attempt to login as client
clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
if clogin_resp.cgx_status:
# login successful, update profile and tenant info
c_profile = self.interactive_update_profile_vars()
t_profile = self.interactive_tenant_update_vars()
if c_profile and t_profile:
# successful full client login.
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return True
else:
if t_profile:
print("ESP Client Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
else:
print("ESP Client Login failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
else:
print("ESP Client Choice failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
# successful!
# clear password out of memory
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return True
else:
print("Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
else:
# Profile detail retrieval failed
self._parent_class.email = None
self._parent_class._password = None
return False
api_logger.info("EMAIL = %s", self._parent_class.email)
api_logger.info("USER_ID = %s", self._parent_class._user_id)
api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
else:
# log response when debug
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response.cgx_content, indent=4))
# print login error
print('Login failed, please try again:', response.cgx_content)
# Flush command-line entered login info if failure.
self._parent_class.email = None
self._parent_class.password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False | 0.003324 |
def map_vals(func, dict_):
"""
applies a function to each of the keys in a dictionary
Args:
func (callable): a function or indexable object
dict_ (dict): a dictionary
Returns:
newdict: transformed dictionary
CommandLine:
python -m ubelt.util_dict map_vals
Example:
>>> import ubelt as ub
>>> dict_ = {'a': [1, 2, 3], 'b': []}
>>> func = len
>>> newdict = ub.map_vals(func, dict_)
>>> assert newdict == {'a': 3, 'b': 0}
>>> print(newdict)
>>> # Can also use indexables as `func`
>>> dict_ = {'a': 0, 'b': 1}
>>> func = [42, 21]
>>> newdict = ub.map_vals(func, dict_)
>>> assert newdict == {'a': 42, 'b': 21}
>>> print(newdict)
"""
if not hasattr(func, '__call__'):
func = func.__getitem__
keyval_list = [(key, func(val)) for key, val in six.iteritems(dict_)]
dictclass = OrderedDict if isinstance(dict_, OrderedDict) else dict
newdict = dictclass(keyval_list)
# newdict = type(dict_)(keyval_list)
return newdict | 0.000906 |
def _create_wcs (fitsheader):
"""For compatibility between astropy and pywcs."""
wcsmodule = _load_wcs_module ()
is_pywcs = hasattr (wcsmodule, 'UnitConverter')
wcs = wcsmodule.WCS (fitsheader)
wcs.wcs.set ()
wcs.wcs.fix () # I'm interested in MJD computation via datfix()
if hasattr (wcs, 'wcs_pix2sky'):
wcs.wcs_pix2world = wcs.wcs_pix2sky
wcs.wcs_world2pix = wcs.wcs_sky2pix
return wcs | 0.020501 |
def get_serializer_class(configuration_model):
""" Returns a ConfigurationModel serializer class for the supplied configuration_model. """
class AutoConfigModelSerializer(ModelSerializer):
"""Serializer class for configuration models."""
class Meta(object):
"""Meta information for AutoConfigModelSerializer."""
model = configuration_model
fields = '__all__'
def create(self, validated_data):
if "changed_by_username" in self.context:
model = get_user_model()
validated_data['changed_by'] = model.objects.get(username=self.context["changed_by_username"])
return super(AutoConfigModelSerializer, self).create(validated_data)
return AutoConfigModelSerializer | 0.005083 |
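A usage sketch of the factory above, assuming a hypothetical `MyFeatureFlag` ConfigurationModel with an `enabled` field; the returned Django REST Framework serializer fills `changed_by` from the `changed_by_username` context key on create:

FeatureFlagSerializer = get_serializer_class(MyFeatureFlag)  # MyFeatureFlag is hypothetical

serializer = FeatureFlagSerializer(
    data={'enabled': True},
    context={'changed_by_username': 'admin'},
)
if serializer.is_valid():
    serializer.save()  # create() resolves 'admin' to a user and sets changed_by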
def _write(self, frame):
"""
Write a YubiKeyFrame to the USB HID.
Includes polling for YubiKey readiness before each write.
"""
for data in frame.to_feature_reports(debug=self.debug):
debug_str = None
if self.debug:
(data, debug_str) = data
# first, we ensure the YubiKey will accept a write
self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
self._raw_write(data, debug_str)
return True | 0.003914 |
def logger_focus(self,i,focus_shift=16):
"""
        focuses the logger on an index `focus_shift` entries below i (16 by default)
@param: i -> index to focus on
"""
if self.logger.GetItemCount()-1 > i+focus_shift:
i += focus_shift
else:
i = self.logger.GetItemCount()-1
self.logger.Focus(i) | 0.01194 |
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True} | 0.001543 |
def _newton_refine(s, nodes1, t, nodes2):
r"""Apply one step of 2D Newton's method.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
We want to use Newton's method on the function
.. math::
F(s, t) = B_1(s) - B_2(t)
to refine :math:`\left(s_{\ast}, t_{\ast}\right)`. Using this,
and the Jacobian :math:`DF`, we "solve"
.. math::
\left[\begin{array}{c}
0 \\ 0 \end{array}\right] \approx
F\left(s_{\ast} + \Delta s, t_{\ast} + \Delta t\right) \approx
F\left(s_{\ast}, t_{\ast}\right) +
\left[\begin{array}{c c}
B_1'\left(s_{\ast}\right) &
- B_2'\left(t_{\ast}\right) \end{array}\right]
\left[\begin{array}{c}
\Delta s \\ \Delta t \end{array}\right]
and refine with the component updates :math:`\Delta s` and
:math:`\Delta t`.
.. note::
This implementation assumes the curves live in
:math:`\mathbf{R}^2`.
For example, the curves
.. math::
\begin{align*}
B_1(s) &= \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^2
+ \left[\begin{array}{c} 2 \\ 4 \end{array}\right] 2s(1 - s)
+ \left[\begin{array}{c} 4 \\ 0 \end{array}\right] s^2 \\
B_2(t) &= \left[\begin{array}{c} 2 \\ 0 \end{array}\right] (1 - t)
+ \left[\begin{array}{c} 0 \\ 3 \end{array}\right] t
\end{align*}
intersect at the point
:math:`B_1\left(\frac{1}{4}\right) = B_2\left(\frac{1}{2}\right) =
\frac{1}{2} \left[\begin{array}{c} 2 \\ 3 \end{array}\right]`.
However, starting from the wrong point we have
.. math::
\begin{align*}
F\left(\frac{3}{8}, \frac{1}{4}\right) &= \frac{1}{8}
\left[\begin{array}{c} 0 \\ 9 \end{array}\right] \\
DF\left(\frac{3}{8}, \frac{1}{4}\right) &=
\left[\begin{array}{c c}
4 & 2 \\ 2 & -3 \end{array}\right] \\
\Longrightarrow \left[\begin{array}{c} \Delta s \\ \Delta t
\end{array}\right] &= \frac{9}{64} \left[\begin{array}{c}
-1 \\ 2 \end{array}\right].
\end{align*}
.. image:: ../images/newton_refine1.png
:align: center
.. testsetup:: newton-refine1, newton-refine2, newton-refine3
import numpy as np
import bezier
from bezier._intersection_helpers import newton_refine
machine_eps = np.finfo(np.float64).eps
def cuberoot(value):
return np.cbrt(value)
.. doctest:: newton-refine1
>>> nodes1 = np.asfortranarray([
... [0.0, 2.0, 4.0],
... [0.0, 4.0, 0.0],
... ])
>>> nodes2 = np.asfortranarray([
... [2.0, 0.0],
... [0.0, 3.0],
... ])
>>> s, t = 0.375, 0.25
>>> new_s, new_t = newton_refine(s, nodes1, t, nodes2)
>>> 64.0 * (new_s - s)
-9.0
>>> 64.0 * (new_t - t)
18.0
.. testcleanup:: newton-refine1
import make_images
curve1 = bezier.Curve(nodes1, degree=2)
curve2 = bezier.Curve(nodes2, degree=1)
make_images.newton_refine1(s, new_s, curve1, t, new_t, curve2)
For "typical" curves, we converge to a solution quadratically.
This means that the number of correct digits doubles every
iteration (until machine precision is reached).
.. image:: ../images/newton_refine2.png
:align: center
.. doctest:: newton-refine2
>>> nodes1 = np.asfortranarray([
... [0.0, 0.25, 0.5, 0.75, 1.0],
... [0.0, 2.0 , -2.0, 2.0 , 0.0],
... ])
>>> nodes2 = np.asfortranarray([
... [0.0, 0.25, 0.5, 0.75, 1.0],
... [1.0, 0.5 , 0.5, 0.5 , 0.0],
... ])
>>> # The expected intersection is the only real root of
>>> # 28 s^3 - 30 s^2 + 9 s - 1.
>>> omega = cuberoot(28.0 * np.sqrt(17.0) + 132.0) / 28.0
>>> expected = 5.0 / 14.0 + omega + 1 / (49.0 * omega)
>>> s_vals = [0.625, None, None, None, None]
>>> t = 0.625
>>> np.log2(abs(expected - s_vals[0]))
-4.399...
>>> s_vals[1], t = newton_refine(s_vals[0], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[1]))
-7.901...
>>> s_vals[2], t = newton_refine(s_vals[1], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[2]))
-16.010...
>>> s_vals[3], t = newton_refine(s_vals[2], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[3]))
-32.110...
>>> s_vals[4], t = newton_refine(s_vals[3], nodes1, t, nodes2)
>>> np.allclose(s_vals[4], expected, rtol=machine_eps, atol=0.0)
True
.. testcleanup:: newton-refine2
import make_images
curve1 = bezier.Curve(nodes1, degree=4)
curve2 = bezier.Curve(nodes2, degree=4)
make_images.newton_refine2(s_vals, curve1, curve2)
However, when the intersection occurs at a point of tangency,
the convergence becomes linear. This means that the number of
correct digits added each iteration is roughly constant.
.. image:: ../images/newton_refine3.png
:align: center
.. doctest:: newton-refine3
>>> nodes1 = np.asfortranarray([
... [0.0, 0.5, 1.0],
... [0.0, 1.0, 0.0],
... ])
>>> nodes2 = np.asfortranarray([
... [0.0, 1.0],
... [0.5, 0.5],
... ])
>>> expected = 0.5
>>> s_vals = [0.375, None, None, None, None, None]
>>> t = 0.375
>>> np.log2(abs(expected - s_vals[0]))
-3.0
>>> s_vals[1], t = newton_refine(s_vals[0], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[1]))
-4.0
>>> s_vals[2], t = newton_refine(s_vals[1], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[2]))
-5.0
>>> s_vals[3], t = newton_refine(s_vals[2], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[3]))
-6.0
>>> s_vals[4], t = newton_refine(s_vals[3], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[4]))
-7.0
>>> s_vals[5], t = newton_refine(s_vals[4], nodes1, t, nodes2)
>>> np.log2(abs(expected - s_vals[5]))
-8.0
.. testcleanup:: newton-refine3
import make_images
curve1 = bezier.Curve(nodes1, degree=2)
curve2 = bezier.Curve(nodes2, degree=1)
make_images.newton_refine3(s_vals, curve1, curve2)
Unfortunately, the process terminates with an error that is not close
to machine precision :math:`\varepsilon` when
:math:`\Delta s = \Delta t = 0`.
.. testsetup:: newton-refine3-continued
import numpy as np
import bezier
from bezier._intersection_helpers import newton_refine
nodes1 = np.asfortranarray([
[0.0, 0.5, 1.0],
[0.0, 1.0, 0.0],
])
nodes2 = np.asfortranarray([
[0.0, 1.0],
[0.5, 0.5],
])
.. doctest:: newton-refine3-continued
>>> s1 = t1 = 0.5 - 0.5**27
>>> np.log2(0.5 - s1)
-27.0
>>> s2, t2 = newton_refine(s1, nodes1, t1, nodes2)
>>> s2 == t2
True
>>> np.log2(0.5 - s2)
-28.0
>>> s3, t3 = newton_refine(s2, nodes1, t2, nodes2)
>>> s3 == t3 == s2
True
Due to round-off near the point of tangency, the final error
resembles :math:`\sqrt{\varepsilon}` rather than machine
precision as expected.
.. note::
The following is not implemented in this function. It's just
an exploration on how the shortcomings might be addressed.
However, this can be overcome. At the point of tangency, we want
:math:`B_1'(s) \parallel B_2'(t)`. This can be checked numerically via
.. math::
B_1'(s) \times B_2'(t) = 0.
For the last example (the one that converges linearly), this is
.. math::
0 = \left[\begin{array}{c} 1 \\ 2 - 4s \end{array}\right] \times
\left[\begin{array}{c} 1 \\ 0 \end{array}\right] = 4 s - 2.
With this, we can modify Newton's method to find a zero of the
over-determined system
.. math::
G(s, t) = \left[\begin{array}{c} B_0(s) - B_1(t) \\
B_1'(s) \times B_2'(t) \end{array}\right] =
\left[\begin{array}{c} s - t \\ 2 s (1 - s) - \frac{1}{2} \\
4 s - 2\end{array}\right].
Since :math:`DG` is :math:`3 \times 2`, we can't invert it. However,
we can find a least-squares solution:
.. math::
\left(DG^T DG\right) \left[\begin{array}{c}
\Delta s \\ \Delta t \end{array}\right] = -DG^T G.
This only works if :math:`DG` has full rank. In this case, it does
since the submatrix containing the first and last rows has rank two:
.. math::
DG = \left[\begin{array}{c c} 1 & -1 \\
2 - 4 s & 0 \\
4 & 0 \end{array}\right].
Though this avoids a singular system, the normal equations have a
condition number that is the square of the condition number of the matrix.
Starting from :math:`s = t = \frac{3}{8}` as above:
.. testsetup:: newton-refine4
import numpy as np
from bezier import _helpers
def modified_update(s, t):
minus_G = np.asfortranarray([
[t - s],
[0.5 - 2.0 * s * (1.0 - s)],
[2.0 - 4.0 * s],
])
DG = np.asfortranarray([
[1.0, -1.0],
[2.0 - 4.0 * s, 0.0],
[4.0, 0.0],
])
DG_t = np.asfortranarray(DG.T)
LHS = _helpers.matrix_product(DG_t, DG)
RHS = _helpers.matrix_product(DG_t, minus_G)
delta_params = np.linalg.solve(LHS, RHS)
delta_s, delta_t = delta_params.flatten()
return s + delta_s, t + delta_t
.. doctest:: newton-refine4
>>> s0, t0 = 0.375, 0.375
>>> np.log2(0.5 - s0)
-3.0
>>> s1, t1 = modified_update(s0, t0)
>>> s1 == t1
True
>>> 1040.0 * s1
519.0
>>> np.log2(0.5 - s1)
-10.022...
>>> s2, t2 = modified_update(s1, t1)
>>> s2 == t2
True
>>> np.log2(0.5 - s2)
-31.067...
>>> s3, t3 = modified_update(s2, t2)
>>> s3 == t3 == 0.5
True
Args:
s (float): Parameter of a near-intersection along the first curve.
nodes1 (numpy.ndarray): Nodes of first curve forming intersection.
t (float): Parameter of a near-intersection along the second curve.
nodes2 (numpy.ndarray): Nodes of second curve forming intersection.
Returns:
Tuple[float, float]: The refined parameters from a single Newton
step.
Raises:
ValueError: If the Jacobian is singular at ``(s, t)``.
"""
# NOTE: We form -F(s, t) since we want to solve -DF^{-1} F(s, t).
func_val = _curve_helpers.evaluate_multi(
nodes2, np.asfortranarray([t])
) - _curve_helpers.evaluate_multi(nodes1, np.asfortranarray([s]))
if np.all(func_val == 0.0):
# No refinement is needed.
return s, t
# NOTE: This assumes the curves are 2D.
jac_mat = np.empty((2, 2), order="F")
jac_mat[:, :1] = _curve_helpers.evaluate_hodograph(s, nodes1)
jac_mat[:, 1:] = -_curve_helpers.evaluate_hodograph(t, nodes2)
# Solve the system.
singular, delta_s, delta_t = _helpers.solve2x2(jac_mat, func_val[:, 0])
if singular:
raise ValueError("Jacobian is singular.")
else:
return s + delta_s, t + delta_t | 0.000086 |
def start_flask_service(self):
"""Define Flask parameter server service.
This HTTP server can do two things: get the current model
parameters and update model parameters. After registering
the `parameters` and `update` routes, the service will
get started.
"""
app = Flask(__name__)
self.app = app
@app.route('/')
def home():
return 'Elephas'
@app.route('/parameters', methods=['GET'])
def handle_get_parameters():
if self.mode == 'asynchronous':
self.lock.acquire_read()
self.pickled_weights = pickle.dumps(self.weights, -1)
pickled_weights = self.pickled_weights
if self.mode == 'asynchronous':
self.lock.release()
return pickled_weights
@app.route('/update', methods=['POST'])
def handle_update_parameters():
delta = pickle.loads(request.data)
if self.mode == 'asynchronous':
self.lock.acquire_write()
if not self.master_network.built:
self.master_network.build()
# Just apply the gradient
weights_before = self.weights
self.weights = subtract_params(weights_before, delta)
if self.mode == 'asynchronous':
self.lock.release()
return 'Update done'
master_url = determine_master(self.port)
host = master_url.split(':')[0]
self.app.run(host=host, debug=self.debug, port=self.port,
threaded=self.threaded, use_reloader=self.use_reloader) | 0.001212 |
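A worker-side sketch of talking to that parameter service, assuming the requests package and a hypothetical server address; parameters come back pickled and updates are posted as a pickled delta, mirroring the two routes above:

import pickle

import requests  # assumes the requests package is installed

base_url = 'http://localhost:4000'  # hypothetical host/port of the server

# GET /parameters returns the pickled weights of the master network
weights = pickle.loads(requests.get(base_url + '/parameters').content)

# POST /update sends a pickled delta that the server subtracts from its weights
delta = weights  # placeholder; a real worker would send computed gradients
requests.post(base_url + '/update', data=pickle.dumps(delta, -1))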
def _crossings(self):
"""
        counts (inefficiently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
"""
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len(filter((lambda nx:nx<e), candidates))
del candidates
return P | 0.013373 |
def _encode_time(self, value):
"""Convert datetime to base64 or plaintext string"""
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat() | 0.003012 |
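The KDBX4 branch above boils down to "whole seconds since year 1, packed as a little-endian uint64, then base64". A standalone round-trip sketch using only the standard library (timezone.utc substitutes for the dateutil tz used in the snippet):

import base64
import struct
from datetime import datetime, timezone

value = datetime(2020, 1, 1, tzinfo=timezone.utc)
epoch = datetime(1, 1, 1, tzinfo=timezone.utc)
seconds = int((value - epoch).total_seconds())

encoded = base64.b64encode(struct.pack('<Q', seconds)).decode('utf-8')

# Decoding reverses the same steps.
decoded = struct.unpack('<Q', base64.b64decode(encoded))[0]
assert decoded == seconds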
def GetDevicePath(device_handle):
"""Obtains the unique path for the device.
Args:
device_handle: reference to the device
Returns:
A unique path for the device, obtained from the IO Registry
"""
# Obtain device path from IO Registry
io_service_obj = iokit.IOHIDDeviceGetService(device_handle)
str_buffer = ctypes.create_string_buffer(DEVICE_PATH_BUFFER_SIZE)
iokit.IORegistryEntryGetPath(io_service_obj, K_IO_SERVICE_PLANE, str_buffer)
return str_buffer.value | 0.014315 |
def _get_form_or_formset(self, request, obj, **kwargs):
"""
Generic code shared by get_form and get_formset.
"""
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if not self.exclude and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = self.replace_orig_field(exclude) or None
exclude = self._exclude_original_fields(exclude)
kwargs.update({'exclude': exclude})
return kwargs | 0.003405 |
def find_sinks(obj):
"""
Returns a dictionary of sink methods found on this object,
keyed on method name. Sink methods are identified by
(self, context) arguments on this object. For example:
def f(self, context):
...
is a sink method, but
def f(self, ctx):
...
is not.
"""
SINK_ARGSPEC = ['self', 'context']
return { n: m for n, m in inspect.getmembers(obj, inspect.ismethod)
if inspect.getargspec(m)[0] == SINK_ARGSPEC } | 0.008065 |
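A small sketch of the selection rule with a hypothetical handler class: only the method whose argument names are exactly (self, context) is returned. Note that inspect.getargspec was removed in Python 3.11, so this mirrors the snippet's older-Python assumption:

class Handlers(object):
    def on_message(self, context):  # matches (self, context) -> included
        pass

    def on_tick(self, ctx):         # second argument named ctx -> excluded
        pass

sinks = find_sinks(Handlers())
print(sorted(sinks))  # ['on_message']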
def create_ospf_profile():
"""
An OSPF Profile contains administrative distance and redistribution settings. An
OSPF Profile is applied at the engine level.
When creating an OSPF Profile, you must reference a OSPFDomainSetting.
An OSPFDomainSetting holds the settings of the area border router (ABR) type,
throttle timer settings, and the max metric router link-state advertisement
(LSA) settings.
"""
OSPFDomainSetting.create(name='custom',
abr_type='cisco')
ospf_domain = OSPFDomainSetting('custom') # obtain resource
ospf_profile = OSPFProfile.create(name='myospfprofile',
domain_settings_ref=ospf_domain.href)
print(ospf_profile) | 0.009259 |
def subsystem_closure_iter(cls):
"""Iterate over the transitive closure of subsystem dependencies of this Optionable.
:rtype: :class:`collections.Iterator` of :class:`SubsystemDependency`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
seen = set()
dep_path = OrderedSet()
def iter_subsystem_closure(subsystem_cls):
if subsystem_cls in dep_path:
raise cls.CycleException(list(dep_path) + [subsystem_cls])
dep_path.add(subsystem_cls)
for dep in subsystem_cls.subsystem_dependencies_iter():
if dep not in seen:
seen.add(dep)
yield dep
for d in iter_subsystem_closure(dep.subsystem_cls):
yield d
dep_path.remove(subsystem_cls)
for dep in iter_subsystem_closure(cls):
yield dep | 0.012291 |
def kdot(x, y, K=2):
"""Algorithm 5.10. Dot product algorithm in K-fold working precision,
K >= 3.
"""
xx = x.reshape(-1, x.shape[-1])
yy = y.reshape(y.shape[0], -1)
xx = numpy.ascontiguousarray(xx)
yy = numpy.ascontiguousarray(yy)
r = _accupy.kdot_helper(xx, yy).reshape((-1,) + x.shape[:-1] + y.shape[1:])
return ksum(r, K - 1) | 0.002725 |
def _reduce_by_ngram(self, data, ngram):
"""Lowers the counts of all n-grams in `data` that are
substrings of `ngram` by `ngram`\'s count.
Modifies `data` in place.
:param data: row data dictionary for the current text
:type data: `dict`
:param ngram: n-gram being reduced
:type ngram: `str`
"""
# Find all substrings of `ngram` and reduce their count by the
# count of `ngram`. Substrings may not exist in `data`.
count = data[ngram]['count']
for substring in self._generate_substrings(ngram, data[ngram]['size']):
try:
substring_data = data[substring]
except KeyError:
continue
else:
substring_data['count'] -= count | 0.002497 |
def get_field_resolver(
self, field_resolver: GraphQLFieldResolver
) -> GraphQLFieldResolver:
"""Wrap the provided resolver with the middleware.
Returns a function that chains the middleware functions with the provided
resolver function.
"""
if self._middleware_resolvers is None:
return field_resolver
if field_resolver not in self._cached_resolvers:
self._cached_resolvers[field_resolver] = reduce(
lambda chained_fns, next_fn: partial(next_fn, chained_fns),
self._middleware_resolvers,
field_resolver,
)
return self._cached_resolvers[field_resolver] | 0.005674 |
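The chaining is plain functools machinery: each middleware receives the next resolver as its first argument. A generic sketch of the same reduce/partial pattern with ordinary functions, independent of GraphQL; the resolver and middleware below are made up:

from functools import partial, reduce

def resolve(value):
    return value

def double(next_resolver, value):
    return next_resolver(value) * 2

def add_one(next_resolver, value):
    return next_resolver(value) + 1

middleware_resolvers = [double, add_one]

chained = reduce(
    lambda chained_fns, next_fn: partial(next_fn, chained_fns),
    middleware_resolvers,
    resolve,
)
print(chained(5))  # add_one(double(resolve(5))) -> 5 * 2 + 1 = 11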
def apply_integer_offsets(image2d, offx, offy):
"""Apply global (integer) offsets to image.
Parameters
----------
image2d : numpy array
Input image
offx : int
Offset in the X direction (must be integer).
offy : int
Offset in the Y direction (must be integer).
Returns
-------
image2d_shifted : numpy array
Shifted image
"""
# protections
if type(offx) != int or type(offy) != int:
raise ValueError('Invalid non-integer offsets')
# image dimensions
naxis2, naxis1 = image2d.shape
# initialize output image
image2d_shifted = np.zeros((naxis2, naxis1))
# handle negative and positive shifts accordingly
non = lambda s: s if s < 0 else None
mom = lambda s: max(0,s)
# shift image
image2d_shifted[mom(offy):non(offy), mom(offx):non(offx)] = \
image2d[mom(-offy):non(-offy), mom(-offx):non(-offx)]
# return shifted image
return image2d_shifted | 0.004057 |
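A small usage sketch with a 3x3 array: shifting one column to the right drops the last input column and zero-fills the first output column:

import numpy as np

image = np.arange(9, dtype=float).reshape(3, 3)
shifted = apply_integer_offsets(image, offx=1, offy=0)
print(shifted)
# [[0. 0. 1.]
#  [0. 3. 4.]
#  [0. 6. 7.]]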
def info(request, message, extra_tags='', fail_silently=False, async=False):
"""Adds a message with the ``INFO`` level."""
if ASYNC and async:
messages.info(_get_user(request), message)
else:
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently) | 0.008902 |
def nodes(self, unreported=2, with_status=False, **kwargs):
"""Query for nodes by either name or query. If both aren't
provided this will return a list of all nodes. This method
also fetches the nodes status and event counts of the latest
report from puppetdb.
:param with_status: (optional) include the node status in the\
returned nodes
:type with_status: :bool:
:param unreported: (optional) amount of hours when a node gets
marked as unreported
:type unreported: :obj:`None` or integer
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
        :returns: A generator yielding Nodes.
:rtype: :class:`pypuppetdb.types.Node`
"""
nodes = self._query('nodes', **kwargs)
now = datetime.datetime.utcnow()
# If we happen to only get one node back it
# won't be inside a list so iterating over it
        # goes boom. Therefore we wrap a list around it.
if type(nodes) == dict:
nodes = [nodes, ]
if with_status:
latest_events = self.event_counts(
query=EqualsOperator("latest_report?", True),
summarize_by='certname'
)
for node in nodes:
node['status_report'] = None
node['events'] = None
if with_status:
status = [s for s in latest_events
if s['subject']['title'] == node['certname']]
try:
node['status_report'] = node['latest_report_status']
if status:
node['events'] = status[0]
except KeyError:
if status:
node['events'] = status = status[0]
if status['successes'] > 0:
node['status_report'] = 'changed'
if status['noops'] > 0:
node['status_report'] = 'noop'
if status['failures'] > 0:
node['status_report'] = 'failed'
else:
node['status_report'] = 'unchanged'
# node report age
if node['report_timestamp'] is not None:
try:
last_report = json_to_datetime(
node['report_timestamp'])
last_report = last_report.replace(tzinfo=None)
unreported_border = now - timedelta(hours=unreported)
if last_report < unreported_border:
delta = (now - last_report)
node['unreported'] = True
node['unreported_time'] = '{0}d {1}h {2}m'.format(
delta.days,
int(delta.seconds / 3600),
int((delta.seconds % 3600) / 60)
)
except AttributeError:
node['unreported'] = True
if not node['report_timestamp']:
node['unreported'] = True
yield Node(self,
name=node['certname'],
deactivated=node['deactivated'],
expired=node['expired'],
report_timestamp=node['report_timestamp'],
catalog_timestamp=node['catalog_timestamp'],
facts_timestamp=node['facts_timestamp'],
status_report=node['status_report'],
noop=node.get('latest_report_noop'),
noop_pending=node.get('latest_report_noop_pending'),
events=node['events'],
unreported=node.get('unreported'),
unreported_time=node.get('unreported_time'),
report_environment=node['report_environment'],
catalog_environment=node['catalog_environment'],
facts_environment=node['facts_environment'],
latest_report_hash=node.get('latest_report_hash'),
cached_catalog_status=node.get('cached_catalog_status')
) | 0.000897 |
def run(): # pylint: disable=too-many-branches
"""
Main thread runner for all Duts.
:return: Nothing
"""
Dut._logger.debug("Start DUT communication", extra={'type': '<->'})
while Dut._run:
Dut._sem.acquire()
try:
dut = Dut._signalled_duts.pop()
# Check for pending requests
if dut.waiting_for_response is not None:
item = dut.waiting_for_response
# pylint: disable=protected-access
dut.response_coming_in = dut._read_response()
if dut.response_coming_in is None:
# Continue to next node
continue
if isinstance(dut.response_coming_in, CliResponse):
dut.response_coming_in.set_response_time(item.get_timedelta(dut.get_time()))
dut.waiting_for_response = None
dut.logger.debug("Got response", extra={'type': '<->'})
dut.response_received.set()
continue
# Check for new Request
if dut.query is not None:
item = dut.query
dut.query = None
dut.logger.info(item.cmd, extra={'type': '-->'})
try:
dut.writeline(item.cmd)
except RuntimeError:
dut.response_coming_in = -1
dut.response_received.set()
continue
dut.prev = item # Save previous command for logging purposes
if item.wait:
# Only caller will care if this was asynchronous.
dut.waiting_for_response = item
else:
dut.query_timeout = 0
dut.response_received.set()
continue
try:
line = dut.readline()
except RuntimeError:
dut.response_coming_in = -1
dut.response_received.set()
continue
if line:
if dut.store_traces:
dut.traces.append(line)
EventObject(EventTypes.DUT_LINE_RECEIVED, dut, line)
retcode = dut.check_retcode(line)
if retcode is not None:
dut.logger.warning("unrequested retcode", extra={'type': '!<-'})
dut.logger.debug(line, extra={'type': '<<<'})
except IndexError:
pass
Dut._logger.debug("End DUT communication", extra={'type': '<->'}) | 0.00215 |
def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
nearest_items=None,
similarity_type='jaccard',
threshold=0.001,
only_top_k=64,
verbose=True,
target_memory_usage = 8*1024*1024*1024,
**kwargs):
"""
Create a recommender that uses item-item similarities based on
users in common.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information. (NB: This argument is currently ignored by this model.)
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information. (NB: This argument is currently ignored by this model.)
similarity_type : {'jaccard', 'cosine', 'pearson'}, optional
Similarity metric to use. See ItemSimilarityRecommender for details.
Default: 'jaccard'.
threshold : float, optional
Predictions ignore items below this similarity value.
Default: 0.001.
only_top_k : int, optional
Number of similar items to store for each item. Default value is
64. Decreasing this decreases the amount of memory required for the
model, but may also decrease the accuracy.
nearest_items : SFrame, optional
A set of each item's nearest items. When provided, this overrides
the similarity computed above.
See Notes in the documentation for ItemSimilarityRecommender.
Default: None.
target_memory_usage : int, optional
The target memory usage for the processing buffers and lookup
tables. The actual memory usage may be higher or lower than this,
but decreasing this decreases memory usage at the expense of
training time, and increasing this can dramatically speed up the
training time. Default is 8GB = 8589934592.
seed_item_set_size : int, optional
For users that have not yet rated any items, or have only
rated uniquely occurring items with no similar item info,
the model seeds the user's item set with the average
ratings of the seed_item_set_size most popular items when
making predictions and recommendations. If set to 0, then
recommendations based on either popularity (no target present)
or average item score (target present) are made in this case.
training_method : (advanced), optional.
The internal processing is done with a combination of nearest
neighbor searching, dense tables for tracking item-item
similarities, and sparse item-item tables. If 'auto' is chosen
(default), then the estimated computation time is estimated for
each, and the computation balanced between the methods in order to
minimize training time given the target memory usage. This allows
the user to force the use of one of these methods. All should give
equivalent results; the only difference would be training time.
Possible values are {'auto', 'dense', 'sparse', 'nn', 'nn:dense',
'nn:sparse'}. 'dense' uses a dense matrix to store item-item
interactions as a lookup, and may do multiple passes to control
memory requirements. 'sparse' does the same but with a sparse lookup
table; this is better if the data has many infrequent items. "nn"
uses a brute-force nearest neighbors search. "nn:dense" and
"nn:sparse" use nearest neighbors for the most frequent items
(see nearest_neighbors_interaction_proportion_threshold below),
and either sparse or dense matrices for the remainder. "auto"
chooses the method predicted to be the fastest based on the
properties of the data.
nearest_neighbors_interaction_proportion_threshold : (advanced) float
Any item that has was rated by more than this proportion of
users is treated by doing a nearest neighbors search. For
frequent items, this is almost always faster, but it is slower
for infrequent items. Furthermore, decreasing this causes more
items to be processed using the nearest neighbor path, which may
decrease memory requirements.
degree_approximation_threshold : (advanced) int, optional
Users with more than this many item interactions may be
approximated. The approximation is done by a combination of
sampling and choosing the interactions likely to have the most
impact on the model. Increasing this can increase the training time
and may or may not increase the quality of the model. Default = 4096.
max_data_passes : (advanced) int, optional
The maximum number of passes through the data allowed in
building the similarity lookup tables. If it is not possible to
build the recommender in this many passes (calculated before
that stage of training), then additional approximations are
applied; namely decreasing degree_approximation_threshold. If
this is not possible, an error is raised. To decrease the
number of passes required, increase target_memory_usage or
decrease nearest_neighbors_interaction_proportion_threshold.
Default = 1024.
Examples
--------
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> recs = m.recommend()
When a target is available, one can specify the desired similarity. For
example we may choose to use a cosine similarity, and use it to make
predictions or recommendations.
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.item_similarity_recommender.create(sf2, target="rating",
... similarity_type='cosine')
>>> m2.predict(sf)
>>> m2.recommend()
Notes
-----
Currently, :class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`
does not leverage the use of side features `user_data` and `item_data`.
**Incorporating pre-defined similar items**
For item similarity models, one may choose to provide user-specified
nearest neighbors graph using the keyword argument `nearest_items`. This is
an SFrame containing, for each item, the nearest items and the similarity
score between them. If provided, these item similarity scores are used for
recommendations. The SFrame must contain (at least) three columns:
* 'item_id': a column with the same name as that provided to the `item_id`
argument (which defaults to the string "item_id").
* 'similar': a column containing the nearest items for the given item id.
This should have the same type as the `item_id` column.
* 'score': a numeric score measuring how similar these two items are.
For example, suppose you first create an ItemSimilarityRecommender and use
:class:`~turicreate.recommender.ItemSimilarityRecommender.get_similar_items`:
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
>>> m2 = turicreate.item_similarity_recommender.create(sf, nearest_items=nn)
With the above code, the item similarities computed for model `m` can be
used to create a new recommender object, `m2`. Note that we could have
created `nn` from some other means, but now use `m2` to make
recommendations via `m2.recommend()`.
See Also
--------
ItemSimilarityRecommender
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.item_similarity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
if nearest_items is None:
nearest_items = _turicreate.SFrame()
if "training_method" in kwargs and kwargs["training_method"] in ["in_memory", "sgraph"]:
print("WARNING: training_method = " + str(kwargs["training_method"]) + " deprecated; see documentation.")
kwargs["training_method"] = "auto"
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'similarity_type': similarity_type,
'threshold': threshold,
'target_memory_usage' : float(target_memory_usage),
'max_item_neighborhood_size': only_top_k}
extra_data = {"nearest_items" : nearest_items}
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return ItemSimilarityRecommender(model_proxy) | 0.002029 |
def count_missing(self, axis=None):
"""Count missing genotypes.
Parameters
----------
axis : int, optional
Axis over which to count, or None to perform overall count.
"""
b = self.is_missing()
return np.sum(b, axis=axis) | 0.006897 |
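A brief usage sketch of the method above; it assumes the method lives on scikit-allel's GenotypeArray (an assumption, since only the method body is shown here) and that missing calls are encoded with negative allele values:

import allel  # assumed host library for this method

g = allel.GenotypeArray([[[0, 0], [-1, -1]],
                         [[0, 1], [1, 1]]])
g.count_missing()        # total missing genotype calls -> 1
g.count_missing(axis=1)  # per-variant counts -> array([1, 0])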
def _get_mu_tensor(self):
"""Get the min mu which minimize the surrogate.
Returns:
The mu_t.
"""
root = self._get_cubic_root()
dr = self._h_max / self._h_min
mu = tf.maximum(
root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
return mu | 0.003571 |
def risk_score(self, domains):
"""Performs Umbrella risk score analysis on the input domains
Args:
domains: an enumerable of domains
Returns:
An enumerable of associated domain risk scores
"""
api_name = 'opendns-risk_score'
fmt_url_path = u'domains/risk-score/{0}'
return self._multi_get(api_name, fmt_url_path, domains) | 0.004975 |
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget,
parser_type='html'):
'''Return an iterator of elements found in the document.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
target_class: A class to be used for target parsing.
parser_type (str): The type of parser to use. Accepted values:
``html``, ``xhtml``, ``xml``.
Returns:
iterator: Each item is an element from
:mod:`.document.htmlparse.element`
'''
if encoding:
lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
else:
lxml_encoding = encoding
elements = []
callback_func = elements.append
target = target_class(callback_func)
if parser_type == 'html':
parser = lxml.html.HTMLParser(
encoding=lxml_encoding, target=target
)
elif parser_type == 'xhtml':
parser = lxml.html.XHTMLParser(
encoding=lxml_encoding, target=target, recover=True
)
else:
parser = lxml.etree.XMLParser(
encoding=lxml_encoding, target=target, recover=True
)
if parser_type == 'html':
# XXX: Force libxml2 to do full read in case of early "</html>"
# See https://github.com/chfoo/wpull/issues/104
# See https://bugzilla.gnome.org/show_bug.cgi?id=727935
for dummy in range(3):
parser.feed('<html>'.encode(encoding))
while True:
data = file.read(self.BUFFER_SIZE)
if not data:
break
parser.feed(data)
for element in elements:
yield element
del elements[:]
parser.close()
for element in elements:
yield element | 0.001519 |
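A hedged usage sketch of parse_lxml; `parser` stands for an instance of the (unshown) class that defines it, and the file is opened in binary mode because lxml is fed raw bytes:

# `parser` is an assumed instance of the class defining parse_lxml above.
with open('page.html', 'rb') as doc_file:
    for element in parser.parse_lxml(doc_file, encoding='utf-8'):
        print(element)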
def get_person(people_id):
''' Return a single person '''
result = _get(people_id, settings.PEOPLE)
return People(result.content) | 0.007092 |
def header(self, text, level, raw=None):
"""Rendering header/heading tags like ``<h1>`` ``<h2>``.
:param text: rendered text content for the header.
:param level: a number for the header level, for example: 1.
:param raw: raw text content of the header.
"""
return '\n{0}\n{1}\n'.format(text,
self.hmarks[level] * column_width(text)) | 0.004773 |
def CopyNote(self, part, measure_id, new_note):
'''
        Handles copying the latest note into the measure note list.
        This is done at the end of note loading to make sure staff_id is right, as the staff id
        could be encountered at any point during the note tag.
:param part: the part class to copy it into
:param measure_id: the id of the measure in which the note belongs
:param new_note: the new note class to be copied in
:return: None, side effects modifying the piece tree
'''
if part.getMeasure(measure_id, self.data["staff_id"]) is None:
part.addEmptyMeasure(measure_id, self.data["staff_id"])
measure = part.getMeasure(measure_id, self.data["staff_id"])
voice_obj = measure.getVoice(self.data["voice"])
if voice_obj is None:
measure.addVoice(id=self.data["voice"])
voice_obj = measure.getVoice(self.data["voice"])
add = True
notes = voice_obj.GetChildrenIndexes()
for n in notes:
no = voice_obj.GetChild(n)
if new_note == no:
add = False
break
if add:
chord = False
if hasattr(new_note, "chord"):
chord = new_note.chord
measure.addNote(new_note, self.data["voice"], chord=chord)
if hasattr(
new_note, "BarlinesAndMarkersRest") and new_note.BarlinesAndMarkersRest:
measure.rest = True
voice_obj.rest = True | 0.002608 |
def get_address_transactions(self, address_id, **params):
"""https://developers.coinbase.com/api/v2#list-address39s-transactions"""
return self.api_client.get_address_transactions(self.id, address_id, **params) | 0.017699 |
def list_changes(self):
"""
Return a list of modified records.
This is only applicable for attached tables.
Returns:
A list of `(row_index, record)` tuples of modified records
Raises:
:class:`delphin.exceptions.ItsdbError`: when called on a
detached table
"""
if not self.is_attached():
raise ItsdbError('changes are not tracked for detached tables.')
return [(i, self[i]) for i, row in enumerate(self._records)
if row is not None] | 0.00354 |
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension) | 0.00813 |
def is_data_diverging(data_container):
"""
We want to use this to check whether the data are diverging or not.
    This is a simple check that can be made much more sophisticated.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
assert infer_data_type(data_container) in [
"ordinal",
"continuous",
], "Data type should be ordinal or continuous"
# Check whether the data contains negative and positive values.
has_negative = False
has_positive = False
for i in data_container:
if i < 0:
has_negative = True
elif i > 0:
has_positive = True
if has_negative and has_positive:
return True
else:
return False | 0.001295 |
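Illustrative calls, assuming infer_data_type (not shown here) classifies plain integer sequences as ordinal or continuous:

is_data_diverging([-2, -1, 0, 1, 2])  # True: both negative and positive values present
is_data_diverging([0, 1, 2, 3])       # False: only non-negative values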
def validate_profile_exists(self):
"""Validate the provided profiles name exists."""
if self.args.profile_name not in self.profiles:
self.handle_error('Could not find profile "{}"'.format(self.args.profile_name)) | 0.012448 |
def pnlSingle(
self, account: str = '', modelCode: str = '',
conId: int = 0) -> List[PnLSingle]:
"""
List of subscribed :class:`.PnLSingle` objects (profit and loss for
single positions).
The :class:`.PnLSingle` objects are kept live updated.
Args:
account: If specified, filter for this account name.
modelCode: If specified, filter for this account model.
conId: If specified, filter for this contract ID.
"""
return [v for v in self.wrapper.pnlSingles.values() if
(not account or v.account == account) and
(not modelCode or v.modelCode == modelCode) and
(not conId or v.conId == conId)] | 0.002649 |
def prepare_editable_requirement(
self,
req, # type: InstallRequirement
require_hashes, # type: bool
use_user_site, # type: bool
finder # type: PackageFinder
):
# type: (...) -> DistAbstraction
"""Prepare an editable requirement
"""
assert req.editable, "cannot prepare a non-editable req as editable"
logger.info('Obtaining %s', req)
with indent_log():
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req
)
req.ensure_has_source_dir(self.src_dir)
req.update_editable(not self._download_should_save)
abstract_dist = make_abstract_dist(req)
with self.req_tracker.track(req):
abstract_dist.prep_for_dist(finder, self.build_isolation)
if self._download_should_save:
req.archive(self.download_dir)
req.check_if_exists(use_user_site)
return abstract_dist | 0.003378 |
def add_license(key, description, safety_checks=True,
service_instance=None):
'''
Adds a license to the vCenter or ESXi host
key
License key.
description
License description added in as a label.
safety_checks
Specify whether to perform safety check or to skip the checks and try
performing the required task
service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None.
.. code-block:: bash
salt '*' vsphere.add_license key=<license_key> desc='License desc'
'''
log.trace('Adding license \'%s\'', key)
salt.utils.vmware.add_license(service_instance, key, description)
return True | 0.001355 |
def raw_cron(user):
'''
Return the contents of the user's crontab
CLI Example:
.. code-block:: bash
salt '*' cron.raw_cron root
'''
if _check_instance_uid_match(user) or __grains__.get('os_family') in ('Solaris', 'AIX'):
cmd = 'crontab -l'
# Preserve line endings
lines = salt.utils.data.decode(
__salt__['cmd.run_stdout'](cmd,
runas=user,
ignore_retcode=True,
rstrip=False,
python_shell=False)
).splitlines(True)
else:
cmd = 'crontab -u {0} -l'.format(user)
# Preserve line endings
lines = salt.utils.data.decode(
__salt__['cmd.run_stdout'](cmd,
ignore_retcode=True,
rstrip=False,
python_shell=False)
).splitlines(True)
if lines and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
del lines[0:3]
return ''.join(lines) | 0.002568 |
def is_related_to(item, app_id, app_ver=None):
"""Return True if the item relates to the given app_id (and app_ver, if passed)."""
versionRange = item.get('versionRange')
if not versionRange:
return True
for vR in versionRange:
if not vR.get('targetApplication'):
return True
if get_related_targetApplication(vR, app_id, app_ver) is not None:
return True
return False | 0.004587 |
def read_file(self, file_path):
''' Read a configuration file and return configuration data '''
getLogger().info("Loading app config from {} file: {}".format(self.__mode, file_path))
if self.__mode == AppConfig.JSON:
return json.loads(FileHelper.read(file_path), object_pairs_hook=OrderedDict)
elif self.__mode == AppConfig.INI:
config = configparser.ConfigParser(allow_no_value=True)
config.read(file_path)
return config | 0.007984 |
def _func_args_from_dict(self, d):
"""Given a Python dictionary, creates a string representing arguments
for invoking a function. All arguments with a value of None are
ignored."""
filtered_d = self.filter_out_none_valued_keys(d)
return ', '.join(['%s=%s' % (k, v) for k, v in filtered_d.items()]) | 0.005935 |
def cyclic_rainbow(script, direction='sphere', start_pt=(0, 0, 0),
amplitude=255 / 2, center=255 / 2, freq=0.8,
phase=(0, 120, 240, 0), alpha=False):
""" Color mesh vertices in a repeating sinusiodal rainbow pattern
Sine wave follows the following equation for each color channel (RGBA):
channel = sin(freq*increment + phase)*amplitude + center
Args:
script: the FilterScript object or script filename to write
the filter to.
direction (str) = the direction that the sine wave will travel; this
and the start_pt determine the 'increment' of the sine function.
Valid values are:
'sphere' - radiate sine wave outward from start_pt (default)
'x' - sine wave travels along the X axis
'y' - sine wave travels along the Y axis
'z' - sine wave travels along the Z axis
or define the increment directly using a muparser function, e.g.
'2x + y'. In this case start_pt will not be used; include it in
the function directly.
start_pt (3 coordinate tuple or list): start point of the sine wave. For a
sphere this is the center of the sphere.
amplitude (float [0, 255], single value or 4 term tuple or list): amplitude
of the sine wave, with range between 0-255. If a single value is
specified it will be used for all channels, otherwise specify each
channel individually.
center (float [0, 255], single value or 4 term tuple or list): center
of the sine wave, with range between 0-255. If a single value is
specified it will be used for all channels, otherwise specify each
channel individually.
freq (float, single value or 4 term tuple or list): frequency of the sine
wave. If a single value is specified it will be used for all channels,
            otherwise specify each channel individually.
phase (float [0, 360], single value or 4 term tuple or list): phase
of the sine wave in degrees, with range between 0-360. If a single
value is specified it will be used for all channels, otherwise specify
each channel individually.
alpha (bool): if False the alpha channel will be set to 255 (full opacity).
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
start_pt = util.make_list(start_pt, 3)
amplitude = util.make_list(amplitude, 4)
center = util.make_list(center, 4)
freq = util.make_list(freq, 4)
phase = util.make_list(phase, 4)
if direction.lower() == 'sphere':
increment = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)'.format(
start_pt[0], start_pt[1], start_pt[2])
elif direction.lower() == 'x':
increment = 'x - {}'.format(start_pt[0])
elif direction.lower() == 'y':
increment = 'y - {}'.format(start_pt[1])
elif direction.lower() == 'z':
increment = 'z - {}'.format(start_pt[2])
else:
increment = direction
red_func = '{a}*sin({f}*{i} + {p}) + {c}'.format(
f=freq[0], i=increment, p=math.radians(phase[0]),
a=amplitude[0], c=center[0])
green_func = '{a}*sin({f}*{i} + {p}) + {c}'.format(
f=freq[1], i=increment, p=math.radians(phase[1]),
a=amplitude[1], c=center[1])
blue_func = '{a}*sin({f}*{i} + {p}) + {c}'.format(
f=freq[2], i=increment, p=math.radians(phase[2]),
a=amplitude[2], c=center[2])
if alpha:
alpha_func = '{a}*sin({f}*{i} + {p}) + {c}'.format(
f=freq[3], i=increment, p=math.radians(phase[3]),
a=amplitude[3], c=center[3])
else:
alpha_func = 255
function(script, red=red_func, green=green_func, blue=blue_func,
alpha=alpha_func)
return None | 0.000261 |
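A hedged usage sketch; 'colorize.mlx' is an assumed script filename, and the mesh layer is expected to exist already:

# Rainbow travelling along the Z axis; with freq=0.5 the colour pattern
# repeats roughly every 2*pi/0.5 ~ 12.6 model units.
cyclic_rainbow('colorize.mlx', direction='z', start_pt=(0, 0, 0), freq=0.5)

# Radial rainbow centred on the origin, with a varying alpha channel.
cyclic_rainbow('colorize.mlx', direction='sphere', freq=0.2, alpha=True)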
def update_case(self, case_obj):
"""Update a case in the database
The following will be updated:
- collaborators: If new collaborators these will be added to the old ones
- analysis_date: Is updated to the new date
- analyses: The new analysis date will be added to old runs
- individuals: There could be new individuals
- updated_at: When the case was updated in the database
- rerun_requested: Is set to False since that is probably what happened
- panels: The new gene panels are added
- genome_build: If there is a new genome build
- genome_version: - || -
- rank_model_version: If there is a new rank model
- madeline_info: If there is a new pedigree
- vcf_files: paths to the new files
- has_svvariants: If there are new svvariants
- has_strvariants: If there are new strvariants
- multiqc: If there's an updated multiqc report location
- mme_submission: If case was submitted to MatchMaker Exchange
Args:
case_obj(dict): The new case information
Returns:
updated_case(dict): The updated case information
"""
# Todo: rename to match the intended purpose
LOG.info("Updating case {0}".format(case_obj['_id']))
old_case = self.case_collection.find_one(
{'_id': case_obj['_id']}
)
updated_case = self.case_collection.find_one_and_update(
{'_id': case_obj['_id']},
{
'$addToSet': {
'collaborators': {'$each': case_obj['collaborators']},
'analyses': {
'date': old_case['analysis_date'],
'delivery_report': old_case.get('delivery_report')
}
},
'$set': {
'analysis_date': case_obj['analysis_date'],
'delivery_report': case_obj.get('delivery_report'),
'individuals': case_obj['individuals'],
'updated_at': datetime.datetime.now(),
'rerun_requested': False,
'panels': case_obj.get('panels', []),
'genome_build': case_obj.get('genome_build', '37'),
'genome_version': case_obj.get('genome_version'),
'rank_model_version': case_obj.get('rank_model_version'),
'madeline_info': case_obj.get('madeline_info'),
'vcf_files': case_obj.get('vcf_files'),
'has_svvariants': case_obj.get('has_svvariants'),
'has_strvariants': case_obj.get('has_strvariants'),
'is_research': case_obj.get('is_research', False),
'research_requested': case_obj.get('research_requested', False),
'multiqc': case_obj.get('multiqc'),
'mme_submission': case_obj.get('mme_submission'),
}
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.info("Case updated")
return updated_case | 0.001537 |
def exists(self, dataset_id):
""" Check if a dataset exists in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false
"""
from google.api_core.exceptions import NotFound
try:
self.client.get_dataset(self.client.dataset(dataset_id))
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex) | 0.00335 |
def xisabs(filename):
""" Cross-platform version of `os.path.isabs()`
Returns True if `filename` is absolute on
Linux, OS X or Windows.
"""
if filename.startswith(b'/'): # Linux/Unix
return True
elif filename.startswith(b'\\'): # Windows
return True
elif re.match(b'\\w:[\\\\/]', filename): # Windows
return True
return False | 0.019126 |
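Example inputs and the values the function returns for them (arguments are bytes, matching the signature above):

xisabs(b'/usr/local/bin')      # True  (Linux/OS X absolute path)
xisabs(b'\\\\server\\share')   # True  (Windows path starting with a backslash)
xisabs(b'C:/Users/alice')      # True  (Windows drive-letter path)
xisabs(b'relative/path.txt')   # False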
def mouse_event(dwFlags: int, dx: int, dy: int, dwData: int, dwExtraInfo: int) -> None:
"""mouse_event from Win32."""
ctypes.windll.user32.mouse_event(dwFlags, dx, dy, dwData, dwExtraInfo) | 0.010204 |
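A hedged usage sketch; the flag values below are the standard Win32 constants, declared here as assumptions because the snippet itself defines none of them:

MOUSEEVENTF_MOVE = 0x0001      # relative cursor move
MOUSEEVENTF_LEFTDOWN = 0x0002  # left button press
MOUSEEVENTF_LEFTUP = 0x0004    # left button release

# Nudge the cursor and simulate a left click at its current position.
mouse_event(MOUSEEVENTF_MOVE, 10, 10, 0, 0)
mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)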
def write_to(self, group):
"""Write stored items to the given HDF5 group.
We assume that self.create() has been called.
"""
# The HDF5 group where to write data
items_group = group[self.name]
nitems = items_group.shape[0]
items_group.resize((nitems + len(self.data),))
items_group[nitems:] = self.data | 0.005435 |
def to_map_with_default(value, default_value):
"""
Converts value into map object or returns default when conversion is not possible
:param value: the value to convert.
:param default_value: the default value.
    :return: map object or the default value when conversion is not possible.
"""
result = RecursiveMapConverter.to_nullable_map(value)
return result if result != None else default_value | 0.00885 |
def Fierz_to_Bern_chrom(C, dd, parameters):
"""From Fierz to chromomagnetic Bern basis for Class V.
dd should be of the form 'sb', 'ds' etc."""
e = sqrt(4 * pi * parameters['alpha_e'])
gs = sqrt(4 * pi * parameters['alpha_s'])
if dd == 'sb' or dd == 'db':
mq = parameters['m_b']
elif dd == 'ds':
mq = parameters['m_s']
else:
KeyError("Not sure what to do with quark mass for flavour {}".format(dd))
return {
'7gamma' + dd : gs**2 / e / mq * C['F7gamma' + dd ],
'8g' + dd : gs / mq * C['F8g' + dd ],
'7pgamma' + dd : gs**2 / e /mq * C['F7pgamma' + dd],
'8pg' + dd : gs / mq * C['F8pg' + dd]
} | 0.013025 |
def _process_member(self, member, parent, string):
"""Extracts all the member info from the regex match; returns a ValueElements."""
#The modifiers regex is very greedy so we have some cleaning up to do
#to extract the mods.
modifiers = member.group("modifiers")
dimension = None
if modifiers is not None:
#Unfortunately, the dimension can also be specified as a modifier and
#the dimensions can include variable names and functions. This introduces
#the possibility of nested lists.
modifiers = modifiers.lower()
if "dimension" in modifiers:
start, end = self._get_dim_modifier(modifiers)
dimension = modifiers[start+1:end]
dimtext = modifiers[modifiers.index("dimension"):end+1]
modifiers = re.split(",\s*", modifiers.replace(dimtext, "").strip())
#modifiers.append("dimension")
else:
modifiers = re.split("[,\s]+", modifiers.strip())
if "" in modifiers:
modifiers.remove("")
dtype = member.group("type")
kind = member.group("kind")
names = member.group("names")
#If there are multiple vars defined on this line we need to return
#a list of all of them.
result = []
#They might have defined multiple vars on the same line
refstring = string[member.start():member.end()].strip()
if parent is not None:
refline = parent.module.linenum(member.start())
else:
refline = "?"
ready = self._separate_multiple_def(re.sub(",\s*", ", ", names.strip()), parent, refstring, refline)
for name, ldimension, default, D in self._clean_multiple_def(ready):
#Now construct the element and set all the values, then add it in the results list.
udim = ldimension if ldimension is not None else dimension
uD = D if ldimension is not None else count_dimensions([dimension])
result.append(ValueElement(name, modifiers, dtype, kind, default, udim, parent, uD))
return result | 0.010944 |
def arping(net, timeout=2, cache=0, verbose=None, **kargs):
"""Send ARP who-has requests to determine which hosts are up
arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
Set cache=True if you want arping to modify internal ARP-Cache"""
if verbose is None:
verbose = conf.verb
ans,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=net), verbose=verbose,
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res)
if cache and ans is not None:
for pair in ans:
conf.netcache.arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time())
if verbose:
ans.show()
return ans,unans | 0.008368 |
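A hedged usage sketch; scanning a LAN this way requires root privileges and a configured network interface:

# Who answers on the local /24? cache=True also fills scapy's ARP cache.
ans, unans = arping("192.168.1.0/24", timeout=2, cache=True, verbose=0)
for sent, received in ans:
    print(received.psrc, received.hwsrc)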
def diff(self, diff):
""" Serialize to a dictionary. """
if diff is None:
return None
return dict(
toVol=diff.toUUID,
fromVol=diff.fromUUID,
size=diff.size,
sizeIsEstimated=diff.sizeIsEstimated,
) | 0.006944 |
def load_file(self, filename):
"""
        Load a file which contains yaml configuration entries and merge it into
        the current instance.
        :param filename: path of the yaml file to load and merge into the
        existing configuration instance
        :type filename: str
"""
if not path.exists(filename):
raise FileNotFoundError(filename)
loaded_yaml = load_yaml(filename, self.context)
if loaded_yaml:
self.merge(loaded_yaml) | 0.00409 |
def _set_objective_bank_view(self, session):
"""Sets the underlying objective_bank view to match current view"""
if self._objective_bank_view == FEDERATED:
try:
session.use_federated_objective_bank_view()
except AttributeError:
pass
else:
try:
session.use_isolated_objective_bank_view()
except AttributeError:
pass | 0.004444 |
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output | 0.006369 |
def current(self):
# type: () -> Hub
"""Returns the current instance of the hub."""
rv = _local.get(None)
if rv is None:
rv = Hub(GLOBAL_HUB)
_local.set(rv)
return rv | 0.013043 |
def rfc2426(self):
"""RFC2426-encode the field content.
:return: the field in the RFC 2426 format.
:returntype: `str`"""
return rfc2425encode("label",u"\n".join(self.lines),
{"type":",".join(self.type)}) | 0.019841 |
def put_intent(self, intent_id, intent_json):
"""Send a put request to update the intent with intent_id"""
endpoint = self._intent_uri(intent_id)
return self._put(endpoint, intent_json) | 0.009569 |
def update(self, campaign_id, search_channels, nonsearch_channels, outside_discount, nick=None):
'''xxxxx.xxxxx.campaign.platform.update
===================================
        Get the placement platform settings of a promotion campaign.'''
request = TOPRequest('xxxxx.xxxxx.campaign.platform.update')
request['campaign_id'] = campaign_id
request['search_channels'] = search_channels
request['nonsearch_channels'] = nonsearch_channels
request['outside_discount'] = outside_discount
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignPlatform})
return self.result | 0.016713 |
def _print_single_file(self, path, apps_models):
"""
Print apps_models which contains a list of 2-tuples containing apps and their models
into a single file.
"""
if path:
outfile = codecs.open(path, 'w', encoding='utf-8')
self._print = lambda s: outfile.write(s + '\n')
self._print(self._diagram_start)
for app, app_models in apps_models:
self._print_app(app, app_models)
self._print(self._diagram_end)
if path:
outfile.close() | 0.005484 |
def _get_users_of_group(config, group):
""" Utility to query fas for users of a group. """
if not group:
return set()
fas = fmn.rules.utils.get_fas(config)
return fmn.rules.utils.get_user_of_group(config, fas, group) | 0.004167 |
def detach(gandi, resource, background, force):
"""Detach an ip from it's currently attached vm.
resource can be an ip id or ip.
"""
if not force:
proceed = click.confirm('Are you sure you want to detach ip %s?' %
resource)
if not proceed:
return
return gandi.ip.detach(resource, background, force) | 0.002632 |
def resetVector(x1, x2):
"""
Copies the contents of vector x1 into vector x2.
@param x1 (array) binary vector to be copied
@param x2 (array) binary vector where x1 is copied
"""
size = len(x1)
for i in range(size):
x2[i] = x1[i] | 0.016194 |
def add(self, **args):
"""Handles the 'a' command.
:args: Arguments supplied to the 'a' command.
"""
kwargs = self.getKwargs(args)
if kwargs:
self.model.add(**kwargs) | 0.009091 |
def p_rule(self, rule):
'''rule : GUIDELINE
| REGULATION'''
if len(rule[1]) == 4:
# This is a guideline
rule[0] = Guideline(rule[1][1], rule[1][2], rule[1][3])
else:
# This is a regulation
indentsize = rule[1][0]
number = rule[1][1]
text = rule[1][2]
parent = None
# If we just "un"nested, shrink the current rule to our level
if self.prev_indent > indentsize:
self.current_rule = self.current_rule[0:indentsize+1]
# We just added a nested level, the parent is the list's last elem
if self.prev_indent < indentsize:
parent = self.current_rule[-1]
# Else, if we are nested the parent is the one before the last elem
elif len(self.current_rule) > 1:
parent = self.current_rule[-2]
# Else if we are not nested, then we are a root rule and parent is none
# (do nothing as parent is initialized to none)
# Create the regulation node
reg = Regulation(number, text, parent)
# Let our parent knows he has a new child, if we don't have a parent
# let's create an item in the article rules list
if parent:
parent.add_child(reg)
else:
rule[0] = reg
# Unless we nested, pop and replace the last rule by ourself
# If we added a nesting level, we just need to add ourself
if self.prev_indent >= indentsize:
self.current_rule.pop()
self.current_rule.append(reg)
self.prev_indent = indentsize | 0.002313 |
def restore_from_cluster_snapshot(ClusterIdentifier=None, SnapshotIdentifier=None, SnapshotClusterIdentifier=None, Port=None, AvailabilityZone=None, AllowVersionUpgrade=None, ClusterSubnetGroupName=None, PubliclyAccessible=None, OwnerAccount=None, HsmClientCertificateIdentifier=None, HsmConfigurationIdentifier=None, ElasticIp=None, ClusterParameterGroupName=None, ClusterSecurityGroups=None, VpcSecurityGroupIds=None, PreferredMaintenanceWindow=None, AutomatedSnapshotRetentionPeriod=None, KmsKeyId=None, NodeType=None, EnhancedVpcRouting=None, AdditionalInfo=None, IamRoles=None):
"""
Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.
If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.
For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
:example: response = client.restore_from_cluster_snapshot(
ClusterIdentifier='string',
SnapshotIdentifier='string',
SnapshotClusterIdentifier='string',
Port=123,
AvailabilityZone='string',
AllowVersionUpgrade=True|False,
ClusterSubnetGroupName='string',
PubliclyAccessible=True|False,
OwnerAccount='string',
HsmClientCertificateIdentifier='string',
HsmConfigurationIdentifier='string',
ElasticIp='string',
ClusterParameterGroupName='string',
ClusterSecurityGroups=[
'string',
],
VpcSecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
AutomatedSnapshotRetentionPeriod=123,
KmsKeyId='string',
NodeType='string',
EnhancedVpcRouting=True|False,
AdditionalInfo='string',
IamRoles=[
'string',
]
)
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]
The identifier of the cluster that will be created from restoring the snapshot.
Constraints:
Must contain from 1 to 63 alphanumeric characters or hyphens.
Alphabetic characters must be lowercase.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Must be unique for all clusters within an AWS account.
:type SnapshotIdentifier: string
:param SnapshotIdentifier: [REQUIRED]
The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive.
Example: my-snapshot-id
:type SnapshotClusterIdentifier: string
:param SnapshotClusterIdentifier: The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
:type Port: integer
:param Port: The port number on which the cluster accepts connections.
Default: The same port as the original cluster.
Constraints: Must be between 1115 and 65535 .
:type AvailabilityZone: string
:param AvailabilityZone: The Amazon EC2 Availability Zone in which to restore the cluster.
Default: A random, system-chosen Availability Zone.
Example: us-east-1a
:type AllowVersionUpgrade: boolean
:param AllowVersionUpgrade: If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.
Default: true
:type ClusterSubnetGroupName: string
:param ClusterSubnetGroupName: The name of the subnet group where you want to cluster restored.
A snapshot of cluster in VPC can be restored only in VPC. Therefore, you must provide subnet group name where you want the cluster restored.
:type PubliclyAccessible: boolean
:param PubliclyAccessible: If true , the cluster can be accessed from a public network.
:type OwnerAccount: string
:param OwnerAccount: The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.
:type HsmClientCertificateIdentifier: string
:param HsmClientCertificateIdentifier: Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
:type HsmConfigurationIdentifier: string
:param HsmConfigurationIdentifier: Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
:type ElasticIp: string
:param ElasticIp: The elastic IP (EIP) address for the cluster.
:type ClusterParameterGroupName: string
:param ClusterParameterGroupName: The name of the parameter group to be associated with this cluster.
Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups .
Constraints:
Must be 1 to 255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
:type ClusterSecurityGroups: list
:param ClusterSecurityGroups: A list of security groups to be associated with this cluster.
Default: The default cluster security group for Amazon Redshift.
Cluster security groups only apply to clusters outside of VPCs.
(string) --
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.
Default: The default VPC security group is associated with the cluster.
VPC security groups only apply to clusters in VPCs.
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The weekly time range (in UTC) during which automated cluster maintenance can occur.
Format: ddd:hh24:mi-ddd:hh24:mi
Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Minimum 30-minute window.
:type AutomatedSnapshotRetentionPeriod: integer
:param AutomatedSnapshotRetentionPeriod: The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot .
Default: The value selected for the cluster from which the snapshot was taken.
Constraints: Must be a value from 0 to 35.
:type KmsKeyId: string
:param KmsKeyId: The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.
:type NodeType: string
:param NodeType: The node type that the restored cluster will be provisioned with.
Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds2.xlarge into ds1.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide
:type EnhancedVpcRouting: boolean
:param EnhancedVpcRouting: An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
:type AdditionalInfo: string
:param AdditionalInfo: Reserved.
:type IamRoles: list
:param IamRoles: A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.
A cluster can have up to 10 IAM roles associated at any time.
(string) --
:rtype: dict
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
]
}
}
:returns:
available
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass | 0.003796 |
def run_hybrid(wf, selector, workers):
"""
Returns the result of evaluating the workflow; runs through several
supplied workers in as many threads.
:param wf:
Workflow to compute
:type wf: :py:class:`Workflow` or :py:class:`PromisedObject`
:param selector:
A function selecting the worker that should be run, given a hint.
:param workers:
A dictionary of workers
:returns:
result of running the workflow
"""
worker = hybrid_threaded_worker(selector, workers)
return Scheduler().run(worker, get_workflow(wf)) | 0.001706 |
def remove(attributes, properties):
"""Returns a property sets which include all the elements
in 'properties' that do not have attributes listed in 'attributes'."""
if isinstance(attributes, basestring):
attributes = [attributes]
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
attributes_new = feature.attributes(get_grist(e))
has_common_features = 0
for a in attributes_new:
if a in attributes:
has_common_features = 1
break
if not has_common_features:
            result.append(e)
return result | 0.001443 |
def add(self, *dic):
'''add a config to StartCalendarInterval.
Args:
*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.
'''
dicList = list(flatten(dic))
# for every dict in the list passed in
for d in dicList:
# make a dict single (list of pairs)
di = []
for k in d:
# checkKey(k, self.keyWord)
di.append(Pair(k, IntegerSingle(d[k])))
dictSingle = DictSingle(di)
# append dict single to array single's value
self._add([dictSingle], self.l) | 0.003968 |
def qteRunMacro(self, macroName: str, widgetObj: QtGui.QWidget=None,
keysequence: QtmacsKeysequence=None):
"""
Queue a previously registered macro for execution once the
event loop is idle.
The reason for queuing macros in the first place, instead of
running them straight away, is to ensure that the event loop
updates all the widgets in between any two macros. This will
avoid many spurious and hard to find bugs due to macros
assuming that all user interface elements have been updated
when in fact they were not.
|Args|
* ``macroName`` (**str**): name of macro.
* ``widgetObj`` (**QWidget**): widget (if any) on which the
macro should operate.
* ``keysequence`` (**QtmacsKeysequence**): key sequence that
triggered the macro.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Add the new macro to the queue and call qteUpdate to ensure
# that the macro is processed once the event loop is idle again.
self._qteMacroQueue.append((macroName, widgetObj, keysequence))
self.qteUpdate() | 0.005529 |
def listen(self, port=None):
"""Start listening for incoming connections.
A request handler must have already been specified with
``TChannel.host``.
:param port:
An explicit port to listen on. This is unnecessary when advertising
on Hyperbahn.
:returns:
Returns immediately.
:raises AlreadyListeningError:
If listen was already called.
"""
if self.is_listening():
raise AlreadyListeningError(
"listen has already been called"
)
if port:
assert not self._port, "Port has already been set."
self._port = int(port)
assert self._handler, "Call .host with a RequestHandler first"
server = TChannelServer(self)
bind_sockets_kwargs = {
'port': self._port,
# ipv6 causes random address already in use (socket.error w errno
# == 98) when getaddrinfo() returns multiple values
# @see https://github.com/uber/tchannel-python/issues/256
'family': socket.AF_INET,
}
if self._reuse_port is True:
# allow multiple processes to share the same port,
# this is really useful in a world where services launch N
# processes per container/os-space, where N is
# the amount of cpus for example
bind_sockets_kwargs['reuse_port'] = True
sockets = bind_sockets(**bind_sockets_kwargs)
assert sockets, "No sockets bound for port %d" % self._port
# If port was 0, the OS probably assigned something better.
self._port = sockets[0].getsockname()[1]
server.add_sockets(sockets)
# assign server so we don't listen twice
self._server = server | 0.001098 |
def admin_view_url(admin_site: AdminSite,
obj,
view_type: str = "change",
current_app: str = None) -> str:
"""
Get a Django admin site URL for an object.
"""
app_name = obj._meta.app_label.lower()
model_name = obj._meta.object_name.lower()
pk = obj.pk
viewname = "admin:{}_{}_{}".format(app_name, model_name, view_type)
if current_app is None:
current_app = admin_site.name
url = reverse(viewname, args=[pk], current_app=current_app)
return url | 0.001821 |
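A hedged usage sketch; admin.site is Django's default admin site and `book` stands for an assumed model instance:

from django.contrib import admin

change_url = admin_view_url(admin.site, book)                      # .../book/<pk>/change/
delete_url = admin_view_url(admin.site, book, view_type="delete")  # .../book/<pk>/delete/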
def grab_selenium_chromedriver(redownload=False):
r"""
Automatically download selenium chrome driver if needed
CommandLine:
python -m utool.util_grabdata --test-grab_selenium_chromedriver:1
Example:
>>> # DISABLE_DOCTEST
>>> ut.grab_selenium_chromedriver()
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Chrome()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
Example1:
>>> # DISABLE_DOCTEST
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Firefox()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
"""
import utool as ut
import os
import stat
    # TODO: use a better download dir (but it must be in the PATH or selenium freaks out)
chromedriver_dpath = ut.ensuredir(ut.truepath('~/bin'))
chromedriver_fpath = join(chromedriver_dpath, 'chromedriver')
if not ut.checkpath(chromedriver_fpath) or redownload:
assert chromedriver_dpath in os.environ['PATH'].split(os.pathsep)
# TODO: make this work for windows as well
if ut.LINUX and ut.util_cplat.is64bit_python():
import requests
rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE', timeout=TIMEOUT)
assert rsp.status_code == 200
url = 'http://chromedriver.storage.googleapis.com/' + rsp.text.strip() + '/chromedriver_linux64.zip'
ut.grab_zipped_url(url, download_dir=chromedriver_dpath, redownload=True)
else:
raise AssertionError('unsupported chrome driver getter script')
if not ut.WIN32:
st = os.stat(chromedriver_fpath)
os.chmod(chromedriver_fpath, st.st_mode | stat.S_IEXEC)
ut.assert_exists(chromedriver_fpath)
os.environ['webdriver.chrome.driver'] = chromedriver_fpath
return chromedriver_fpath | 0.002233 |
def _write_metrics(self, iteration:int, last_metrics:MetricsList, start_idx:int=2)->None:
"Writes training metrics to Tensorboard."
recorder = self.learn.recorder
for i, name in enumerate(recorder.names[start_idx:]):
if last_metrics is None or len(last_metrics) < i+1: return
scalar_value = last_metrics[i]
self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration) | 0.024775 |
def make_event_filter(self):
"""Create a new event filter."""
event_filter = EventFilter(
self.event_name,
self.event,
self.filters,
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter | 0.005865 |
def prepare_question_encoder(inputs, hparams):
"""Prepare question encoder.
Args:
inputs: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
"""
encoder_input = inputs
# Usual case - not a packed dataset.
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
if hparams.pos == "timing":
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
None)
return (encoder_input, encoder_self_attention_bias) | 0.012168 |
def get_adjacency_matrix(df_connected):
'''
Return matrix where $a_{i,j} = 1$ indicates polygon $i$ is connected to
polygon $j$.
Also, return mapping (and reverse mapping) from original keys in
`df_connected` to zero-based integer index used for matrix rows and
columns.
'''
sorted_path_keys = np.sort(np.unique(df_connected[['source', 'target']]
.values.ravel()))
indexed_paths = pd.Series(sorted_path_keys)
path_indexes = pd.Series(indexed_paths.index, index=sorted_path_keys)
adjacency_matrix = np.zeros((path_indexes.shape[0], ) * 2, dtype=int)
for i_key, j_key in df_connected[['source', 'target']].values:
i, j = path_indexes.loc[[i_key, j_key]]
adjacency_matrix[i, j] = 1
adjacency_matrix[j, i] = 1
return adjacency_matrix, indexed_paths, path_indexes | 0.00114 |
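A small worked example of the helper above, using a toy connectivity table in which polygon 'a' touches 'b' and 'b' touches 'c':

import pandas as pd

df_connected = pd.DataFrame({'source': ['a', 'b'], 'target': ['b', 'c']})
A, indexed_paths, path_indexes = get_adjacency_matrix(df_connected)
# A == [[0, 1, 0],
#       [1, 0, 1],
#       [0, 1, 0]]   with rows/columns ordered 'a', 'b', 'c'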