code | docstring |
---|---|
def get_nested_streams(dmap):
"""Recurses supplied DynamicMap to find all streams
Args:
dmap: DynamicMap to recurse to look for streams
Returns:
List of streams that were found
"""
return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams}) | Recurses supplied DynamicMap to find all streams
Args:
dmap: DynamicMap to recurse to look for streams
Returns:
List of streams that were found |
def create_folder(query, default_name=None, default_path=None):
"""Shows a user dialog for folder creation
A dialog is opened with the prompt `query`. The current path is set to the last path that was opened/created. The
roots of all libraries are added to the list of shortcut folders.
:param str query: Prompt asking the user for a specific folder
:param str default_name: Default name of the folder to be created
:param str default_path: Path in which the folder is created if the user doesn't specify a path
:return: Path created by the user or `default_path`\`default_name` if no path was specified or None if none of the
paths is valid
:rtype: str
"""
from gi.repository import Gtk
from os.path import expanduser, dirname, join, exists, isdir
from rafcon.core.storage.storage import STATEMACHINE_FILE
from rafcon.gui.singleton import main_window_controller
from rafcon.gui.runtime_config import global_runtime_config
last_path = global_runtime_config.get_config_value('LAST_PATH_OPEN_SAVE', "")
if last_path and isdir(last_path) and not exists(join(last_path, STATEMACHINE_FILE)):
pass
elif last_path:
last_path = dirname(last_path)
else:
last_path = expanduser('~')
dialog = Gtk.FileChooserDialog(query,
None,
Gtk.FileChooserAction.CREATE_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
# Allows confirming with Enter and double-click
dialog.set_default_response(Gtk.ResponseType.OK)
if main_window_controller:
dialog.set_transient_for(main_window_controller.view.get_top_widget())
dialog.set_current_folder(last_path)
if default_name:
dialog.set_current_name(default_name)
dialog.set_show_hidden(False)
# Add library roots to list of shortcut folders
add_library_root_path_to_shortcut_folders_of_dialog(dialog)
response = dialog.run()
if response != Gtk.ResponseType.OK:
dialog.destroy()
if default_path and default_name:
default = os.path.join(default_path, default_name)
if os.path.isdir(default):
return default
return None
path = dialog.get_filename()
dialog.destroy()
if os.path.isdir(path):
global_runtime_config.set_config_value('LAST_PATH_OPEN_SAVE', path)
return path
return None | Shows a user dialog for folder creation
A dialog is opened with the prompt `query`. The current path is set to the last path that was opened/created. The
roots of all libraries are added to the list of shortcut folders.
:param str query: Prompt asking the user for a specific folder
:param str default_name: Default name of the folder to be created
:param str default_path: Path in which the folder is created if the user doesn't specify a path
:return: Path created by the user or `default_path`\`default_name` if no path was specified or None if none of the
paths is valid
:rtype: str |
def get_line_flux(line_wave, wave, flux, **kwargs):
"""Interpolated flux at a given wavelength (calls np.interp)."""
return np.interp(line_wave, wave, flux, **kwargs) | Interpolated flux at a given wavelength (calls np.interp). |
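As a quick illustration of the interpolation call above, here is a minimal sketch with made-up wavelength and flux values (assuming numpy is imported as np):
import numpy as np

# Hypothetical wavelength grid and flux values, purely for illustration
wave = np.array([4000.0, 5000.0, 6000.0])
flux = np.array([1.0, 2.0, 3.0])

# Interpolating at 5500 falls halfway between the last two samples
line_flux = np.interp(5500.0, wave, flux)
print(line_flux)  # 2.5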
def get_certificate():
''' Read openvswitch certificate from disk '''
if os.path.exists(CERT_PATH):
log('Reading ovs certificate from {}'.format(CERT_PATH))
with open(CERT_PATH, 'r') as cert:
full_cert = cert.read()
begin_marker = "-----BEGIN CERTIFICATE-----"
end_marker = "-----END CERTIFICATE-----"
begin_index = full_cert.find(begin_marker)
end_index = full_cert.rfind(end_marker)
if end_index == -1 or begin_index == -1:
raise RuntimeError("Certificate does not contain valid begin"
" and end markers.")
full_cert = full_cert[begin_index:(end_index + len(end_marker))]
return full_cert
else:
log('Certificate not found', level=WARNING)
return None | Read openvswitch certificate from disk |
def _is_empty(self):
"""
True if this cell contains only a single empty ``<w:p>`` element.
"""
block_items = list(self.iter_block_items())
if len(block_items) > 1:
return False
p = block_items[0] # cell must include at least one <w:p> element
if len(p.r_lst) == 0:
return True
return False | True if this cell contains only a single empty ``<w:p>`` element. |
def fwdl_status_output_fwdl_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_state = ET.SubElement(output, "fwdl-state")
fwdl_state.text = kwargs.pop('fwdl_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def dump(file_name, predictions=None, algo=None, verbose=0):
"""A basic wrapper around Pickle to serialize a list of prediction and/or
an algorithm on drive.
What is dumped is a dictionary with keys ``'predictions'`` and ``'algo'``.
Args:
file_name(str): The name (with full path) specifying where to dump the
predictions.
predictions(list of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`): The
predictions to dump.
algo(:class:`Algorithm\
<surprise.prediction_algorithms.algo_base.AlgoBase>`, optional):
The algorithm to dump.
verbose(int): Level of verbosity. If ``1``, then a message indicates
that the dumping went successfully. Default is ``0``.
"""
dump_obj = {'predictions': predictions,
'algo': algo
}
pickle.dump(dump_obj, open(file_name, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
if verbose:
print('The dump has been saved as file', file_name) | A basic wrapper around Pickle to serialize a list of prediction and/or
an algorithm on drive.
What is dumped is a dictionary with keys ``'predictions'`` and ``'algo'``.
Args:
file_name(str): The name (with full path) specifying where to dump the
predictions.
predictions(list of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`): The
predictions to dump.
algo(:class:`Algorithm\
<surprise.prediction_algorithms.algo_base.AlgoBase>`, optional):
The algorithm to dump.
verbose(int): Level of verbosity. If ``1``, then a message indicates
that the dumping went successfully. Default is ``0``. |
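A small round-trip sketch of the dump format described above, using plain pickle and a temporary file; the prediction tuple and file name are invented for illustration:
import os
import pickle
import tempfile

# Same dictionary shape as the wrapper above serializes
dump_obj = {'predictions': [('user_1', 'item_1', 4.0)], 'algo': None}

path = os.path.join(tempfile.gettempdir(), 'example_dump.pkl')
with open(path, 'wb') as f:
    pickle.dump(dump_obj, f, protocol=pickle.HIGHEST_PROTOCOL)

with open(path, 'rb') as f:
    loaded = pickle.load(f)
print(loaded['predictions'])  # [('user_1', 'item_1', 4.0)]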
def _processing_controller_status(self):
"""Report on the status of the Processing Block queue(s)."""
LOG.info('Starting Processing Block queue reporter.')
while True:
LOG.info('PB queue length = %d', len(self._queue))
time.sleep(self._report_interval)
if active_count() != 5:
LOG.critical('Processing Controller not running '
'correctly! (%d/%d threads active)',
active_count(), 5) | Report on the status of the Processing Block queue(s). |
def start(name, call=None):
'''
Start a node
CLI Examples:
.. code-block:: bash
salt-cloud -a start myinstance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
log.info('Starting node %s', name)
instanceId = _get_node(name)['InstanceId']
params = {'Action': 'StartInstance',
'InstanceId': instanceId}
result = query(params)
return result | Start a node
CLI Examples:
.. code-block:: bash
salt-cloud -a start myinstance |
def state(self):
"""State of this service."""
if self._proto.HasField('state'):
return yamcsManagement_pb2.ServiceState.Name(self._proto.state)
return None | State of this service. |
def w(msg, *args, **kwargs):
'''
log a message at warn level;
'''
return logging.log(WARN, msg, *args, **kwargs) | log a message at warn level; |
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf) | copy data from file-like object fsrc to file-like object fdst |
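A minimal sketch of the chunked copy above, assuming the copyfileobj function defined here is in scope and using in-memory file objects with an arbitrary payload:
import io

src = io.BytesIO(b'x' * 50000)  # arbitrary payload
dst = io.BytesIO()

# Copy in 16 KiB chunks, exactly as the loop above does
copyfileobj(src, dst, length=16 * 1024)
assert dst.getvalue() == b'x' * 50000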
def refresh_oauth_credential(self):
"""Refresh session's OAuth 2.0 credentials if they are stale."""
if self.session.token_type == auth.SERVER_TOKEN_TYPE:
return
credential = self.session.oauth2credential
if credential.is_stale():
refresh_session = refresh_access_token(credential)
self.session = refresh_session | Refresh session's OAuth 2.0 credentials if they are stale. |
def E(self,*args,**kwargs):
"""
NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the radius
pot= potential instance or list of such instances
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
2011-04-18 - Added t - Bovy (NYU)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if isinstance(pot,Potential):
thispot= RZToplanarPotential(pot)
elif isinstance(pot,list):
thispot= []
for p in pot:
if isinstance(p,Potential): thispot.append(RZToplanarPotential(p))
else: thispot.append(p)
else:
thispot= pot
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return _evaluateplanarPotentials(thispot,thiso[0],
t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.
else:
return nu.array([_evaluateplanarPotentials(thispot,thiso[0,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2. for ii in range(len(t))]) | NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the radius
pot= potential instance or list of such instances
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
2011-04-18 - Added t - Bovy (NYU) |
def add_callback(self, name, func):
"""Add a callback when device events happen.
Args:
name (str): currently supports 'on_scan', 'on_report', 'on_trace' and 'on_disconnect'
func (callable): the function that should be called
"""
if name == 'on_scan':
events = ['device_seen']
def callback(_conn_string, _conn_id, _name, event):
func(self.id, event, event.get('validity_period', 60))
elif name == 'on_report':
events = ['report', 'broadcast']
def callback(_conn_string, conn_id, _name, event):
func(conn_id, event)
elif name == 'on_trace':
events = ['trace']
def callback(_conn_string, conn_id, _name, event):
func(conn_id, event)
elif name == 'on_disconnect':
events = ['disconnection']
def callback(_conn_string, conn_id, _name, _event):
func(self.id, conn_id)
else:
raise ArgumentError("Unknown callback type {}".format(name))
self._adapter.register_monitor([None], events, callback) | Add a callback when device events happen.
Args:
name (str): currently supports 'on_scan', 'on_report', 'on_trace' and 'on_disconnect'
func (callable): the function that should be called |
def get_account_tokens(self, address):
"""
Get the list of tokens that this address owns
"""
cur = self.db.cursor()
return namedb_get_account_tokens(cur, address) | Get the list of tokens that this address owns |
def push(self, undoObj):
"""
Add ``undoObj`` command to stack and run its ``commit`` method.
|Args|
* ``undoObj`` (**QtmacsUndoCommand**): the new command object.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Check type of input arguments.
if not isinstance(undoObj, QtmacsUndoCommand):
raise QtmacsArgumentError('undoObj', 'QtmacsUndoCommand',
inspect.stack()[0][3])
# Flag that the last action was not an undo action and push
# the command to the stack.
self._wasUndo = False
self._push(undoObj) | Add ``undoObj`` command to stack and run its ``commit`` method.
|Args|
* ``undoObj`` (**QtmacsUndoCommand**): the new command object.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type. |
async def fetch_lightpad(self, lpid):
"""Lookup details for a given lightpad"""
url = "https://production.plum.technology/v2/getLightpad"
data = {"lpid": lpid}
return await self.__post(url, data) | Lookup details for a given lightpad |
def Serialize(self, writer):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
super(Header, self).Serialize(writer)
writer.WriteByte(0) | Serialize full object.
Args:
writer (neo.IO.BinaryWriter): |
def filter_slaves(selfie, slaves):
"""
Remove slaves that are in an ODOWN or SDOWN state
also remove slaves that do not have 'ok' master-link-status
"""
return [(s['ip'], s['port']) for s in slaves
if not s['is_odown'] and
not s['is_sdown'] and
s['master-link-status'] == 'ok'] | Remove slaves that are in an ODOWN or SDOWN state
also remove slaves that do not have 'ok' master-link-status |
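A short sketch of the same filtering on hand-made sentinel dictionaries; the addresses and flags are invented, but the field names follow the comprehension above:
slaves = [
    {'ip': '10.0.0.1', 'port': 6380, 'is_odown': False, 'is_sdown': False,
     'master-link-status': 'ok'},
    {'ip': '10.0.0.2', 'port': 6380, 'is_odown': True, 'is_sdown': False,
     'master-link-status': 'ok'},
]

# Only the healthy replica survives the filter
alive = [(s['ip'], s['port']) for s in slaves
         if not s['is_odown'] and not s['is_sdown']
         and s['master-link-status'] == 'ok']
print(alive)  # [('10.0.0.1', 6380)]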
def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.label
break
return super(SmartFormMixin, self).lookup_field_label(context, field, default=default) | Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name. |
def custom_background_code():
""" Custom code run in a background thread. Prints the current block height.
This function is run in a daemonized thread, which means it can be instantly killed at any
moment, whenever the main thread quits. If you need more safety, don't use a daemonized
thread and handle exiting this thread in another way (eg. with signals and events).
"""
while True:
logger.info("Block %s / %s", str(Blockchain.Default().Height), str(Blockchain.Default().HeaderHeight))
sleep(15) | Custom code run in a background thread. Prints the current block height.
This function is run in a daemonized thread, which means it can be instantly killed at any
moment, whenever the main thread quits. If you need more safety, don't use a daemonized
thread and handle exiting this thread in another way (eg. with signals and events). |
def compose(f: Callable[[Any], Monad], g: Callable[[Any], Monad]) -> Callable[[Any], Monad]:
r"""Monadic compose function.
Right-to-left Kleisli composition of two monadic functions.
(<=<) :: Monad m => (b -> m c) -> (a -> m b) -> a -> m c
f <=< g = \x -> g x >>= f
"""
return lambda x: g(x).bind(f) | r"""Monadic compose function.
Right-to-left Kleisli composition of two monadic functions.
(<=<) :: Monad m => (b -> m c) -> (a -> m b) -> a -> m c
f <=< g = \x -> g x >>= f |
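A self-contained sketch of the Kleisli composition above, assuming compose is in scope and using a hypothetical Just wrapper that only provides a bind method:
class Just:
    """Minimal monad-like wrapper with a bind method (illustrative only)."""
    def __init__(self, value):
        self.value = value

    def bind(self, func):
        return func(self.value)

half = lambda x: Just(x / 2)   # b -> m c
inc = lambda x: Just(x + 1)    # a -> m b

# compose(half, inc) runs inc first, then binds half: (10 + 1) / 2 == 5.5
kleisli = compose(half, inc)
print(kleisli(10).value)  # 5.5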
def get_critical_path_timings(self):
"""
Get the cumulative timings of each goal and all of the goals it (transitively) depended on.
"""
setup_workunit = WorkUnitLabel.SETUP.lower()
transitive_dependencies = dict()
for goal_info in self._sorted_goal_infos:
deps = transitive_dependencies.setdefault(goal_info.goal.name, set())
for dep in goal_info.goal_dependencies:
deps.add(dep.name)
deps.update(transitive_dependencies.get(dep.name))
# Add setup workunit as a dep manually, as it's unaccounted for otherwise.
deps.add(setup_workunit)
raw_timings = dict()
for entry in self.cumulative_timings.get_all():
raw_timings[entry["label"]] = entry["timing"]
critical_path_timings = AggregatedTimings()
def add_to_timings(goal, dep):
tracking_label = get_label(goal)
timing_label = get_label(dep)
critical_path_timings.add_timing(tracking_label, raw_timings.get(timing_label, 0.0))
def get_label(dep):
return "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, dep)
# Add setup workunit to critical_path_timings manually, as it's unaccounted for otherwise.
add_to_timings(setup_workunit, setup_workunit)
for goal, deps in transitive_dependencies.items():
add_to_timings(goal, goal)
for dep in deps:
add_to_timings(goal, dep)
return critical_path_timings | Get the cumulative timings of each goal and all of the goals it (transitively) depended on. |
def create_response_dic(self):
"""
Generate the dic that will be jsonify. Checking scopes given vs
registered.
Returns a dic.
"""
dic = {}
for scope in self.scopes:
if scope in self._scopes_registered():
dic.update(getattr(self, 'scope_' + scope)())
dic = self._clean_dic(dic)
return dic | Generate the dic that will be jsonify. Checking scopes given vs
registered.
Returns a dic. |
def getTypeWidth(self, dtype: "HdlType", do_eval=False) -> Tuple[int, str, bool]:
"""
:return: tuple (current value of width,
string of value (can be ID or int),
Flag which specifies if width of signal is locked
or can be changed by parameter)
"""
raise NotImplementedError(
"Implement this method in your HdlType classes") | :return: tuple (current value of width,
string of value (can be ID or int),
Flag which specifies if width of signal is locked
or can be changed by parameter) |
def set_hostname(hostname=None, deploy=False):
'''
Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
hostname (str): The hostname to set
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_hostname newhostname
salt '*' panos.set_hostname newhostname deploy=True
'''
if not hostname:
raise CommandExecutionError("Hostname option must not be none.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system',
'element': '<hostname>{0}</hostname>'.format(hostname)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret | Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
hostname (str): The hostname to set
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_hostname newhostname
salt '*' panos.set_hostname newhostname deploy=True |
def _delete(self):
"""Deletes this AssessmentSection from database.
Will be called by AssessmentTaken._delete() for clean-up purposes.
"""
collection = JSONClientValidated('assessment',
collection='AssessmentSection',
runtime=self._runtime)
collection.delete_one({'_id': ObjectId(self.get_id().get_identifier())}) | Deletes this AssessmentSection from database.
Will be called by AssessmentTaken._delete() for clean-up purposes. |
def run_license_checker(config_path):
# type: (str) -> None
"""Generate table of installed packages and check for license
warnings based off user defined restricted license values.
:param config_path: str
:return:
"""
whitelist_licenses = _get_whitelist_licenses(config_path)
table = PrintTable(ROW_HEADERS)
warnings = []
for pkg in _get_packages():
allowed = pkg.license in whitelist_licenses
table.add_row((pkg.name, pkg.version, pkg.license, str(allowed)))
if not allowed:
warnings.append(pkg)
print(table)
print('{} RESTRICTED LICENSES DETECTED'.format(len(warnings))) | Generate table of installed packages and check for license
warnings based off user defined restricted license values.
:param config_path: str
:return: |
def create_unihan_table(columns, metadata):
"""Create table and return :class:`sqlalchemy.Table`.
Parameters
----------
columns : list
columns for table, e.g. ``['kDefinition', 'kCantonese']``
metadata : :class:`sqlalchemy.schema.MetaData`
Instance of sqlalchemy metadata
Returns
-------
:class:`sqlalchemy.schema.Table` :
Newly created table with columns and index.
"""
if TABLE_NAME not in metadata.tables:
table = Table(TABLE_NAME, metadata)
table.append_column(Column('char', String(12), primary_key=True))
table.append_column(Column('ucn', String(12), primary_key=True))
for column_name in columns:
col = Column(column_name, String(256), nullable=True)
table.append_column(col)
return table
else:
return Table(TABLE_NAME, metadata) | Create table and return :class:`sqlalchemy.Table`.
Parameters
----------
columns : list
columns for table, e.g. ``['kDefinition', 'kCantonese']``
metadata : :class:`sqlalchemy.schema.MetaData`
Instance of sqlalchemy metadata
Returns
-------
:class:`sqlalchemy.schema.Table` :
Newly created table with columns and index. |
def angleOfView2(x,y, b, x0=None,y0=None):
'''
Corrected AngleOfView equation by Koentges (via mail from 14/02/2017)
b --> distance between the camera and the module in m
x0 --> viewable width in the module plane of the camera in m
y0 --> viewable height in the module plane of the camera in m
x,y --> pixel position [m] from top left
'''
if x0 is None:
x0 = x[-1,-1]
if y0 is None:
y0 = y[-1,-1]
return np.cos( np.arctan( np.sqrt(
( (x-x0/2)**2+(y-y0/2)**2 ) ) /b ) ) | Corrected AngleOfView equation by Koentges (via mail from 14/02/2017)
b --> distance between the camera and the module in m
x0 --> viewable width in the module plane of the camera in m
y0 --> viewable height in the module plane of the camera in m
x,y --> pixel position [m] from top left |
def verify(info, directory_path):
"""Return True if the checksum values in the torrent file match the
computed checksum values of downloaded file(s) in the directory and if
each file has the correct length as specified in the torrent file.
"""
base_path = os.path.join(directory_path, info['name'])
if 'length' in info:
if os.stat(base_path).st_size != info['length']:
return False
getfile = lambda: open(base_path, 'rb')
else:
assert 'files' in info, 'invalid torrent file'
for f in info['files']:
p = os.path.join(base_path, *f['path'])
if os.stat(p).st_size != f['length']:
return False
getfile = lambda: ConcatenatedFile(base_path, info['files'])
with getfile() as f:
return compare_checksum(info, f) | Return True if the checksum values in the torrent file match the
computed checksum values of downloaded file(s) in the directory and if
each file has the correct length as specified in the torrent file. |
def run(self, queue):
"""
Create the fullname, and store a a message serving as result in the job
"""
# add some random time to simulate a long job
time.sleep(random.random())
# compute the fullname
obj = self.get_object()
obj.fullname.hset('%s %s' % tuple(obj.hmget('firstname', 'lastname')))
# this will the "result" of the job
result = 'Created fullname for Person %s: %s' % (obj.pk.get(), obj.fullname.hget())
# save the result of the callback in the job itself
self.result.set(result)
# return the result for future use in the worker
return result | Create the fullname, and store a message serving as the result in the job |
def to_dict(self, index=True, ordered=False):
"""
Returns a dict where the keys are the column names and the values are lists of the values for that column.
:param index: If True then include the index in the dict with the index_name as the key
:param ordered: If True then return an OrderedDict() to preserve the order of the columns in the DataFrame
:return: dict or OrderedDict()
"""
result = OrderedDict() if ordered else dict()
if index:
result.update({self._index_name: self._index})
if ordered:
data_dict = [(column, self._data[i]) for i, column in enumerate(self._columns)]
else:
data_dict = {column: self._data[i] for i, column in enumerate(self._columns)}
result.update(data_dict)
return result | Returns a dict where the keys are the column names and the values are lists of the values for that column.
:param index: If True then include the index in the dict with the index_name as the key
:param ordered: If True then return an OrderedDict() to preserve the order of the columns in the DataFrame
:return: dict or OrderedDict() |
def expected_counts_stationary(T, n, mu=None):
r"""Expected transition counts for Markov chain in equilibrium.
Since mu is stationary for T we have
.. math::
E(C^{(N)})=N diag(mu)*T.
Parameters
----------
T : (M, M) ndarray
Transition matrix.
n : int
Number of steps for chain.
mu : (M,) ndarray (optional)
Stationary distribution for T. If mu is not specified it will be
computed via diagonalization of T.
Returns
-------
EC : numpy array, shape=(n,n)
Expected value for transition counts after a propagation of n steps.
"""
if n <= 0:
EC = np.zeros(T.shape)
return EC
else:
if mu is None:
mu = stationary_distribution(T)
EC = n * mu[:, np.newaxis] * T
return EC | r"""Expected transition counts for Markov chain in equilibrium.
Since mu is stationary for T we have
.. math::
E(C^{(N)})=N diag(mu)*T.
Parameters
----------
T : (M, M) ndarray
Transition matrix.
n : int
Number of steps for chain.
mu : (M,) ndarray (optional)
Stationary distribution for T. If mu is not specified it will be
computed via diagonalization of T.
Returns
-------
EC : numpy array, shape=(n,n)
Expected value for transition counts after a propagation of n steps. |
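A worked numerical check of E(C^(N)) = N diag(mu) T on a symmetric two-state chain whose stationary distribution is [0.5, 0.5]; the matrix values are illustrative:
import numpy as np

# Two-state chain; the stationary distribution is mu = [0.5, 0.5] by symmetry
T = np.array([[0.9, 0.1],
              [0.1, 0.9]])
mu = np.array([0.5, 0.5])
n = 100

# E(C^(N)) = N * diag(mu) * T, written with broadcasting as in the function above
EC = n * mu[:, np.newaxis] * T
print(EC)
# [[45.  5.]
#  [ 5. 45.]]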
def _set_preprovision(self, v, load=False):
"""
Setter method for preprovision, mapped from YANG variable /preprovision (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_preprovision is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_preprovision() directly.
YANG Description: Preprovision profile
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=preprovision.preprovision, is_container='container', presence=False, yang_name="preprovision", rest_name="preprovision", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Preprovision profile', u'hidden': u'full', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """preprovision must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=preprovision.preprovision, is_container='container', presence=False, yang_name="preprovision", rest_name="preprovision", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Preprovision profile', u'hidden': u'full', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='container', is_config=True)""",
})
self.__preprovision = t
if hasattr(self, '_set'):
self._set() | Setter method for preprovision, mapped from YANG variable /preprovision (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_preprovision is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_preprovision() directly.
YANG Description: Preprovision profile |
def _find_single(self, match_class, **keywds):
"""implementation details"""
self._logger.debug('find single query execution - started')
start_time = timeit.default_timer()
norm_keywds = self.__normalize_args(**keywds)
decl_matcher = self.__create_matcher(match_class, **norm_keywds)
dtype = self.__findout_decl_type(match_class, **norm_keywds)
recursive_ = self.__findout_recursive(**norm_keywds)
decls = self.__findout_range(norm_keywds['name'], dtype, recursive_)
found = matcher.get_single(decl_matcher, decls, False)
self._logger.debug(
'find single query execution - done( %f seconds )',
(timeit.default_timer() - start_time))
return found | implementation details |
def _group_by_batches(samples, check_fn):
"""Group calls by batches, processing families together during ensemble calling.
"""
batch_groups = collections.defaultdict(list)
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch_groups[multi.get_batch_for_key(data)].append(data)
else:
extras.append([data])
return batch_groups, extras | Group calls by batches, processing families together during ensemble calling. |
def execute_task(bufs):
"""Deserialize the buffer and execute the task.
Returns the result or throws exception.
"""
user_ns = locals()
user_ns.update({'__builtins__': __builtins__})
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
# We might need to look into callability of the function from itself
# since we change its name in the new namespace
prefix = "parsl_"
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
user_ns.update({fname: f,
argname: args,
kwargname: kwargs,
resultname: resultname})
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
# logger.debug("[RUNNER] Executing: {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught exception; will raise it: {}".format(e), exc_info=True)
raise e
else:
# logger.debug("[RUNNER] Result: {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname) | Deserialize the buffer and execute the task.
Returns the result or throws exception. |
def backward_word_extend_selection(self, e): #
u"""Move back to the start of the current or previous word. Words are
composed of letters and digits."""
self.l_buffer.backward_word_extend_selection(self.argument_reset)
self.finalize() | u"""Move back to the start of the current or previous word. Words are
composed of letters and digits. |
def leave_room(self, sid, room, namespace=None):
"""Leave a room.
The only difference with the :func:`socketio.Server.leave_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.leave_room(sid, room,
namespace=namespace or self.namespace) | Leave a room.
The only difference with the :func:`socketio.Server.leave_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used. |
def calculate_mean(samples, weights):
r'''Calculate the mean of weighted samples (like the output of an
importance-sampling run).
:param samples:
Matrix-like numpy array; the samples to be used.
:param weights:
Vector-like numpy array; the (unnormalized) importance weights.
'''
assert len(samples) == len(weights), "The number of samples (got %i) must equal the number of weights (got %i)." % (len(samples),len(weights))
return _np.average(samples, axis=0, weights=weights) | r'''Calculate the mean of weighted samples (like the output of an
importance-sampling run).
:param samples:
Matrix-like numpy array; the samples to be used.
:param weights:
Vector-like numpy array; the (unnormalized) importance weights. |
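A tiny weighted-mean sketch matching the np.average call above; the samples and weights are invented:
import numpy as np

samples = np.array([[1.0, 2.0],
                    [3.0, 4.0]])
weights = np.array([1.0, 3.0])

# Row 2 counts three times as much: ((1 + 3*3)/4, (2 + 3*4)/4)
print(np.average(samples, axis=0, weights=weights))  # [2.5 3.5]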
def negate(arg):
"""
Negate a numeric expression
Parameters
----------
arg : numeric value expression
Returns
-------
negated : type of caller
"""
op = arg.op()
if hasattr(op, 'negate'):
result = op.negate()
else:
result = ops.Negate(arg)
return result.to_expr() | Negate a numeric expression
Parameters
----------
arg : numeric value expression
Returns
-------
negated : type of caller |
def __process_by_ccore(self):
"""!
@brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).
"""
cure_data_pointer = wrapper.cure_algorithm(self.__pointer_data, self.__number_cluster,
self.__number_represent_points, self.__compression)
self.__clusters = wrapper.cure_get_clusters(cure_data_pointer)
self.__representors = wrapper.cure_get_representors(cure_data_pointer)
self.__means = wrapper.cure_get_means(cure_data_pointer)
wrapper.cure_data_destroy(cure_data_pointer) | !
@brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library). |
def binary_operation_math(self, rule, left, right, **kwargs):
"""
Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.binary_operation_math` interface.
"""
if isinstance(left, NumberRule) and isinstance(right, NumberRule):
return self._calculate_operation_math(rule, left, right)
return self._compile_operation_rule(
rule,
left,
right,
MathBinOpRule
) | Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.binary_operation_math` interface. |
def btc_script_classify(scriptpubkey, private_key_info=None):
"""
Classify a scriptpubkey, optionally also using the private key info that will generate the corresponding scriptsig/witness
Return None if not known (nonstandard)
"""
if scriptpubkey.startswith("76a914") and scriptpubkey.endswith("88ac") and len(scriptpubkey) == 50:
return 'p2pkh'
elif scriptpubkey.startswith("a914") and scriptpubkey.endswith("87") and len(scriptpubkey) == 46:
# maybe p2sh-p2wpkh or p2sh-p2wsh?
if private_key_info:
if btc_is_singlesig_segwit(private_key_info):
return 'p2sh-p2wpkh'
elif btc_is_multisig_segwit(private_key_info):
return 'p2sh-p2wsh'
return 'p2sh'
elif scriptpubkey.startswith('0014') and len(scriptpubkey) == 44:
return 'p2wpkh'
elif scriptpubkey.startswith('0020') and len(scriptpubkey) == 68:
return 'p2wsh'
script_tokens = btc_script_deserialize(scriptpubkey)
if len(script_tokens) == 0:
return None
if script_tokens[0] == OPCODE_VALUES['OP_RETURN']:
return "nulldata"
elif script_tokens[-1] == OPCODE_VALUES['OP_CHECKMULTISIG']:
return "multisig"
elif len(script_tokens) == 2 and script_tokens[-1] == OPCODE_VALUES["OP_CHECKSIG"]:
return "p2pk"
return None | Classify a scriptpubkey, optionally also using the private key info that will generate the corresponding scriptsig/witness
Return None if not known (nonstandard) |
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
self.stream.write("{:.3f}\t{}\t{}\n".format(t_output.score,
C.TOKEN_SEPARATOR.join(t_input.tokens),
t_output.translation))
self.stream.flush() | :param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation. |
def unapply_top_patch(self, force=False):
""" Unapply top patch """
self._check(force)
patch = self.db.top_patch()
self._unapply_patch(patch)
self.db.save()
self.unapplied(self.db.top_patch()) | Unapply top patch |
def wb_db020(self, value=None):
""" Corresponds to IDD Field `wb_db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `wb_db020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `wb_db020`'.format(value))
self._wb_db020 = value | Corresponds to IDD Field `wb_db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `wb_db020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
def remove_objects_not_in(self, objects_to_keep, verbosity):
"""
Delete all the objects in the database that are not in objects_to_keep.
- objects_to_keep: A map where the keys are classes, and the values are a
set of the objects of that class we should keep.
"""
for class_ in objects_to_keep.keys():
current = class_.objects.all()
current_ids = set([x.pk for x in current])
keep_ids = set([x.pk for x in objects_to_keep[class_]])
remove_these_ones = current_ids.difference(keep_ids)
if remove_these_ones:
for obj in current:
if obj.pk in remove_these_ones:
obj.delete()
if verbosity >= 2:
print("Deleted object: %s" % six.u(obj))
if verbosity > 0 and remove_these_ones:
num_deleted = len(remove_these_ones)
if num_deleted > 1:
type_deleted = six.u(class_._meta.verbose_name_plural)
else:
type_deleted = six.u(class_._meta.verbose_name)
print("Deleted %s %s" % (str(num_deleted), type_deleted)) | Delete all the objects in the database that are not in objects_to_keep.
- objects_to_keep: A map where the keys are classes, and the values are a
set of the objects of that class we should keep. |
def setParameter(self, parameterName, index, parameterValue):
"""
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
"""
if hasattr(self, parameterName):
setattr(self, parameterName, parameterValue)
else:
raise Exception("Unknown parameter: " + parameterName) | Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here. |
def list_runtime(self, scope="", skip_policy_evaluation=True, start_time=None, end_time=None):
'''**Description**
List runtime containers
**Arguments**
- scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. (like: 'host.domain = "example.com" and container.image != "alpine:latest"')
- skip_policy_evaluation: If true, no policy evaluations will be triggered for the images.
- start_time: Start of the time range (integer of unix time).
- end_time: End of the time range (integer of unix time).
**Success Return Value**
A JSON object representing the list of runtime containers.
'''
containers = {
'scope': scope,
'skipPolicyEvaluation': skip_policy_evaluation
}
if start_time or end_time:
containers['time'] = {}
containers['time']['from'] = int(start_time * 1000000) if start_time else 0
end_time = end_time if end_time else time.time()
containers['time']['to'] = int(end_time * 1000000)
url = self.url + '/api/scanning/v1/query/containers'
data = json.dumps(containers)
res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] | **Description**
List runtime containers
**Arguments**
- scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. (like: 'host.domain = "example.com" and container.image != "alpine:latest"')
- skip_policy_evaluation: If true, no policy evaluations will be triggered for the images.
- start_time: Start of the time range (integer of unix time).
- end_time: End of the time range (integer of unix time).
**Success Return Value**
A JSON object representing the list of runtime containers. |
def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
vsiid, filter_frmt, gid, mac, vlan, oui_id,
oui_data):
"""Constructs and Sends the VDP Query Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: Reply from vdptool
"""
if not self.is_ncb:
LOG.error("EVB cannot be set on NB")
return
vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
typeid_ver, vsiid_frmt, vsiid,
filter_frmt, gid, mac, vlan,
None, None)
if len(vdp_key_str) == 0:
LOG.error("NULL List")
return
reply = self.run_vdptool(["-t", "-i", self.port_name, "-R", "-V", mode,
"-c", vdp_key_str['mode'],
"-c", vdp_key_str['mgrid'],
"-c", vdp_key_str['typeid'],
"-c", vdp_key_str['typeid_ver'],
"-c", vdp_key_str['vsiid']])
return reply | Constructs and Sends the VDP Query Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: Reply from vdptool |
def decompressBWTPoolProcess(tup):
'''
Individual process for decompression
'''
(inputDir, outputDir, startIndex, endIndex) = tup
if startIndex == endIndex:
return True
#load the thing we'll be extracting from
msbwt = MultiStringBWT.CompressedMSBWT()
msbwt.loadMsbwt(inputDir, None)
#open our output
outputBwt = np.load(outputDir+'/msbwt.npy', 'r+')
outputBwt[startIndex:endIndex] = msbwt.getBWTRange(startIndex, endIndex)
return True | Individual process for decompression |
def parse_gzip(file_path):
"""Return a decoded API to the data from a file path. File is gzip compressed.
:param file_path: the input file path. Data is gzip compressed.
:return an API to decoded data"""
newDecoder = MMTFDecoder()
newDecoder.decode_data(_unpack(gzip.open(file_path, "rb")))
return newDecoder | Return a decoded API to the data from a file path. File is gzip compressed.
:param file_path: the input file path. Data is gzip compressed.
:return an API to decoded data |
def artist_create(self, name, other_names_comma=None, group_name=None,
url_string=None, body=None):
"""Function to create an artist (Requires login) (UNTESTED).
Parameters:
name (str):
other_names_comma (str): List of alternative names for this
artist, comma delimited.
group_name (str): The name of the group this artist belongs to.
url_string (str): List of URLs associated with this artist,
whitespace or newline delimited.
body (str): DText that will be used to create a wiki entry at the
same time.
"""
params = {
'artist[name]': name,
'artist[other_names_comma]': other_names_comma,
'artist[group_name]': group_name,
'artist[url_string]': url_string,
'artist[body]': body,
}
return self.get('artists.json', params, method='POST', auth=True) | Function to create an artist (Requires login) (UNTESTED).
Parameters:
name (str):
other_names_comma (str): List of alternative names for this
artist, comma delimited.
group_name (str): The name of the group this artist belongs to.
url_string (str): List of URLs associated with this artist,
whitespace or newline delimited.
body (str): DText that will be used to create a wiki entry at the
same time. |
def main(doc, timeout, size, debug, allow_codes, whitelist):
"""
Examples:
simple call
$ vl README.md
Adding debug outputs
$ vl README.md --debug
Adding a custom timeout for each url. time in seconds.
$ vl README.md -t 3
Adding a custom size param, to throttle n requests at a time
$ vl README -s 1000
Skipping some error codes. This will allow 500 and 404 responses to
be ignored
$ vl README.md -a 500,404
Adding Whitelists
$ vl README.md -w server1.com,server2.com
"""
t0 = time.time()
links = [i[0] for i in LINK_RE.findall(doc.read())]
request_urls = []
counts = {}
for link in links:
# no static
if is_static(link):
STATICS.append(link)
continue
# no dupes
if link in counts:
counts[link] += 1
continue
else:
counts[link] = 1
parsed = urlparse(link)
# fix no scheme links
if not parsed.scheme:
link = 'http://{0}'.format(link)
# whitelisted
if whitelist:
exists = [i for i in whitelist if i in parsed.netloc]
if exists:
WHITELISTED.append(link)
continue
request_urls.append(link)
# removing dupes
counts_keys = counts.keys()
DUPES.extend([(i, counts[i]) for i in counts_keys if counts[i] > 1])
requests = (grequests.head(u, timeout=timeout, verify=False) for u in request_urls)
responses = grequests.imap(requests, exception_handler=handle_exception,
size=size)
for res in responses:
color = 'green'
if is_error_code(res.status_code):
if res.status_code not in allow_codes:
ERRORS.append((res.status_code, res.url))
color = 'red'
else:
WHITELISTED.append(res.url)
status = click.style(str(res.status_code), fg=color)
click.echo('[{}] {}'.format(status, res.url))
errors_len = len(ERRORS)
exceptions_len = len(EXCEPTIONS)
dupes_len = len(DUPES)
white_len = len(WHITELISTED)
if errors_len:
click.echo()
click.echo('Failed URLs:')
for code, url in ERRORS:
code = click.style(str(code), fg='red')
click.echo('[{0}] {1}'.format(code, url))
if exceptions_len and debug:
import ssl
click.echo('Exceptions raised:')
click.echo('Note: OpenSSL Version = {0}'.format(ssl.OPENSSL_VERSION))
click.secho('Check URLs for possible false positives', fg='yellow')
for url, exception in EXCEPTIONS:
click.echo('- {0}'.format(url))
click.secho('{0}'.format(exception), fg='red', bold=True)
if dupes_len and debug: # pragma: nocover
click.echo('Dupes:')
for url, count in DUPES:
click.secho('- {0} - {1} times'.format(url, count), fg='yellow',
bold=True)
if white_len and debug:
click.echo()
click.echo('Whitelisted (allowed codes and whitelisted param)')
for url in WHITELISTED:
click.secho('- {0}'.format(url), fg='magenta')
click.secho('Total Links Parsed {0}'.format(len(links)), fg='green')
click.secho('Total Errors {0}'.format(errors_len), fg='red')
click.secho('Total Exceptions {0}'.format(exceptions_len), fg='red')
click.secho('Total Dupes {0}'.format(dupes_len), fg='yellow')
click.secho('Total whitelisted {0}'.format(white_len), fg='yellow')
click.secho('Total static {0}'.format(len(STATICS)), fg='yellow')
if debug:
click.echo('Execution time: {0:.2f} seconds'.format(time.time() - t0))
if errors_len:
sys.exit(1) | Examples:
simple call
$ vl README.md
Adding debug outputs
$ vl README.md --debug
Adding a custom timeout for each url. time in seconds.
$ vl README.md -t 3
Adding a custom size param, to throttle n requests at a time
$ vl README -s 1000
Skipping some error codes. This will allow 500 and 404 responses to
be ignored
$ vl README.md -a 500,404
Adding Whitelists
$ vl README.md -w server1.com,server2.com |
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT)) | Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string |
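A sketch of just the message formatting performed above, without opening a socket; the command name and integer argument are examples only:
command = 'REF'
seq = 1
params = [290718208]  # example integer argument

params_str = []
for p in params:
    if isinstance(p, int):
        params_str.append('{:d}'.format(p))
    elif isinstance(p, str):
        params_str.append('"{:s}"'.format(p))

msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
print(repr(msg))  # 'AT*REF=1,290718208\r'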
def get_tree(self, process_name):
""" return tree that is managing time-periods for given process"""
for tree_name, tree in self.trees.items():
if process_name in tree:
return tree | return tree that is managing time-periods for given process |
def precheck(context):
"""
calls a function named "precheck_<key>" where <key> is context_key with '-' changed to '_'
(e.g. "precheck_ami_id")
Checking function should return True if OK, or raise RuntimeError w/ message if not
Args:
context: a populated EFVersionContext object
Returns:
True if the precheck passed, or if there was no precheck function for context.key
Raises:
RuntimeError if precheck failed, with explanatory message
"""
if context.noprecheck:
return True
func_name = "precheck_" + context.key.replace("-", "_")
if func_name in globals() and isfunction(globals()[func_name]):
return globals()[func_name](context)
else:
return True | calls a function named "precheck_<key>" where <key> is context_key with '-' changed to '_'
(e.g. "precheck_ami_id")
Checking function should return True if OK, or raise RuntimeError w/ message if not
Args:
context: a populated EFVersionContext object
Returns:
True if the precheck passed, or if there was no precheck function for context.key
Raises:
RuntimeError if precheck failed, with explanatory message |
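A minimal sketch of the name-based dispatch described above ('precheck_' plus the key with '-' replaced by '_'), using a plain dict as a stand-in context and a hypothetical checker:
from inspect import isfunction

def precheck_ami_id(context):
    # Stand-in checker: succeed when the context carries a value
    return bool(context.get('value'))

def dispatch_precheck(key, context):
    func_name = 'precheck_' + key.replace('-', '_')
    if func_name in globals() and isfunction(globals()[func_name]):
        return globals()[func_name](context)
    return True

print(dispatch_precheck('ami-id', {'value': 'ami-12345678'}))  # True
print(dispatch_precheck('unknown-key', {}))                    # True (no checker defined)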
def _runResponder(self, responder, request, command, identifier):
"""Run the responser function. If it succeeds, add the _answer key.
If it fails with an error known to the command, serialize the
error.
"""
d = defer.maybeDeferred(responder, **request)
def _addIdentifier(response):
"""Return the response with an ``_answer`` key.
"""
response["_answer"] = identifier
return response
def _serializeFailure(failure):
"""
If the failure is serializable by this AMP command, serialize it.
"""
key = failure.trap(*command.allErrors)
response = {
"_error_code": command.allErrors[key],
"_error_description": str(failure.value),
"_error": identifier
}
return response
d.addCallbacks(_addIdentifier, _serializeFailure)
return d | Run the responder function. If it succeeds, add the _answer key.
If it fails with an error known to the command, serialize the
error. |
def discover(service="ssdp:all", timeout=1, retries=2, ipAddress="239.255.255.250", port=1900):
"""Discovers UPnP devices in the local network.
Try to discover all devices in the local network which do support UPnP. The discovery process can fail
for various reasons and it is recommended to do at least two discoveries, which you can specify with the
``retries`` parameter.
The default ``service`` parameter tries to address all devices also if you know which kind of service type
you are looking for you should set it as some devices do not respond or respond differently otherwise.
:param service: the service type or list of service types of devices you look for
:type service: str or list[str]
:param float timeout: the socket timeout for each try
:param int retries: how often a discovery request should be sent
:param str ipAddress: the multicast ip address to use
:param int port: the port to use
:return: a list of DiscoveryResponse objects or empty if no device was found
:rtype: list[DiscoveryResponse]
Example:
::
results = discover()
for result in results:
print("Host: " + result.locationHost + " Port: " + result.locationPort + " Device definitions: " + \\
result.location)
.. seealso::
:class:`~simpletr64.DiscoveryResponse`, :meth:`~simpletr64.Discover.discoverParticularHost`
"""
socket.setdefaulttimeout(timeout)
messages = []
if isinstance(service, str):
services = [service]
elif isinstance(service, list):
services = service
for service in services:
message = 'M-SEARCH * HTTP/1.1\r\nMX: 5\r\nMAN: "ssdp:discover"\r\nHOST: ' + \
ipAddress + ':' + str(port) + '\r\n'
message += "ST: " + service + "\r\n\r\n"
messages.append(message)
responses = {}
for _ in range(retries):
# setup the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
# noinspection PyAssignmentToLoopOrWithParameter
for _ in range(2):
# send the messages with different service types
for message in messages:
# send message more often to make sure all devices will get it
sock.sendto(message.encode('utf-8'), (ipAddress, port))
while True:
try:
# read the message until timeout
data = sock.recv(1024)
except socket.timeout:
break
else:
# no time out, read the response data and create response object
response = DiscoveryResponse(data)
# filter duplicated responses
responses[response.location] = response
# return a list of all responses
return list(responses.values()) | Discovers UPnP devices in the local network.
Try to discover all devices in the local network which do support UPnP. The discovery process can fail
for various reasons and it is recommended to do at least two discoveries, which you can specify with the
``retries`` parameter.
The default ``service`` parameter tries to address all devices also if you know which kind of service type
you are looking for you should set it as some devices do not respond or respond differently otherwise.
:param service: the service type or list of service types of devices you look for
:type service: str or list[str]
:param float timeout: the socket timeout for each try
:param int retries: how often a discovery request should be sent
:param str ipAddress: the multicast ip address to use
:param int port: the port to use
:return: a list of DiscoveryResponse objects or empty if no device was found
:rtype: list[DiscoveryResponse]
Example:
::
results = discover()
for result in results:
print("Host: " + result.locationHost + " Port: " + result.locationPort + " Device definitions: " + \\
result.location)
.. seealso::
:class:`~simpletr64.DiscoveryResponse`, :meth:`~simpletr64.Discover.discoverParticularHost` |
def gpgga_to_dms(gpgga):
'''
Convert GPS coordinate in GPGGA format to degree/minute/second
Reference: http://us.cactii.net/~bb/gps.py
'''
deg_min, dmin = gpgga.split('.')
degrees = int(deg_min[:-2])
minutes = float('%s.%s' % (deg_min[-2:], dmin))
decimal = degrees + (minutes / 60)
return decimal | Convert GPS coordinate in GPGGA format to degree/minute/second
Reference: http://us.cactii.net/~bb/gps.py |
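A worked example of the conversion above: the GPGGA value 4916.45 means 49 degrees 16.45 minutes, roughly 49.2742 decimal degrees:
deg_min, dmin = '4916.45'.split('.')
degrees = int(deg_min[:-2])                       # 49
minutes = float('%s.%s' % (deg_min[-2:], dmin))   # 16.45
decimal = degrees + minutes / 60
print(round(decimal, 4))  # 49.2742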
def geodetic_distance(lons1, lats1, lons2, lats2, diameter=2*EARTH_RADIUS):
"""
Calculate the geodetic distance between two points or two collections
of points.
Parameters are coordinates in decimal degrees. They could be scalar
float numbers or numpy arrays, in which case they should "broadcast
together".
Implements http://williams.best.vwh.net/avform.htm#Dist
:returns:
Distance in km, floating point scalar or numpy array of such.
"""
lons1, lats1, lons2, lats2 = _prepare_coords(lons1, lats1, lons2, lats2)
distance = numpy.arcsin(numpy.sqrt(
numpy.sin((lats1 - lats2) / 2.0) ** 2.0
+ numpy.cos(lats1) * numpy.cos(lats2)
* numpy.sin((lons1 - lons2) / 2.0) ** 2.0
))
return diameter * distance | Calculate the geodetic distance between two points or two collections
of points.
Parameters are coordinates in decimal degrees. They could be scalar
float numbers or numpy arrays, in which case they should "broadcast
together".
Implements http://williams.best.vwh.net/avform.htm#Dist
:returns:
Distance in km, floating point scalar or numpy array of such. |
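A self-contained check of the haversine formula used above, with the degree-to-radian conversion inlined since _prepare_coords is not shown; the 6371 km mean Earth radius is an assumption:
import numpy as np

EARTH_RADIUS = 6371.0  # km, assumed mean radius

def haversine_km(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = np.radians([lon1, lat1, lon2, lat2])
    d = np.arcsin(np.sqrt(
        np.sin((lat1 - lat2) / 2.0) ** 2
        + np.cos(lat1) * np.cos(lat2) * np.sin((lon1 - lon2) / 2.0) ** 2
    ))
    return 2.0 * EARTH_RADIUS * d

# Paris to London, roughly 343 km along the great circle
print(round(haversine_km(2.35, 48.86, -0.13, 51.51), 1))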
def vlr_factory(raw_vlr):
""" Given a raw_vlr tries to find its corresponding KnownVLR class
that can parse its data.
If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes)
"""
user_id = raw_vlr.header.user_id.rstrip(NULL_BYTE).decode()
known_vlrs = BaseKnownVLR.__subclasses__()
for known_vlr in known_vlrs:
if (
known_vlr.official_user_id() == user_id
and raw_vlr.header.record_id in known_vlr.official_record_ids()
):
return known_vlr.from_raw(raw_vlr)
else:
return VLR.from_raw(raw_vlr) | Given a raw_vlr tries to find its corresponding KnownVLR class
that can parse its data.
If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes) |
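A stripped-down sketch of the subclass-lookup factory pattern above, with hypothetical stand-in classes in place of the KnownVLR hierarchy:
class BaseKnown:
    """Stand-in for BaseKnownVLR, illustrative only."""
    @staticmethod
    def official_user_id():
        raise NotImplementedError

class KnownFoo(BaseKnown):
    @staticmethod
    def official_user_id():
        return 'Foo'

def factory(user_id):
    for cls in BaseKnown.__subclasses__():
        if cls.official_user_id() == user_id:
            return cls()
    return None  # the real function falls back to a generic VLR here

print(type(factory('Foo')).__name__)  # KnownFoo
print(factory('Bar'))                 # None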
def doExperiment(numColumns, l2Overrides, objectDescriptions, noiseMu,
noiseSigma, numInitialTraversals, noiseEverywhere):
"""
Touch every point on an object 'numInitialTraversals' times, then evaluate
whether it has inferred the object by touching every point once more and
checking the number of correctly active and incorrectly active cells.
@param numColumns (int)
The number of sensors to use
@param l2Overrides (dict)
Parameters for the ColumnPooler
@param objectDescriptions (dict)
A mapping of object names to their feature-locations.
See 'createRandomObjectDescriptions'.
@param noiseMu (float)
The average amount of noise in a feedforward input. The noise level for each
column's input is determined once per touch. It is a gaussian distribution
with mean 'noiseMu' and sigma 'noiseSigma'.
@param noiseSigma (float)
The sigma for the gaussian distribution of noise levels. If the noiseSigma is
0, then the noise level will always be 'noiseMu'.
@param numInitialTraversals (int)
The number of times to traverse the object before testing whether the object
has been inferred.
@param noiseEverywhere (bool)
If true, add noise to every column's input, and record accuracy of every
column. If false, add noise to one column's input, and only record accuracy
of that column.
"""
# For each column, keep a mapping from feature-location names to their SDRs
layer4sdr = lambda : np.array(sorted(random.sample(xrange(L4_CELL_COUNT),
40)), dtype="uint32")
featureLocationSDRs = [defaultdict(layer4sdr) for _ in xrange(numColumns)]
params = {"inputWidth": L4_CELL_COUNT,
"lateralInputWidths": [4096]*(numColumns-1),
"seed": random.randint(0, 1024)}
params.update(l2Overrides)
l2Columns = [ColumnPooler(**params)
for _ in xrange(numColumns)]
# Learn the objects
objectL2Representations = {}
for objectName, featureLocations in objectDescriptions.iteritems():
for featureLocationName in featureLocations:
# Touch it enough times for the distal synapses to reach the
# connected permanence, and then once more.
for _ in xrange(4):
allLateralInputs = [l2.getActiveCells() for l2 in l2Columns]
for columnNumber, l2 in enumerate(l2Columns):
feedforwardInput = featureLocationSDRs[columnNumber][featureLocationName]
lateralInputs = [lateralInput
for i, lateralInput in enumerate(allLateralInputs)
if i != columnNumber]
l2.compute(feedforwardInput, lateralInputs, learn=True)
objectL2Representations[objectName] = [set(l2.getActiveCells())
for l2 in l2Columns]
for l2 in l2Columns:
l2.reset()
results = []
# Try to infer the objects
for objectName, featureLocations in objectDescriptions.iteritems():
for l2 in l2Columns:
l2.reset()
sensorPositionsIterator = greedySensorPositions(numColumns, len(featureLocations))
# Touch each location at least numInitialTouches times, and then touch it
# once more, testing it. For each traversal, touch each point on the object
# ~once. Not once per sensor -- just once. So we translate the "number of
# traversals" into a "number of touches" according to the number of sensors.
numTouchesPerTraversal = len(featureLocations) / float(numColumns)
numInitialTouches = int(math.ceil(numInitialTraversals * numTouchesPerTraversal))
if noiseEverywhere:
numTestTouches = int(math.ceil(1 * numTouchesPerTraversal))
else:
numTestTouches = len(featureLocations)
for touch in xrange(numInitialTouches + numTestTouches):
sensorPositions = next(sensorPositionsIterator)
# Give the system a few timesteps to settle, allowing lateral connections
# to cause cells to be inhibited.
for _ in xrange(3):
allLateralInputs = [l2.getActiveCells() for l2 in l2Columns]
for columnNumber, l2 in enumerate(l2Columns):
position = sensorPositions[columnNumber]
featureLocationName = featureLocations[position]
feedforwardInput = featureLocationSDRs[columnNumber][featureLocationName]
if noiseEverywhere or columnNumber == 0:
noiseLevel = random.gauss(noiseMu, noiseSigma)
noiseLevel = max(0.0, min(1.0, noiseLevel))
feedforwardInput = noisy(feedforwardInput, noiseLevel, L4_CELL_COUNT)
lateralInputs = [lateralInput
for i, lateralInput in enumerate(allLateralInputs)
if i != columnNumber]
l2.compute(feedforwardInput, lateralInputs, learn=False)
if touch >= numInitialTouches:
if noiseEverywhere:
for columnNumber, l2 in enumerate(l2Columns):
activeCells = set(l2.getActiveCells())
correctCells = objectL2Representations[objectName][columnNumber]
results.append((len(activeCells & correctCells),
len(activeCells - correctCells)))
else:
activeCells = set(l2Columns[0].getActiveCells())
correctCells = objectL2Representations[objectName][0]
results.append((len(activeCells & correctCells),
len(activeCells - correctCells)))
return results | Touch every point on an object 'numInitialTraversals' times, then evaluate
whether it has inferred the object by touching every point once more and
checking the number of correctly active and incorrectly active cells.
@param numColumns (int)
The number of sensors to use
@param l2Overrides (dict)
Parameters for the ColumnPooler
@param objectDescriptions (dict)
A mapping of object names to their feature-locations.
See 'createRandomObjectDescriptions'.
@param noiseMu (float)
The average amount of noise in a feedforward input. The noise level for each
column's input is determined once per touch. It is a gaussian distribution
with mean 'noiseMu' and sigma 'noiseSigma'.
@param noiseSigma (float)
The sigma for the gaussian distribution of noise levels. If the noiseSigma is
0, then the noise level will always be 'noiseMu'.
@param numInitialTraversals (int)
The number of times to traverse the object before testing whether the object
has been inferred.
@param noiseEverywhere (bool)
If true, add noise to every column's input, and record accuracy of every
column. If false, add noise to one column's input, and only record accuracy
of that column. |
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
if file is None:
file = sys.stderr
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
file.write('\n'.join(format_tb(tb, limit)) + '\n') | Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method. |
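A quick usage sketch of the function above (it mirrors the standard library's traceback.print_tb signature):

import sys

try:
    1 / 0
except ZeroDivisionError:
    # Print at most one stack trace entry of the active traceback to stderr.
    print_tb(sys.exc_info()[2], limit=1)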
def on_receive_transactions(self, proto, transactions):
"receives rlp.decoded serialized"
log.debug('----------------------------------')
log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
def _add_txs():
for tx in transactions:
self.add_transaction(tx, origin=proto)
gevent.spawn(_add_txs) | receives rlp-decoded, serialized transactions
def _with_meta_to_py_ast(
ctx: GeneratorContext, node: WithMeta, **kwargs
) -> GeneratedPyAST:
"""Generate a Python AST node for Python interop method calls."""
assert node.op == NodeOp.WITH_META
handle_expr = _WITH_META_EXPR_HANDLER.get(node.expr.op)
assert (
handle_expr is not None
), "No expression handler for with-meta child node type"
return handle_expr(ctx, node.expr, meta_node=node.meta, **kwargs) | Generate a Python AST node for Python interop method calls. |
def qteSplitApplet(self, applet: (QtmacsApplet, str)=None,
splitHoriz: bool=True,
windowObj: QtmacsWindow=None):
"""
Reveal ``applet`` by splitting the space occupied by the
current applet.
If ``applet`` is already visible then the method does
nothing. Furthermore, this method does not change the focus,
ie. the currently active applet will remain active.
If ``applet`` is **None** then the next invisible applet
will be shown. If ``windowObj`` is **None** then the
currently active window will be used.
The ``applet`` parameter can either be an instance of
``QtmacsApplet`` or a string denoting an applet ID. In the
latter case the ``qteGetAppletHandle`` method is used to fetch
the respective applet instance.
|Args|
* ``applet`` (**QtmacsApplet**, **str**): the applet to reveal.
* ``splitHoriz`` (**bool**): whether to split horizontally
or vertically.
* ``windowObj`` (**QtmacsWindow**): the window in which to
reveal ``applet``.
|Returns|
* **bool**: if **True**, ``applet`` was revealed.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# If ``newAppObj`` was specified by its ID (ie. a string) then
# fetch the associated ``QtmacsApplet`` instance. If
# ``newAppObj`` is already an instance of ``QtmacsApplet``
# then use it directly.
if isinstance(applet, str):
newAppObj = self.qteGetAppletHandle(applet)
else:
newAppObj = applet
# Use the currently active window if none was specified.
if windowObj is None:
windowObj = self.qteActiveWindow()
if windowObj is None:
msg = 'Cannot determine the currently active window.'
self.qteLogger.error(msg, stack_info=True)
return
# Convert ``splitHoriz`` to the respective Qt constant.
if splitHoriz:
splitOrientation = QtCore.Qt.Horizontal
else:
splitOrientation = QtCore.Qt.Vertical
if newAppObj is None:
# If no new applet was specified use the next available
# invisible applet.
newAppObj = self.qteNextApplet(skipVisible=True,
skipInvisible=False)
else:
# Do nothing if the new applet is already visible.
if newAppObj.qteIsVisible():
return False
# If we still have not found an applet then there are no
# invisible applets left to show. Therefore, splitting makes
# no sense.
if newAppObj is None:
self.qteLogger.warning('All applets are already visible.')
return False
# If the root splitter is empty then add the new applet and
# return immediately.
if windowObj.qteAppletSplitter.count() == 0:
windowObj.qteAppletSplitter.qteAddWidget(newAppObj)
windowObj.qteAppletSplitter.setOrientation(splitOrientation)
return True
# ------------------------------------------------------------
# The root splitter contains at least one widget, if we got
# this far.
# ------------------------------------------------------------
# Shorthand to last active applet in the current window. Query
# this applet with qteNextApplet method because
# self._qteActiveApplet may be a mini applet, and we are only
# interested in genuine applets.
curApp = self.qteNextApplet(numSkip=0, windowObj=windowObj)
# Get a reference to the splitter in which the currently
# active applet lives. This may be the root splitter, or one
# of its child splitters.
split = self._qteFindAppletInSplitter(
curApp, windowObj.qteAppletSplitter)
if split is None:
msg = 'Active applet <b>{}</b> not in the layout.'
msg = msg.format(curApp.qteAppletID())
self.qteLogger.error(msg, stack_info=True)
return False
# If 'curApp' lives in the root splitter, and the root
# splitter contains only a single element, then simply add the
# new applet as the second element and return.
if split is windowObj.qteAppletSplitter:
if split.count() == 1:
split.qteAddWidget(newAppObj)
split.setOrientation(splitOrientation)
return True
# ------------------------------------------------------------
# The splitter (root or not) contains two widgets, if we got
# this far.
# ------------------------------------------------------------
# Determine the index of the applet inside the splitter.
curAppIdx = split.indexOf(curApp)
# Create a new splitter and populate it with 'curApp' and the
# previously invisible ``newAppObj``. Then insert this new splitter at
# the position where the old applet was taken from. Note: widgets are
# inserted with ``qteAddWidget`` (because they are ``QtmacsApplet``
# instances), whereas splitters are added with ``insertWidget``, NOT
# ``qteInsertWidget``. The reason is that splitters do not require the
# extra TLC necessary for applets in terms of how and where to show
# them.
newSplit = QtmacsSplitter(splitOrientation, windowObj)
curApp.setParent(None)
newSplit.qteAddWidget(curApp)
newSplit.qteAddWidget(newAppObj)
split.insertWidget(curAppIdx, newSplit)
# Adjust the size of two widgets in ``split`` (ie. ``newSplit`` and
whatever other widget) to take up equal space. The same adjustment is
# made for ``newSplit``, but there the ``qteAddWidget`` methods have
# already taken care of it.
split.qteAdjustWidgetSizes()
return True | Reveal ``applet`` by splitting the space occupied by the
current applet.
If ``applet`` is already visible then the method does
nothing. Furthermore, this method does not change the focus,
ie. the currently active applet will remain active.
If ``applet`` is **None** then the next invisible applet
will be shown. If ``windowObj`` is **None** then the
currently active window will be used.
The ``applet`` parameter can either be an instance of
``QtmacsApplet`` or a string denoting an applet ID. In the
latter case the ``qteGetAppletHandle`` method is used to fetch
the respective applet instance.
|Args|
* ``applet`` (**QtmacsApplet**, **str**): the applet to reveal.
* ``splitHoriz`` (**bool**): whether to split horizontally
or vertically.
* ``windowObj`` (**QtmacsWindow**): the window in which to
reveal ``applet``.
|Returns|
* **bool**: if **True**, ``applet`` was revealed.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type. |
def row_table(cls, d, order=None, labels=None):
"""prints a pretty table from data in the dict.
:param d: A dict to be printed
:param order: The order in which the columns are printed.
The order is specified by the key names of the dict.
:param labels: The array of labels for the column
"""
# header
header = list(d)
x = PrettyTable(labels)
if order is None:
order = header
for key in order:
value = d[key]
if type(value) == list:
x.add_row([key, value[0]])
for element in value[1:]:
x.add_row(["", element])
elif type(value) == dict:
value_keys = list(value)
first_key = value_keys[0]
rest_keys = value_keys[1:]
x.add_row(
[key, "{0} : {1}".format(first_key, value[first_key])])
for element in rest_keys:
x.add_row(["", "{0} : {1}".format(element, value[element])])
else:
x.add_row([key, value])
x.align = "l"
return x | prints a pretty table from data in the dict.
:param d: A dict to be printed
:param order: The order in which the columns are printed.
The order is specified by the key names of the dict.
:param labels: The array of labels for the column |
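A hedged usage sketch; `SomeClass` stands in for whatever class carries this classmethod, and prettytable must be installed for `PrettyTable` to resolve.

data = {
    "name": "worker-1",
    "tags": ["gpu", "spot"],
    "limits": {"cpu": 4, "memory": "8GB"},
}
table = SomeClass.row_table(data, order=["name", "tags", "limits"],
                            labels=["Attribute", "Value"])
print(table)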
def security(policy, app_secret):
"""
Creates a valid signature and policy based on provided app secret and
parameters
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012, 'call': ['read', 'store', 'pick']}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
```
"""
validate(policy)
policy_enc = base64.urlsafe_b64encode(json.dumps(policy).encode('utf-8'))
signature = hmac.new(app_secret.encode('utf-8'),
policy_enc,
hashlib.sha256).hexdigest()
return {'policy': policy_enc, 'signature': signature} | Creates a valid signature and policy based on provided app secret and
parameters
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012, 'call': ['read', 'store', 'pick']}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
``` |
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value
"""
Replace instances of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_with`
"""
training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features.astype( np.float64 )
if np.sum(np.isnan( instance.features )):
if replace_with is None:
instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
else:
instance.features[ np.isnan( instance.features ) ] = replace_with
return dataset
#end
if replace_with is None:
means = np.mean( np.nan_to_num(training_data), axis=0 )
return encoder | Replace instances of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_with`
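After the fix above, a hedged usage sketch; `Instance` is a stand-in for whatever objects the training set holds (anything with a numpy `features` attribute).

import numpy as np

class Instance:
    def __init__(self, features):
        self.features = np.array(features, dtype=np.float64)

trainingset = [Instance([1.0, np.nan]), Instance([3.0, 4.0])]
encoder = replace_nan(trainingset)      # no value given, so NaNs become column means
cleaned = encoder(trainingset)
print(cleaned[0].features)              # [1. 2.] -- nan_to_num counts NaNs as zeros when forming the mean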
def update_entity(self, table_name, entity, if_match='*', timeout=None):
'''
Updates an existing entity in a table. Throws if the entity does not exist.
The update_entity operation replaces the entire entity and can be used to
remove properties.
:param str table_name:
The name of the table containing the entity to update.
:param entity:
The entity to update. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The update operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional update, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _update_entity(entity, if_match, self.require_encryption, self.key_encryption_key,
self.encryption_resolver_function)
request.host_locations = self._get_host_locations()
request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
request.query['timeout'] = _int_to_str(timeout)
return self._perform_request(request, _extract_etag) | Updates an existing entity in a table. Throws if the entity does not exist.
The update_entity operation replaces the entire entity and can be used to
remove properties.
:param str table_name:
The name of the table containing the entity to update.
:param entity:
The entity to update. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The update operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional update, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str |
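A hedged usage sketch against the legacy azure-storage table SDK this method belongs to; the account credentials and table/entity values below are placeholders.

from azure.storage.table import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
task = {
    'PartitionKey': 'tasksSeattle',
    'RowKey': '001',
    'description': 'Take out the trash',
    'priority': 250,
}
# Replaces the stored entity wholesale; if_match='*' skips the optimistic-concurrency check.
etag = table_service.update_entity('tasktable', task, if_match='*')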
def UpdateFlow(self,
client_id,
flow_id,
flow_obj=db.Database.unchanged,
flow_state=db.Database.unchanged,
client_crash_info=db.Database.unchanged,
pending_termination=db.Database.unchanged,
processing_on=db.Database.unchanged,
processing_since=db.Database.unchanged,
processing_deadline=db.Database.unchanged,
cursor=None):
"""Updates flow objects in the database."""
updates = []
args = []
if flow_obj != db.Database.unchanged:
updates.append("flow=%s")
args.append(flow_obj.SerializeToString())
updates.append("flow_state=%s")
args.append(int(flow_obj.flow_state))
updates.append("user_cpu_time_used_micros=%s")
args.append(
db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time))
updates.append("system_cpu_time_used_micros=%s")
args.append(
db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time))
updates.append("network_bytes_sent=%s")
args.append(flow_obj.network_bytes_sent)
updates.append("num_replies_sent=%s")
args.append(flow_obj.num_replies_sent)
if flow_state != db.Database.unchanged:
updates.append("flow_state=%s")
args.append(int(flow_state))
if client_crash_info != db.Database.unchanged:
updates.append("client_crash_info=%s")
args.append(client_crash_info.SerializeToString())
if pending_termination != db.Database.unchanged:
updates.append("pending_termination=%s")
args.append(pending_termination.SerializeToString())
if processing_on != db.Database.unchanged:
updates.append("processing_on=%s")
args.append(processing_on)
if processing_since != db.Database.unchanged:
updates.append("processing_since=FROM_UNIXTIME(%s)")
args.append(mysql_utils.RDFDatetimeToTimestamp(processing_since))
if processing_deadline != db.Database.unchanged:
updates.append("processing_deadline=FROM_UNIXTIME(%s)")
args.append(mysql_utils.RDFDatetimeToTimestamp(processing_deadline))
if not updates:
return
query = "UPDATE flows SET last_update=NOW(6), "
query += ", ".join(updates)
query += " WHERE client_id=%s AND flow_id=%s"
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
updated = cursor.execute(query, args)
if updated == 0:
raise db.UnknownFlowError(client_id, flow_id) | Updates flow objects in the database. |
def get_mods(self, project_path, vars):
"""
Build the mod list to enable
"""
# Start with answers from interactive command
mods = [var.name for var in self.vars if vars[var.name].lower() == 'yes']
mods = set(mods)
# Base mods
for name in self.mods_list:
mods.add(name)
# Conditional mods dependencies
if 'accounts' in mods or 'contact_form' in mods:
mods.add('crispy_forms')
mods.add('recaptcha')
return mods | Build the mod list to enable |
def _legacy_symbol_table(build_file_aliases):
"""Construct a SymbolTable for the given BuildFileAliases.
:param build_file_aliases: BuildFileAliases to register.
:type build_file_aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
:returns: A SymbolTable.
"""
table = {
alias: _make_target_adaptor(TargetAdaptor, target_type)
for alias, target_type in build_file_aliases.target_types.items()
}
for alias, factory in build_file_aliases.target_macro_factories.items():
# TargetMacro.Factory with more than one target type is deprecated.
# For default sources, this means that TargetMacro Factories with more than one target_type
# will not parse sources through the engine, and will fall back to the legacy python sources
# parsing.
# Conveniently, multi-target_type TargetMacro.Factory, and legacy python source parsing, are
# targeted to be removed in the same version of pants.
if len(factory.target_types) == 1:
table[alias] = _make_target_adaptor(
TargetAdaptor,
tuple(factory.target_types)[0],
)
# TODO: The alias replacement here is to avoid elevating "TargetAdaptors" into the public
# API until after https://github.com/pantsbuild/pants/issues/3560 has been completed.
# These should likely move onto Target subclasses as the engine gets deeper into beta
# territory.
table['python_library'] = _make_target_adaptor(PythonTargetAdaptor, PythonLibrary)
table['jvm_app'] = _make_target_adaptor(AppAdaptor, JvmApp)
table['jvm_binary'] = _make_target_adaptor(JvmBinaryAdaptor, JvmBinary)
table['python_app'] = _make_target_adaptor(AppAdaptor, PythonApp)
table['python_tests'] = _make_target_adaptor(PythonTestsAdaptor, PythonTests)
table['python_binary'] = _make_target_adaptor(PythonBinaryAdaptor, PythonBinary)
table['remote_sources'] = _make_target_adaptor(RemoteSourcesAdaptor, RemoteSources)
table['page'] = _make_target_adaptor(PageAdaptor, Page)
# Note that these don't call _make_target_adaptor because we don't have a handy reference to the
# types being constructed. They don't have any default_sources behavior, so this should be ok,
# but if we end up doing more things in _make_target_adaptor, we should make sure they're
# applied here too.
table['pants_plugin'] = PantsPluginAdaptor
table['contrib_plugin'] = PantsPluginAdaptor
return SymbolTable(table) | Construct a SymbolTable for the given BuildFileAliases.
:param build_file_aliases: BuildFileAliases to register.
:type build_file_aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
:returns: A SymbolTable. |
def search(self,
initial_state: State,
transition_function: TransitionFunction) -> Dict[int, List[State]]:
"""
Parameters
----------
initial_state : ``State``
The starting state of our search. This is assumed to be `batched`, and our beam search
is batch-aware - we'll keep ``beam_size`` states around for each instance in the batch.
transition_function : ``TransitionFunction``
The ``TransitionFunction`` object that defines and scores transitions from one state to the
next.
Returns
-------
best_states : ``Dict[int, List[State]]``
This is a mapping from batch index to the top states for that instance.
"""
finished_states: Dict[int, List[State]] = defaultdict(list)
states = [initial_state]
step_num = 0
while states:
step_num += 1
next_states: Dict[int, List[State]] = defaultdict(list)
grouped_state = states[0].combine_states(states)
allowed_actions = []
for batch_index, action_history in zip(grouped_state.batch_indices,
grouped_state.action_history):
allowed_actions.append(self._allowed_transitions[batch_index][tuple(action_history)])
for next_state in transition_function.take_step(grouped_state,
max_actions=self._per_node_beam_size,
allowed_actions=allowed_actions):
# NOTE: we're doing state.batch_indices[0] here (and similar things below),
# hard-coding a group size of 1. But, our use of `next_state.is_finished()`
# already checks for that, as it crashes if the group size is not 1.
batch_index = next_state.batch_indices[0]
if next_state.is_finished():
finished_states[batch_index].append(next_state)
else:
next_states[batch_index].append(next_state)
states = []
for batch_index, batch_states in next_states.items():
# The states from the generator are already sorted, so we can just take the first
# ones here, without an additional sort.
if self._beam_size:
batch_states = batch_states[:self._beam_size]
states.extend(batch_states)
best_states: Dict[int, List[State]] = {}
for batch_index, batch_states in finished_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in batch_states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]]
return best_states | Parameters
----------
initial_state : ``State``
The starting state of our search. This is assumed to be `batched`, and our beam search
is batch-aware - we'll keep ``beam_size`` states around for each instance in the batch.
transition_function : ``TransitionFunction``
The ``TransitionFunction`` object that defines and scores transitions from one state to the
next.
Returns
-------
best_states : ``Dict[int, List[State]]``
This is a mapping from batch index to the top states for that instance. |
def refresh(self, accept=MEDIA_TYPE_TAXII_V20):
"""Updates Status information"""
response = self.__raw = self._conn.get(self.url,
headers={"Accept": accept})
self._populate_fields(**response) | Updates Status information |
def heating_stats(self):
"""Calculate some heating data stats."""
local_5 = []
local_10 = []
for i in range(0, 10):
level = self.past_heating_level(i)
if level == 0:
_LOGGER.debug('Cant calculate stats yet...')
return
if i < 5:
local_5.append(level)
local_10.append(level)
_LOGGER.debug('%s Heating History: %s', self.side, local_10)
try:
# Average of 5min on the history dict.
fiveminavg = statistics.mean(local_5)
tenminavg = statistics.mean(local_10)
_LOGGER.debug('%s Heating 5 min avg: %s', self.side, fiveminavg)
_LOGGER.debug('%s Heating 10 min avg: %s', self.side, tenminavg)
# Standard deviation
fivestdev = statistics.stdev(local_5)
tenstdev = statistics.stdev(local_10)
_LOGGER.debug('%s Heating 5 min stdev: %s', self.side, fivestdev)
_LOGGER.debug('%s Heating 10 min stdev: %s', self.side, tenstdev)
# Variance
fivevar = statistics.variance(local_5)
tenvar = statistics.variance(local_10)
_LOGGER.debug('%s Heating 5 min variance: %s', self.side, fivevar)
_LOGGER.debug('%s Heating 10 min variance: %s', self.side, tenvar)
except:
_LOGGER.debug('Cant calculate stats yet...') | Calculate some heating data stats. |
def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot)) | Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot` |
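Because the comparison relies only on subtraction and abs(), the same helper works for numbers and datetimes alike:

from datetime import datetime

print(nearest([3, 7, 12], 8))                        # -> 7

timestamps = [datetime(2020, 1, 1), datetime(2020, 6, 1), datetime(2021, 1, 1)]
print(nearest(timestamps, datetime(2020, 5, 1)))     # -> 2020-06-01 00:00:00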
def render_response(self):
"""Render as a string formatted for HTTP response headers
(detailed 'Set-Cookie: ' style).
"""
# Use whatever renderers are defined for name and value.
# (.attributes() is responsible for all other rendering.)
name, value = self.name, self.value
renderer = self.attribute_renderers.get('name', None)
if renderer:
name = renderer(name)
renderer = self.attribute_renderers.get('value', None)
if renderer:
value = renderer(value)
return '; '.join(
['{0}={1}'.format(name, value)] +
[key if isinstance(val, bool) else '='.join((key, val))
for key, val in self.attributes().items()]
) | Render as a string formatted for HTTP response headers
(detailed 'Set-Cookie: ' style). |
def get_method(self, method_name, default=None):
"""
Returns the contained method of the specified name, or `default` if
not found.
"""
for method in self.methods:
if method.name == method_name:
return method
return default | Returns the contained method of the specified name, or `default` if
not found. |
def set_focused(self, account, is_focused):
# type: (OutlookAccount, bool) -> bool
""" Emails from this contact will either always be put in the Focused inbox, or always put in Other, based on
the value of is_focused.
Args:
account (OutlookAccount): The :class:`OutlookAccount <pyOutlook.core.main.OutlookAccount>`
the override should be set for
is_focused (bool): Whether this contact should be set to Focused, or Other.
Returns:
True if the request was successful
"""
endpoint = 'https://outlook.office.com/api/v2.0/me/InferenceClassification/Overrides'
if is_focused:
classification = 'Focused'
else:
classification = 'Other'
data = dict(ClassifyAs=classification, SenderEmailAddress=dict(Address=self.email))
r = requests.post(endpoint, headers=account._headers, data=json.dumps(data))
# Will raise an error if necessary, otherwise returns True
result = check_response(r)
self.focused = is_focused
return result | Emails from this contact will either always be put in the Focused inbox, or always put in Other, based on
the value of is_focused.
Args:
account (OutlookAccount): The :class:`OutlookAccount <pyOutlook.core.main.OutlookAccount>`
the override should be set for
is_focused (bool): Whether this contact should be set to Focused, or Other.
Returns:
True if the request was successful |
def _leave_event_hide(self):
""" Hides the tooltip after some time has passed (assuming the cursor is
not over the tooltip).
"""
if (not self._hide_timer.isActive() and
# If Enter events always came after Leave events, we wouldn't need
# this check. But on Mac OS, it sometimes happens the other way
# around when the tooltip is created.
QtGui.qApp.topLevelAt(QtGui.QCursor.pos()) != self):
self._hide_timer.start(300, self) | Hides the tooltip after some time has passed (assuming the cursor is
not over the tooltip). |
def p_base_type(self, p): # noqa
'''base_type : BOOL annotations
| BYTE annotations
| I8 annotations
| I16 annotations
| I32 annotations
| I64 annotations
| DOUBLE annotations
| STRING annotations
| BINARY annotations'''
name = p[1]
if name == 'i8':
name = 'byte'
p[0] = ast.PrimitiveType(name, p[2]) | base_type : BOOL annotations
| BYTE annotations
| I8 annotations
| I16 annotations
| I32 annotations
| I64 annotations
| DOUBLE annotations
| STRING annotations
| BINARY annotations |
def reset(self):
"""
Resets stream parameters to their defaults.
"""
with util.disable_constant(self):
for k, p in self.params().items():
if k != 'name':
setattr(self, k, p.default) | Resets stream parameters to their defaults. |
def connect(self, address='session'):
"""Connect to *address* and wait until the connection is established.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to connect to the D-BUS
session and system bus, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusClient, self).connect(addr)
except pyuv.error.UVError:
continue
break
else:
raise DbusError('could not connect to any address')
# Wait for authentication to complete
self.get_unique_name() | Connect to *address* and wait until the connection is established.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to connect to the D-BUS
session and system bus, respectively. |
def generate(self, trilegal_filename, ra=None, dec=None,
n=2e4, ichrone='mist', MAfn=None,
mags=None, maxrad=None, f_binary=0.4, **kwargs):
"""
Generate population.
"""
n = int(n)
#generate/load BG primary stars from TRILEGAL simulation
bgpop = BGStarPopulation_TRILEGAL(trilegal_filename,
ra=ra, dec=dec, mags=mags,
maxrad=maxrad, **kwargs)
# Make sure that
# properties of stars are within allowable range for isochrone.
# This is a bit hacky, admittedly.
mass = bgpop.stars['m_ini'].values
age = bgpop.stars['logAge'].values
feh = bgpop.stars['[M/H]'].values
ichrone = get_ichrone(ichrone)
pct = 0.05 #pct distance from "edges" of ichrone interpolation
mass[mass < ichrone.minmass*(1+pct)] = ichrone.minmass*(1+pct)
mass[mass > ichrone.maxmass*(1-pct)] = ichrone.maxmass*(1-pct)
age[age < ichrone.minage*(1+pct)] = ichrone.minage*(1+pct)
age[age > ichrone.maxage*(1-pct)] = ichrone.maxage*(1-pct)
feh[feh < ichrone.minfeh+0.05] = ichrone.minfeh+0.05
feh[feh > ichrone.maxfeh-0.05] = ichrone.maxfeh-0.05
distance = bgpop.stars['distance'].values
#Generate binary population to draw eclipses from
pop = MultipleStarPopulation(mA=mass, age=age, feh=feh,
f_triple=0, f_binary=1,
distance=distance,
ichrone=ichrone)
all_stars = pop.stars.dropna(subset=['mass_A'])
all_stars.reset_index(inplace=True)
#generate eclipses
stars = pd.DataFrame()
df_orbpop = pd.DataFrame()
tot_prob = None; tot_dprob=None; prob_norm=None
n_adapt = n
while len(stars) < n:
n_adapt = int(n_adapt)
inds = np.random.randint(len(all_stars), size=n_adapt)
s = all_stars.iloc[inds]
#calculate limb-darkening coefficients
u1A, u2A = ldcoeffs(s['Teff_A'], s['logg_A'])
u1B, u2B = ldcoeffs(s['Teff_B'], s['logg_B'])
inds, df, (prob,dprob) = calculate_eclipses(s['mass_A'], s['mass_B'],
s['radius_A'], s['radius_B'],
s['{}_mag_A'.format(self.band)],
s['{}_mag_B'.format(self.band)],
u11s=u1A, u21s=u2A,
u12s=u1B, u22s=u2B,
band=self.band,
period=self.period,
calc_mininc=True,
return_indices=True,
MAfn=MAfn)
s = s.iloc[inds].copy()
s.reset_index(inplace=True)
for col in df.columns:
s[col] = df[col]
stars = pd.concat((stars, s))
#new_df_orbpop = pop.orbpop.orbpop_long.dataframe.iloc[inds].copy()
#new_df_orbpop.reset_index(inplace=True)
#df_orbpop = pd.concat((df_orbpop, new_df_orbpop))
logging.info('{} BEB systems generated (target {})'.format(len(stars),n))
#logging.debug('{} nans in stars[dpri]'.format(np.isnan(stars['dpri']).sum()))
#logging.debug('{} nans in df[dpri]'.format(np.isnan(df['dpri']).sum()))
if tot_prob is None:
prob_norm = (1/dprob**2)
tot_prob = prob
tot_dprob = dprob
else:
prob_norm = (1/tot_dprob**2 + 1/dprob**2)
tot_prob = (tot_prob/tot_dprob**2 + prob/dprob**2)/prob_norm
tot_dprob = 1/np.sqrt(prob_norm)
n_adapt = min(int(1.2*(n-len(stars)) * n_adapt//len(s)), 5e5)
#logging.debug('n_adapt = {}'.format(n_adapt))
n_adapt = max(n_adapt, 100)
n_adapt = int(n_adapt)
stars = stars.iloc[:n]
if 'level_0' in stars:
stars.drop('level_0', axis=1, inplace=True) #dunno where this came from
stars = stars.reset_index()
stars.drop('index', axis=1, inplace=True)
stars['mass_1'] = stars['mass_A']
stars['radius_1'] = stars['radius_A']
stars['mass_2'] = stars['mass_B']
stars['radius_2'] = stars['radius_B']
MultipleStarPopulation.__init__(self, stars=stars,
#orbpop=orbpop,
f_triple=0, f_binary=f_binary,
period_long=self.period)
priorfactors = {'f_binary':f_binary}
#attributes needed for BGStarPopulation
self.density = bgpop.density
self.trilegal_args = bgpop.trilegal_args
self._maxrad = bgpop._maxrad
#create an OrbitPopulation here?
EclipsePopulation.__init__(self, stars=stars, #orbpop=orbpop,
period=self.period, cadence=self.cadence,
model=self.model,
lhoodcachefile=self.lhoodcachefile,
priorfactors=priorfactors, prob=tot_prob)
#add Rsky property
self.stars['Rsky'] = randpos_in_circle(len(self.stars),
self._maxrad, return_rad=True) | Generate population. |
def daterange(value, details=False):
'''Display a date range in the shortest possible manner.'''
if not isinstance(value, db.DateRange):
raise ValueError('daterange only accepts db.DateRange as parameter')
if details:
return daterange_with_details(value)
date_format = 'YYYY'
delta = value.end - value.start
start, end = None, None
start = format_date(value.start, date_format)
if delta.days > 365:
end = format_date(value.end, date_format)
return '{start!s}–{end!s}'.format(start=start, end=end) if end else start | Display a date range in the shortest possible manner.
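A hedged usage sketch, assuming `db.DateRange` accepts start/end dates as the type check above implies:

from datetime import date

same_year = db.DateRange(start=date(2019, 1, 1), end=date(2019, 6, 30))
multi_year = db.DateRange(start=date(2015, 1, 1), end=date(2018, 12, 31))

daterange(same_year)     # -> '2019'       (span of 365 days or less: start year only)
daterange(multi_year)    # -> '2015–2018'  (longer spans show both years)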
def set_data(self, index, value):
"""Uses given data setter, and emit modelReset signal"""
acces, field = self.get_item(index), self.header[index.column()]
self.beginResetModel()
self.set_data_hook(acces, field, value)
self.endResetModel() | Uses given data setter, and emit modelReset signal |
def _run_command(self, arguments: List[str], input_data: Any=None, output_encoding: str="utf-8") -> str:
"""
Run a command as a subprocess.
Ignores errors given over stderr if there is output on stdout (this is the case where baton has been run
correctly and has expressed the error in its JSON output, which can be handled more appropriately upstream of this
method.)
:param arguments: the arguments to run
:param input_data: the input data to pass to the subprocess
:param output_encoding: optional specification of the output encoding to expect
:return: the process' standard out
"""
process = subprocess.Popen(arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
if isinstance(input_data, List):
for to_write in input_data:
to_write_as_json = json.dumps(to_write)
process.stdin.write(str.encode(to_write_as_json))
input_data = None
else:
input_data = str.encode(json.dumps(input_data))
timeout_in_seconds = self.timeout_queries_after.total_seconds() if self.timeout_queries_after is not None \
else None
out, error = process.communicate(input=input_data, timeout=timeout_in_seconds)
if len(out) == 0 and len(error) > 0:
raise RuntimeError(error)
return out.decode(output_encoding).rstrip() | Run a command as a subprocess.
Ignores errors given over stderr if there is output on stdout (this is the case where baton has been run
correctly and has expressed the error in its JSON output, which can be handled more appropriately upstream of this
method.)
:param arguments: the arguments to run
:param input_data: the input data to pass to the subprocess
:param output_encoding: optional specification of the output encoding to expect
:return: the process' standard out |
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
if request.headers.get(header, None) != value:
return
body_raw = cached["response"].pop("body")
try:
body = io.BytesIO(body_raw)
except TypeError:
# This can happen if cachecontrol serialized to v1 format (pickle)
# using Python 2. A Python 2 str(byte string) will be unpickled as
# a Python 3 str (unicode string), which will cause the above to
# fail with:
#
# TypeError: 'str' does not support the buffer interface
body = io.BytesIO(body_raw.encode('utf8'))
return HTTPResponse(
body=body,
preload_content=False,
**cached["response"]
) | Verify our vary headers match and construct a real urllib3
HTTPResponse object. |
def line_cap_type(self):
"""Cap type, one of `butt`, `round`, `square`."""
key = self._data.get(b'strokeStyleLineCapType').enum
return self.STROKE_STYLE_LINE_CAP_TYPES.get(key, str(key)) | Cap type, one of `butt`, `round`, `square`. |
def nodes(self):
"""
Return the nodes for this VSS Container
:rtype: SubElementCollection(VSSContainerNode)
"""
resource = sub_collection(
self.get_relation('vss_container_node'),
VSSContainerNode)
resource._load_from_engine(self, 'nodes')
return resource | Return the nodes for this VSS Container
:rtype: SubElementCollection(VSSContainerNode) |
def repl_command(fxn):
"""
Decorator for cmd methods
Parses arguments from the arg string and passes them to the method as *args
and **kwargs.
"""
@functools.wraps(fxn)
def wrapper(self, arglist):
"""Wraps the command method"""
args = []
kwargs = {}
if arglist:
for arg in shlex.split(arglist):
if "=" in arg:
split = arg.split("=", 1)
kwargs[split[0]] = split[1]
else:
args.append(arg)
return fxn(self, *args, **kwargs)
return wrapper | Decorator for cmd methods
Parses arguments from the arg string and passes them to the method as *args
and **kwargs. |
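A sketch of how the decorator plugs into a cmd.Cmd handler; `do_put` and its parameters are illustrative:

import cmd

class Repl(cmd.Cmd):
    @repl_command
    def do_put(self, key, value, overwrite="false"):
        """put <key> <value> [overwrite=true]"""
        print(key, value, overwrite)

# Typing `put name Alice overwrite=true` at the prompt calls
# do_put(self, "name", "Alice", overwrite="true").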
def verify(self, otp, for_time=None, valid_window=0):
"""
Verifies the OTP passed in against the current time OTP
@param [String/Integer] otp the OTP to check against
@param [Integer] valid_window extends the validity to this many counter ticks before and after the current one
"""
if for_time is None:
for_time = datetime.datetime.now()
if valid_window:
for i in range(-valid_window, valid_window + 1):
if utils.strings_equal(str(otp), str(self.at(for_time, i))):
return True
return False
return utils.strings_equal(str(otp), str(self.at(for_time))) | Verifies the OTP passed in against the current time OTP
@param [String/Integer] otp the OTP to check against
@param [Integer] valid_window extends the validity to this many counter ticks before and after the current one |
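A hedged usage sketch, assuming the surrounding class is constructed like a TOTP generator (the `at()` calls above suggest a pyotp-style interface); the secret is a placeholder:

import datetime

totp = TOTP('base32secret3232')                    # hypothetical construction
now = datetime.datetime.now()
code = totp.at(now)                                # OTP for the current time step
totp.verify(code, for_time=now)                    # True
totp.verify(code, for_time=now, valid_window=1)    # also accepts adjacent time steps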
def expectation_importance_sampler_logspace(
log_f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler_logspace'):
r"""Importance sampling with a positive function, in log-space.
With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\),
this `Op` returns
\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\)
\\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\)
\\(= Log[E_p[f(Z)]]\\)
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
In contrast to `expectation_importance_sampler`, this `Op` returns values in
log-space.
User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
sampling_dist_q: The sampling distribution.
`tfp.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
Logarithm of the importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with tf.name_scope(name):
z = _get_samples(q, z, n, seed)
log_values = log_f(z) + log_p(z) - q.log_prob(z)
return _logspace_mean(log_values) | r"""Importance sampling with a positive function, in log-space.
With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\),
this `Op` returns
\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\)
\\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\)
\\(= Log[E_p[f(Z)]]\\)
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
In contrast to `expectation_importance_sampler`, this `Op` returns values in
log-space.
User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
sampling_dist_q: The sampling distribution.
`tfp.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
Logarithm of the importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`. |
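A hedged numerical check, assuming the function above and its private helpers are in scope; with f(z) = z**2, E_p[Z**2] = 1 for a standard normal p, so the log-estimate should be close to 0:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
p = tfd.Normal(loc=0., scale=1.)
q = tfd.Normal(loc=0., scale=2.)   # proposal with heavier tails than p

log_estimate = expectation_importance_sampler_logspace(
    log_f=lambda z: 2.0 * tf.math.log(tf.math.abs(z)),   # log f(z) for f(z) = z**2
    log_p=p.log_prob,
    sampling_dist_q=q,
    n=100000,
    seed=17)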
def write_elements(fd, mtp, data, is_name=False):
"""Write data element tag and data.
The tag contains the array type and the number of
bytes the array data will occupy when written to file.
If data occupies 4 bytes or less, it is written immediately
as a Small Data Element (SDE).
"""
fmt = etypes[mtp]['fmt']
if isinstance(data, Sequence):
if fmt == 's' or is_name:
if isinstance(data, bytes):
if is_name and len(data) > 31:
raise ValueError(
'Name "{}" is too long (max. 31 '
'characters allowed)'.format(data))
fmt = '{}s'.format(len(data))
data = (data,)
else:
fmt = ''.join('{}s'.format(len(s)) for s in data)
else:
l = len(data)
if l == 0:
# empty array
fmt = ''
if l > 1:
# more than one element to be written
fmt = '{}{}'.format(l, fmt)
else:
data = (data,)
num_bytes = struct.calcsize(fmt)
if num_bytes <= 4:
# write SDE
if num_bytes < 4:
# add pad bytes
fmt += '{}x'.format(4 - num_bytes)
fd.write(struct.pack('hh' + fmt, etypes[mtp]['n'],
*chain([num_bytes], data)))
return
# write tag: element type and number of bytes
fd.write(struct.pack('b3xI', etypes[mtp]['n'], num_bytes))
# add pad bytes to fmt, if needed
mod8 = num_bytes % 8
if mod8:
fmt += '{}x'.format(8 - mod8)
# write data
fd.write(struct.pack(fmt, *data)) | Write data element tag and data.
The tag contains the array type and the number of
bytes the array data will occupy when written to file.
If data occupies 4 bytes or less, it is written immediately
as a Small Data Element (SDE). |
def graft_neuron(root_section):
'''Returns a neuron starting at root_section'''
assert isinstance(root_section, Section)
return Neuron(soma=Soma(root_section.points[:1]), neurites=[Neurite(root_section)]) | Returns a neuron starting at root_section |