def rectify_acquaintance_strategy(
circuit: circuits.Circuit,
acquaint_first: bool=True
) -> None:
"""Splits moments so that they contain either only acquaintance gates
or only permutation gates. Orders resulting moments so that the first one
is of the same type as the previous one.
Args:
circuit: The acquaintance strategy to rectify.
        acquaint_first: Whether to make the acquaintance moment come first
            when splitting the first mixed moment.
"""
if not is_acquaintance_strategy(circuit):
raise TypeError('not is_acquaintance_strategy(circuit)')
rectified_moments = []
for moment in circuit:
gate_type_to_ops = collections.defaultdict(list
) # type: Dict[bool, List[ops.GateOperation]]
for op in moment.operations:
gate_type_to_ops[isinstance(op.gate, AcquaintanceOpportunityGate)
].append(op)
if len(gate_type_to_ops) == 1:
rectified_moments.append(moment)
continue
for acquaint_first in sorted(gate_type_to_ops.keys(),
reverse=acquaint_first):
rectified_moments.append(
ops.Moment(gate_type_to_ops[acquaint_first]))
    circuit._moments = rectified_moments
def separate_into_sections(self, data_frame, labels_col='anno', labels_to_keep=[1,2], min_labels_in_sequence=100):
""" Helper function to separate a time series into multiple sections based on a labeled column.
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
    :param labels_col: The column which has the labels we would like to separate the data_frame on ('anno' default).
    :type labels_col: str
    :param labels_to_keep: The unique label ids of the labels which we would like to keep, out of all the labels in the labels_col ([1, 2] default).
:type labels_to_keep: list
:param min_labels_in_sequence: The minimum number of samples which can make up a section (100 default).
:type min_labels_in_sequence: int
:return: A list of DataFrames, segmented accordingly.
:rtype: list
"""
sections = [[]]
mask = data_frame[labels_col].apply(lambda x: x in labels_to_keep)
for i,m in enumerate(mask):
if m:
sections[-1].append(i)
if not m and len(sections[-1]) > min_labels_in_sequence:
sections.append([])
sections.pop()
sections = [self.rebuild_indexes(data_frame.iloc[s]) for s in sections]
        return sections
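# Usage sketch (not from the original source): the method is shown on a hypothetical
# wrapper whose rebuild_indexes simply resets the DataFrame index.
import pandas as pd

class _Segmenter:
    separate_into_sections = separate_into_sections  # reuse the function above

    def rebuild_indexes(self, data_frame):
        return data_frame.reset_index(drop=True)

_df = pd.DataFrame({
    'x': range(10), 'y': range(10), 'z': range(10),
    'anno': [1, 1, 1, 0, 0, 2, 2, 2, 2, 0],
})
_parts = _Segmenter().separate_into_sections(_df, min_labels_in_sequence=2)
print([len(p) for p in _parts])  # -> [3, 4]: two labeled stretches survive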
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
        return static_file(name, '')
def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500):
'''
Ensures a host's core dump configuration.
name
Name of the state.
enabled
Sets whether or not ESXi core dump collection should be enabled.
This is a boolean value set to ``True`` or ``False`` to enable
or disable core dumps.
Note that ESXi requires that the core dump must be enabled before
any other parameters may be set. This also affects the ``changes``
results in the state return dictionary. If ``enabled`` is ``False``,
we can't obtain any previous settings to compare other state variables,
resulting in many ``old`` references returning ``None``.
Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons
        will be more accurate. This is due to the way the system coredump
network configuration command returns data.
dump_ip
The IP address of host that will accept the dump.
host_vnic
Host VNic port through which to communicate. Defaults to ``vmk0``.
dump_port
TCP port to use for the dump. Defaults to ``6500``.
Example:
.. code-block:: yaml
configure-host-coredump:
esxi.coredump_configured:
- enabled: True
- dump_ip: 'my-coredump-ip.example.com'
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
enabled_msg = 'ESXi requires that the core dump must be enabled ' \
'before any other parameters may be set.'
host = __pillar__['proxy']['host']
current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host)
error = current_config.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_config = current_config.get('Coredump Config')
current_enabled = current_config.get('enabled')
# Configure coredump enabled state, if there are changes.
if current_enabled != enabled:
enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}}
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('coredump_network_enable',
enabled=enabled).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Allow users to disable core dump, but then return since
# nothing else can be set if core dump is disabled.
if not enabled:
ret['result'] = True
ret['comment'] = enabled_msg
ret['changes'].update(enabled_changes)
return ret
ret['changes'].update(enabled_changes)
elif not enabled:
# If current_enabled and enabled match, but are both False,
# We must return before configuring anything. This isn't a
# failure as core dump may be disabled intentionally.
ret['result'] = True
ret['comment'] = enabled_msg
return ret
    # Test for changes with all remaining configurations. The changes flag is used
    # to detect changes, and then set_coredump_network_config is called one time.
changes = False
current_ip = current_config.get('ip')
if current_ip != dump_ip:
ret['changes'].update({'dump_ip':
{'old': current_ip,
'new': dump_ip}})
changes = True
current_vnic = current_config.get('host_vnic')
if current_vnic != host_vnic:
ret['changes'].update({'host_vnic':
{'old': current_vnic,
'new': host_vnic}})
changes = True
current_port = current_config.get('port')
if current_port != six.text_type(dump_port):
ret['changes'].update({'dump_port':
{'old': current_port,
'new': six.text_type(dump_port)}})
changes = True
# Only run the command if not using test=True and changes were detected.
if not __opts__['test'] and changes is True:
response = __salt__[esxi_cmd]('set_coredump_network_config',
dump_ip=dump_ip,
host_vnic=host_vnic,
dump_port=dump_port).get(host)
if response.get('success') is False:
msg = response.get('stderr')
if not msg:
msg = response.get('stdout')
ret['comment'] = 'Error: {0}'.format(msg)
return ret
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'Core Dump configuration is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Core dump configuration will change.'
    return ret
def _random_weights(n_features, lam, lam_perturb, prng):
"""Generate a symmetric random matrix with zeros along the diagnoal and
non-zero elements take the value {lam * lam_perturb, lam / lam_perturb}
with probability 1/2.
"""
weights = np.zeros((n_features, n_features))
n_off_diag = int((n_features ** 2 - n_features) / 2)
berns = prng.binomial(1, 0.5, size=n_off_diag)
vals = np.zeros(berns.shape)
vals[berns == 0] = 1. * lam * lam_perturb
vals[berns == 1] = 1. * lam / lam_perturb
weights[np.triu_indices(n_features, k=1)] = vals
weights[weights < 0] = 0
weights = weights + weights.T
    return weights
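# A quick usage sketch (assumes numpy is available as np, as in the function body).
import numpy as np

prng = np.random.RandomState(42)
W = _random_weights(n_features=4, lam=0.5, lam_perturb=2.0, prng=prng)
assert np.allclose(W, W.T) and np.all(np.diag(W) == 0)
# Off-diagonal entries are either lam * lam_perturb (1.0) or lam / lam_perturb (0.25).
print(sorted(set(W[np.triu_indices(4, k=1)])))  # -> [0.25, 1.0]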
def lines_iter(self):
'''
        Yields the contents of the Dockerfile one line at a time, with each line stripped and decoded to text.
        :return: generator of str
'''
        # Decode bytes to text (if necessary) and strip surrounding whitespace
        byte_to_string = lambda x: x.strip().decode(u'utf-8') if isinstance(x, bytes) else x.strip()
        # Read the Dockerfile and yield each decoded line
with open(self.docker_file_path, u'r') as f:
for line in f:
                yield byte_to_string(line)
def model_counts_map(self, name=None, exclude=None, use_mask=False):
"""Return the model expectation map for a single source, a set
of sources, or all sources in the ROI. The map will be
computed using the current model parameters.
Parameters
----------
name : str
Parameter that defines the sources for which the model map
will be calculated. If name=None a model map will be
generated for all sources in the model. If name='diffuse'
a map for all diffuse sources will be generated.
exclude : list
Source name or list of source names that will be excluded
from the model map.
use_mask : bool
        Parameter that specifies whether the model counts map should include
        masked pixels (i.e., ones whose weights are <= 0)
Returns
-------
map : `~fermipy.skymap.Map`
A map object containing the counts and WCS projection.
"""
if self.projtype == "WCS":
v = pyLike.FloatVector(self.npix ** 2 * self.enumbins)
elif self.projtype == "HPX":
v = pyLike.FloatVector(np.max(self.geom.npix) * self.enumbins)
else:
raise Exception("Unknown projection type %s", self.projtype)
exclude = utils.arg_to_list(exclude)
names = utils.arg_to_list(name)
excluded_names = []
for i, t in enumerate(exclude):
srcs = self.roi.get_sources_by_name(t)
excluded_names += [s.name for s in srcs]
if not hasattr(self.like.logLike, 'loadSourceMaps'):
# Update fixed model
self.like.logLike.buildFixedModelWts()
# Populate source map hash
self.like.logLike.buildFixedModelWts(True)
elif (name is None or name == 'all') and not exclude:
self.like.logLike.loadSourceMaps()
src_names = []
if (name is None) or (name == 'all'):
src_names = [src.name for src in self.roi.sources]
elif name == 'diffuse':
src_names = [src.name for src in self.roi.sources if
src.diffuse]
else:
srcs = [self.roi.get_source_by_name(t) for t in names]
src_names = [src.name for src in srcs]
# Remove sources in exclude list
src_names = [str(t) for t in src_names if t not in excluded_names]
# EAC we need the try blocks b/c older versions of the ST don't have some of these functions
if len(src_names) == len(self.roi.sources):
try:
self.like.logLike.computeModelMap(v, use_mask)
except (TypeError, NotImplementedError):
self.like.logLike.computeModelMap(v)
elif not hasattr(self.like.logLike, 'setSourceMapImage'):
for s in src_names:
model = self.like.logLike.sourceMap(str(s))
try:
self.like.logLike.updateModelMap(v, model, use_mask)
except (TypeError, NotImplementedError):
self.like.logLike.updateModelMap(v, model)
else:
try:
if hasattr(self.like.logLike, 'has_weights'):
self.like.logLike.computeModelMap(src_names, v, use_mask)
else:
self.like.logLike.computeModelMap(src_names, v)
except:
vsum = np.zeros(v.size())
for s in src_names:
vtmp = pyLike.FloatVector(v.size())
if hasattr(self.like.logLike, 'has_weights'):
self.like.logLike.computeModelMap(
str(s), vtmp, use_mask)
else:
self.like.logLike.computeModelMap(str(s), vtmp)
vsum += vtmp
v = pyLike.FloatVector(vsum)
if self.projtype == "WCS":
z = np.array(v).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self._geom), z)
elif self.projtype == "HPX":
z = np.array(v).reshape(self.enumbins, np.max(self._geom.npix))
return HpxNDMap(copy.deepcopy(self._geom), z)
else:
        raise Exception(
            "Did not recognize projection type %s", self.projtype)
def insert_point(self, x, y):
""" Inserts a point on the path at the mouse location.
We first need to check if the mouse location is on the path.
        Inserting a point is time-intensive and experimental.
"""
try:
bezier = _ctx.ximport("bezier")
except:
from nodebox.graphics import bezier
# Do a number of checks distributed along the path.
# Keep the one closest to the actual mouse location.
# Ten checks works fast but leads to imprecision in sharp corners
# and curves closely located next to each other.
# I prefer the slower but more stable approach.
n = 100
closest = None
dx0 = float("inf")
dy0 = float("inf")
for i in range(n):
t = float(i)/n
pt = self.path.point(t)
dx = abs(pt.x-x)
dy = abs(pt.y-y)
if dx+dy <= dx0+dy0:
dx0 = dx
dy0 = dy
closest = t
# Next, scan the area around the approximation.
# If the closest point is located at 0.2 on the path,
# we need to scan between 0.1 and 0.3 for a better
# approximation. If 1.5 was the best guess, scan
# 1.40, 1.41 ... 1.59 and so on.
# Each decimal precision takes 20 iterations.
decimals = [3,4]
for d in decimals:
d = 1.0/pow(10,d)
for i in range(20):
t = closest-d + float(i)*d*0.1
if t < 0.0: t = 1.0+t
if t > 1.0: t = t-1.0
pt = self.path.point(t)
dx = abs(pt.x-x)
dy = abs(pt.y-y)
if dx <= dx0 and dy <= dy0:
dx0 = dx
dy0 = dy
closest_precise = t
closest = closest_precise
# Update the points list with the inserted point.
p = bezier.insert_point(self.path, closest_precise)
i, t, pt = bezier._locate(self.path, closest_precise)
i += 1
pt = PathElement()
pt.cmd = p[i].cmd
pt.x = p[i].x
pt.y = p[i].y
pt.ctrl1 = Point(p[i].ctrl1.x, p[i].ctrl1.y)
pt.ctrl2 = Point(p[i].ctrl2.x, p[i].ctrl2.y)
pt.freehand = False
self._points.insert(i, pt)
self._points[i-1].ctrl1 = Point(p[i-1].ctrl1.x, p[i-1].ctrl1.y)
self._points[i+1].ctrl1 = Point(p[i+1].ctrl1.x, p[i+1].ctrl1.y)
        self._points[i+1].ctrl2 = Point(p[i+1].ctrl2.x, p[i+1].ctrl2.y)
def send(self, tid, session, feature=None):
        '''taobao.logistics.dummy.send - no-logistics (virtual) shipment processing.
        Calling this API performs a virtual shipment that needs no physical logistics;
        once used, the trade order status changes directly to "seller has shipped".'''
request = TOPRequest('taobao.logistics.dummy.send')
request['tid'] = tid
if feature!=None: request['feature'] = feature
self.create(self.execute(request, session))
        return self.shipping
def generate_module_table_header(modules):
""" Generate header with module table entries for builtin modules.
:param List[(module_name, obj_module, enabled_define)] modules: module defs
:return: None
"""
# Print header file for all external modules.
mod_defs = []
print("// Automatically generated by makemoduledefs.py.\n")
for module_name, obj_module, enabled_define in modules:
mod_def = "MODULE_DEF_{}".format(module_name.upper())
mod_defs.append(mod_def)
print((
"#if ({enabled_define})\n"
" extern const struct _mp_obj_module_t {obj_module};\n"
" #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n"
"#else\n"
" #define {mod_def}\n"
"#endif\n"
).format(module_name=module_name, obj_module=obj_module,
enabled_define=enabled_define, mod_def=mod_def)
)
print("\n#define MICROPY_REGISTERED_MODULES \\")
for mod_def in mod_defs:
print(" {mod_def} \\".format(mod_def=mod_def))
    print("// MICROPY_REGISTERED_MODULES")
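# Example invocation (the module triples below are illustrative, not from a real build):
generate_module_table_header([
    ('uos', 'mp_module_uos', 'MICROPY_PY_UOS'),
    ('ujson', 'mp_module_ujson', 'MICROPY_PY_UJSON'),
])  # prints the #if/#define table and MICROPY_REGISTERED_MODULES to stdout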
def popular(self, period=DAILY):
'''
Get popular songs.
:param period: time period
    :rtype: a generator that yields :class:`Song` objects
Time periods:
+---------------------------------+-----------------------------------+
| Constant | Meaning |
+=================================+===================================+
| :const:`Client.DAILY` | Popular songs of this day |
+---------------------------------+-----------------------------------+
| :const:`Client.MONTHLY` | Popular songs of this month |
+---------------------------------+-----------------------------------+
'''
songs = self.connection.request(
'popularGetSongs',
{'type': period},
self.connection.header('popularGetSongs'))[1]['Songs']
        return (Song.from_response(song, self.connection) for song in songs)
def calloc(self, sim_nmemb, sim_size):
"""
A somewhat faithful implementation of libc `calloc`.
        :param sim_nmemb: the number of elements to allocate
:param sim_size: the size of each element (in bytes)
:returns: the address of the allocation, or a NULL pointer if the allocation failed
"""
raise NotImplementedError("%s not implemented for %s" % (self.calloc.__func__.__name__,
                                                                  self.__class__.__name__))
def largest_graph(mol):
    """Return the molecule with the largest connected graph in the compound.
    Passing a single-molecule object gives the same result as molutil.clone.
"""
mol.require("Valence")
mol.require("Topology")
m = clone(mol) # Avoid modification of original object
if m.isolated:
for k in itertools.chain.from_iterable(m.isolated):
m.remove_atom(k)
    return m
def format_exc(*exc_info):
"""Show exception with traceback."""
typ, exc, tb = exc_info or sys.exc_info()
error = traceback.format_exception(typ, exc, tb)
    return "".join(error)
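# Usage sketch: format the exception currently being handled (sys and traceback
# are assumed to be imported by this module, as the function body requires).
try:
    1 / 0
except ZeroDivisionError:
    print(format_exc())  # falls back to sys.exc_info() when no arguments are given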
def _set_tacacs_server(self, v, load=False):
"""
Setter method for tacacs_server, mapped from YANG variable /tacacs_server (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tacacs_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tacacs_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tacacs_server must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
})
self.__tacacs_server = t
if hasattr(self, '_set'):
      self._set()
def organizations(self):
"""
| Comment: The ids of the organizations that have access
"""
if self.api and self.organization_ids:
            return self.api._get_organizations(self.organization_ids)
def get_project(self) -> str:
        """ Get the IHC project, making sure the controller is ready before fetching it."""
with IHCController._mutex:
if self._project is None:
if self.client.get_state() != IHCSTATE_READY:
ready = self.client.wait_for_state_change(IHCSTATE_READY,
10)
if ready != IHCSTATE_READY:
return None
self._project = self.client.get_project()
            return self._project
def combine_psf(kernel_list_new, kernel_old, sigma_bkg, factor=1, stacking_option='median', symmetry=1):
"""
updates psf estimate based on old kernel and several new estimates
:param kernel_list_new: list of new PSF kernels estimated from the point sources in the image
:param kernel_old: old PSF kernel
:param sigma_bkg: estimated background noise in the image
:param factor: weight of updated estimate based on new and old estimate, factor=1 means new estimate,
factor=0 means old estimate
:param stacking_option: option of stacking, mean or median
:param symmetry: imposed symmetry of PSF estimate
:return: updated PSF estimate and error_map associated with it
"""
n = int(len(kernel_list_new) * symmetry)
angle = 360. / symmetry
kernelsize = len(kernel_old)
kernel_list = np.zeros((n, kernelsize, kernelsize))
i = 0
for kernel_new in kernel_list_new:
for k in range(symmetry):
kernel_rotated = image_util.rotateImage(kernel_new, angle * k)
kernel_norm = kernel_util.kernel_norm(kernel_rotated)
kernel_list[i, :, :] = kernel_norm
i += 1
kernel_old_rotated = np.zeros((symmetry, kernelsize, kernelsize))
for i in range(symmetry):
kernel_old_rotated[i, :, :] = kernel_old
kernel_list_new = np.append(kernel_list, kernel_old_rotated, axis=0)
if stacking_option == 'median':
kernel_new = np.median(kernel_list_new, axis=0)
elif stacking_option == 'mean':
kernel_new = np.mean(kernel_list_new, axis=0)
else:
raise ValueError(" stack_option must be 'median' or 'mean', %s is not supported." % stacking_option)
kernel_new[kernel_new < 0] = 0
kernel_new = kernel_util.kernel_norm(kernel_new)
kernel_return = factor * kernel_new + (1.-factor)* kernel_old
kernel_bkg = copy.deepcopy(kernel_return)
kernel_bkg[kernel_bkg < sigma_bkg] = sigma_bkg
error_map = np.var(kernel_list_new, axis=0) / kernel_bkg**2 / 2.
    return kernel_return, error_map
def colorize(text, ansi=True):
"""
If the client wants ansi, replace the tokens with ansi sequences --
otherwise, simply strip them out.
"""
if ansi:
text = text.replace('^^', '\x00')
for token, code in _ANSI_CODES:
text = text.replace(token, code)
text = text.replace('\x00', '^')
else:
text = strip_caret_codes(text)
    return text
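# Illustration only: _ANSI_CODES normally lives in this module; a tiny stand-in
# table is defined here just so the caret-token replacement can be demonstrated.
_ANSI_CODES = [('^R', '\x1b[31m'), ('^x', '\x1b[0m')]
print(colorize('^Rwarning^x, with a literal ^^R caret'))
# '^^' survives as a single '^', so the trailing '^R' is not treated as a token.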
def rich_item(self, method_name, value):
"""
Convert this value into the rich txkoji objects (if applicable)
"""
if value is None:
return None
if method_name == 'getAverageBuildDuration':
return timedelta(seconds=value)
types = (Build, Channel, Package, Task)
if isinstance(value, Munch):
for type_ in types:
if type_.__name__ in method_name:
item = type_(value)
item.connection = self.connection
return item
if isinstance(value, list):
# Do this same rich item conversion for list of Munch objects
items_list = [self.rich_item(method_name, val) for val in value]
return items_list
        return value
def get(self):
"""Get any clients ready to be used.
:returns: Iterable of redis clients
"""
now = time.time()
while self._clients and self._clients[0][0] < now:
_, (client, last_wait) = heapq.heappop(self._clients)
connect_start = time.time()
try:
client.echo("test") # reconnected if this succeeds.
self._client_ids.remove(client.pool_id)
yield client
except (ConnectionError, TimeoutError):
timer = time.time() - connect_start
wait = min(int(last_wait * self._multiplier), self._max_wait)
heapq.heappush(self._clients, (time.time() + wait, (client, wait)))
log.info(
"%r is still down after a %s second attempt to connect. Retrying in %ss.",
client,
timer,
wait,
                )
def GetSecurityToken(self, username, password):
"""
Grabs a security Token to authenticate to Office 365 services
"""
url = 'https://login.microsoftonline.com/extSTS.srf'
body = """
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:a="http://www.w3.org/2005/08/addressing"
xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<s:Header>
<a:Action s:mustUnderstand="1">http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue</a:Action>
<a:ReplyTo>
<a:Address>http://www.w3.org/2005/08/addressing/anonymous</a:Address>
</a:ReplyTo>
<a:To s:mustUnderstand="1">https://login.microsoftonline.com/extSTS.srf</a:To>
<o:Security s:mustUnderstand="1"
xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
<o:UsernameToken>
<o:Username>%s</o:Username>
<o:Password>%s</o:Password>
</o:UsernameToken>
</o:Security>
</s:Header>
<s:Body>
<t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust">
<wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy">
<a:EndpointReference>
<a:Address>%s</a:Address>
</a:EndpointReference>
</wsp:AppliesTo>
<t:KeyType>http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey</t:KeyType>
<t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType>
<t:TokenType>urn:oasis:names:tc:SAML:1.0:assertion</t:TokenType>
</t:RequestSecurityToken>
</s:Body>
</s:Envelope>""" % (username, password, self.share_point_site)
headers = {'accept': 'application/json;odata=verbose'}
response = requests.post(url, body, headers=headers)
xmldoc = etree.fromstring(response.content)
token = xmldoc.find(
'.//{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd}BinarySecurityToken'
)
if token is not None:
return token.text
else:
            raise Exception('Check username/password and rootsite')
def validate_context(context):
"""
    Validate the service and key in the given context.
Args:
context: a populated EFVersionContext object
"""
# Service must exist in service registry
if not context.service_registry.service_record(context.service_name):
fail("service: {} not found in service registry: {}".format(
context.service_name, context.service_registry.filespec))
service_type = context.service_registry.service_record(context.service_name)["type"]
# Key must be valid
if context.key not in EFConfig.VERSION_KEYS:
fail("invalid key: {}; see VERSION_KEYS in ef_config for supported keys".format(context.key))
# Lookup allowed key for service type
if "allowed_types" in EFConfig.VERSION_KEYS[context.key] and \
service_type not in EFConfig.VERSION_KEYS[context.key]["allowed_types"]:
fail("service_type: {} is not allowed for key {}; see VERSION_KEYS[KEY]['allowed_types']"
"in ef_config and validate service registry entry".format(service_type, context.key))
  return True
def pack_sidechains(pdb, sequence, path=False):
"""Packs sidechains onto a given PDB file or string.
Parameters
----------
pdb : str
PDB string or a path to a PDB file.
sequence : str
Amino acid sequence for SCWRL to pack in single-letter code.
path : bool, optional
True if pdb is a path.
Returns
-------
scwrl_pdb : str
String of packed SCWRL PDB.
scwrl_score : float
Scwrl packing score.
"""
scwrl_std_out, scwrl_pdb = run_scwrl(pdb, sequence, path=path)
    return parse_scwrl_out(scwrl_std_out, scwrl_pdb)
def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
action_type=None):
'''
Post data to OpsGenie. It's designed for Salt's Event Reactor.
After configuring the sls reaction file as shown above, you can trigger the
module with your designated tag (og-tag in this case).
CLI Example:
.. code-block:: bash
salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}'
Required parameters:
api_key
It's the API Key you've copied while adding integration in OpsGenie.
reason
It will be used as alert's default message in OpsGenie.
action_type
OpsGenie supports the default values Create/Close for action_type. You
can customize this field with OpsGenie's custom actions for other
purposes like adding notes or acknowledging alerts.
Optional parameters:
name
It will be used as alert's alias. If you want to use the close
functionality you must provide name field for both states like in
this case.
'''
if api_key is None or reason is None:
raise salt.exceptions.SaltInvocationError(
'API Key or Reason cannot be None.')
data = dict()
data['alias'] = name
data['message'] = reason
# data['actions'] = action_type
data['cpuModel'] = __grains__['cpu_model']
data['cpuArch'] = __grains__['cpuarch']
data['fqdn'] = __grains__['fqdn']
data['host'] = __grains__['host']
data['id'] = __grains__['id']
data['kernel'] = __grains__['kernel']
data['kernelRelease'] = __grains__['kernelrelease']
data['master'] = __grains__['master']
data['os'] = __grains__['os']
data['saltPath'] = __grains__['saltpath']
data['saltVersion'] = __grains__['saltversion']
data['username'] = __grains__['username']
data['uuid'] = __grains__['uuid']
log.debug('Below data will be posted:\n%s', data)
log.debug('API Key: %s \t API Endpoint: %s', api_key, API_ENDPOINT)
if action_type == "Create":
response = requests.post(
url=API_ENDPOINT,
data=salt.utils.json.dumps(data),
headers={'Content-Type': 'application/json',
'Authorization': 'GenieKey ' + api_key})
else:
response = requests.post(
url=API_ENDPOINT + "/" + name + "/close?identifierType=alias",
data=salt.utils.json.dumps(data),
headers={'Content-Type': 'application/json',
'Authorization': 'GenieKey ' + api_key})
    return response.status_code, response.text
def checksec_app(_parser, _, args): # pragma: no cover
"""
Check security features of an ELF file.
"""
import sys
import argparse
import csv
import os.path
def checksec(elf, path, fortifiable_funcs):
relro = 0
nx = False
pie = 0
rpath = False
runpath = False
for header in elf.program_headers:
if header.type == ELF.ProgramHeader.Type.gnu_relro:
relro = 1
elif header.type == ELF.ProgramHeader.Type.gnu_stack:
if not header.flags & ELF.ProgramHeader.Flags.x:
nx = True
if elf.type == ELF.Type.shared:
pie = 1
for entry in elf.dynamic_section_entries:
if entry.type == ELF.DynamicSectionEntry.Type.bind_now and relro == 1:
relro = 2
elif entry.type == ELF.DynamicSectionEntry.Type.flags and \
entry.value & ELF.DynamicSectionEntry.Flags.bind_now:
relro = 2
elif entry.type == ELF.DynamicSectionEntry.Type.flags_1 and \
entry.value & ELF.DynamicSectionEntry.Flags_1.now:
relro = 2
elif entry.type == ELF.DynamicSectionEntry.Type.debug and pie == 1:
pie = 2
elif entry.type == ELF.DynamicSectionEntry.Type.rpath:
rpath = True
elif entry.type == ELF.DynamicSectionEntry.Type.runpath:
runpath = True
rtl_symbol_names = set(
symbol.name
for symbol in elf.symbols
if symbol.name and symbol.shndx == ELF.Symbol.SpecialSection.undef
)
fortified = fortifiable_funcs & rtl_symbol_names
unfortified = fortifiable_funcs & set('__%s_chk' % symbol_name for symbol_name in rtl_symbol_names)
canary = '__stack_chk_fail' in rtl_symbol_names
return {
'path': path,
'relro': relro,
'nx': nx,
'pie': pie,
'rpath': rpath,
'runpath': runpath,
'canary': canary,
'fortified': len(fortified),
'unfortified': len(unfortified),
'fortifiable': len(fortified | unfortified),
}
def check_paths(paths, fortifiable_funcs):
for path in paths:
if os.path.isdir(path):
for data in check_paths(
(os.path.join(path, fn) for fn in os.listdir(path) if fn not in ('.', '..')),
fortifiable_funcs,
):
yield data
else:
try:
elf = ELF(path)
except:
continue
yield checksec(elf, path, fortifiable_funcs)
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument('path', nargs='+', help='ELF file to check security features of')
parser.add_argument(
'-f', '--format',
dest='format',
choices=['text', 'csv'],
default='text',
help='set output format'
)
parser.add_argument(
'-l', '--libc',
dest='libc',
help='path to the applicable libc.so'
)
args = parser.parse_args(args)
if args.libc:
libc = ELF(args.libc)
fortifiable_funcs = set([
symbol.name
for symbol in libc.symbols
if symbol.name.startswith('__') and symbol.name.endswith('_chk')
])
else:
fortifiable_funcs = set('''__wctomb_chk __wcsncat_chk __mbstowcs_chk __strncpy_chk __syslog_chk __mempcpy_chk
__fprintf_chk __recvfrom_chk __readlinkat_chk __wcsncpy_chk __fread_chk
__getlogin_r_chk __vfwprintf_chk __recv_chk __strncat_chk __printf_chk __confstr_chk
__pread_chk __ppoll_chk __ptsname_r_chk __wcscat_chk __snprintf_chk __vwprintf_chk
__memset_chk __memmove_chk __gets_chk __fgetws_unlocked_chk __asprintf_chk __poll_chk
__fdelt_chk __fgets_unlocked_chk __strcat_chk __vsyslog_chk __stpcpy_chk
__vdprintf_chk __strcpy_chk __obstack_printf_chk __getwd_chk __pread64_chk
__wcpcpy_chk __fread_unlocked_chk __dprintf_chk __fgets_chk __wcpncpy_chk
__obstack_vprintf_chk __wprintf_chk __getgroups_chk __wcscpy_chk __vfprintf_chk
__fgetws_chk __vswprintf_chk __ttyname_r_chk __mbsrtowcs_chk
__wmempcpy_chk __wcsrtombs_chk __fwprintf_chk __read_chk __getcwd_chk __vsnprintf_chk
__memcpy_chk __wmemmove_chk __vasprintf_chk __sprintf_chk __vprintf_chk
__mbsnrtowcs_chk __wcrtomb_chk __realpath_chk __vsprintf_chk __wcsnrtombs_chk
__gethostname_chk __swprintf_chk __readlink_chk __wmemset_chk __getdomainname_chk
__wmemcpy_chk __longjmp_chk __stpncpy_chk __wcstombs_chk'''.split())
if args.format == 'text':
print('RELRO CANARY NX PIE RPATH RUNPATH FORTIFIED PATH')
for data in check_paths(args.path, fortifiable_funcs):
print('{:7} {:6} {:3} {:3} {:5} {:7} {:>9} {}'.format(
('No', 'Partial', 'Full')[data['relro']],
'Yes' if data['canary'] else 'No',
'Yes' if data['nx'] else 'No',
('No', 'DSO', 'Yes')[data['pie']],
'Yes' if data['rpath'] else 'No',
'Yes' if data['runpath'] else 'No',
'{}/{}/{}'.format(data['fortified'], data['unfortified'], data['fortifiable']),
data['path']
))
else:
writer = csv.writer(sys.stdout)
writer.writerow(['path', 'relro', 'canary', 'nx', 'pie', 'rpath', 'runpath', 'fortified', 'unfortified',
'fortifiable'])
for data in check_paths(args.path, fortifiable_funcs):
writer.writerow([
data['path'],
('no', 'partial', 'full')[data['relro']],
'yes' if data['canary'] else 'no',
'yes' if data['nx'] else 'no',
('no', 'dso', 'yes')[data['pie']],
'yes' if data['rpath'] else 'no',
'yes' if data['runpath'] else 'no',
data['fortified'],
data['unfortified'],
data['fortifiable'],
            ])
def _parse_attr(cls, value, package_dir=None):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
parent_path = os.getcwd()
if package_dir:
if attrs_path[0] in package_dir:
# A custom path was specified for the module we want to import
custom_path = package_dir[attrs_path[0]]
parts = custom_path.rsplit('/', 1)
if len(parts) > 1:
parent_path = os.path.join(os.getcwd(), parts[0])
module_name = parts[1]
else:
module_name = custom_path
elif '' in package_dir:
# A custom parent directory was specified for all root modules
parent_path = os.path.join(os.getcwd(), package_dir[''])
sys.path.insert(0, parent_path)
try:
module = import_module(module_name)
value = getattr(module, attr_name)
finally:
sys.path = sys.path[1:]
    return value
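# Usage sketch (hypothetical call): the first positional argument is the owning
# class and is unused in the body, so None is passed here purely for illustration.
print(_parse_attr(None, 'attr: os.path.sep'))  # imports os.path and returns its 'sep'
print(_parse_attr(None, '1.2.3'))              # non-directive values pass through unchanged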
def adjust_boxes(line_wave, box_widths, left_edge, right_edge,
max_iter=1000, adjust_factor=0.35,
                 factor_decrement=3.0, fd_p=0.75):
    """Adjust the given boxes so that they don't overlap.
Parameters
----------
line_wave: list or array of floats
Line wave lengths. These are assumed to be the initial y (wave
length) location of the boxes.
box_widths: list or array of floats
Width of box containing labels for each line identification.
left_edge: float
Left edge of valid data i.e., wave length minimum.
right_edge: float
Right edge of valid data i.e., wave lengths maximum.
max_iter: int
Maximum number of iterations to attempt.
adjust_factor: float
Gap between boxes are reduced or increased by this factor after
each iteration.
factor_decrement: float
The `adjust_factor` itself if reduced by this factor, after
certain number of iterations. This is useful for crowded
regions.
fd_p: float
Percentage, given as a fraction between 0 and 1, after which
adjust_factor must be reduced by a factor of
`factor_decrement`. Default is set to 0.75.
Returns
-------
    wlp, changed, niter: (list of floats, bool, int)
        The new y (wave length) locations of the text boxes, a flag
        indicating whether any changes to the input locations were made,
        and the number of iterations used.
Notes
-----
This is a direct translation of the code in lineid_plot.pro file in
NASA IDLAstro library.
Positions are returned either when the boxes no longer overlap or
when `max_iter` number of iterations are completed. So if there are
many boxes, there is a possibility that the final box locations
overlap.
References
----------
+ http://idlastro.gsfc.nasa.gov/ftp/pro/plot/lineid_plot.pro
+ http://idlastro.gsfc.nasa.gov/
"""
# Adjust positions.
niter = 0
changed = True
nlines = len(line_wave)
wlp = line_wave[:]
while changed:
changed = False
for i in range(nlines):
if i > 0:
diff1 = wlp[i] - wlp[i - 1]
separation1 = (box_widths[i] + box_widths[i - 1]) / 2.0
else:
diff1 = wlp[i] - left_edge + box_widths[i] * 1.01
separation1 = box_widths[i]
if i < nlines - 2:
diff2 = wlp[i + 1] - wlp[i]
separation2 = (box_widths[i] + box_widths[i + 1]) / 2.0
else:
diff2 = right_edge + box_widths[i] * 1.01 - wlp[i]
separation2 = box_widths[i]
if diff1 < separation1 or diff2 < separation2:
if wlp[i] == left_edge:
diff1 = 0
if wlp[i] == right_edge:
diff2 = 0
if diff2 > diff1:
wlp[i] = wlp[i] + separation2 * adjust_factor
wlp[i] = wlp[i] if wlp[i] < right_edge else \
right_edge
else:
wlp[i] = wlp[i] - separation1 * adjust_factor
wlp[i] = wlp[i] if wlp[i] > left_edge else \
left_edge
changed = True
niter += 1
if niter == max_iter * fd_p:
adjust_factor /= factor_decrement
if niter >= max_iter:
break
    return wlp, changed, niter
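# Small numeric check (standalone): the first two boxes start out overlapping and
# get pushed apart; note the function returns (wlp, changed, niter).
new_wave, changed, niter = adjust_boxes(
    [1.0, 1.05, 3.0], [0.3, 0.3, 0.3], left_edge=0.0, right_edge=5.0)
print(new_wave, changed, niter)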
def onset_strength(y=None, sr=22050, S=None, lag=1, max_size=1,
ref=None,
detrend=False, center=True,
feature=None, aggregate=None,
centering=None,
**kwargs):
"""Compute a spectral flux onset strength envelope.
Onset strength at time `t` is determined by:
`mean_f max(0, S[f, t] - ref[f, t - lag])`
where `ref` is `S` after local max filtering along the frequency
axis [1]_.
By default, if a time series `y` is provided, S will be the
log-power Mel spectrogram.
.. [1] Böck, Sebastian, and Gerhard Widmer.
"Maximum filter vibrato suppression for onset detection."
16th International Conference on Digital Audio Effects,
Maynooth, Ireland. 2013.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, eg, scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function
Aggregation function to use when combining onsets
at different frequency bins.
Default: `np.mean`
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(m,)]
vector containing the onset strength envelope
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
or if `lag` or `max_size` are not positive integers
See Also
--------
onset_detect
onset_strength_multi
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> times = librosa.frames_to_time(np.arange(D.shape[1]))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
Construct a standard onset function
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,
... label='Mean (mel)')
Median aggregation, and custom mel options
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... aggregate=np.median,
... fmax=8000, n_mels=256)
>>> plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
... label='Median (custom mel)')
Constant-Q spectrogram instead of Mel
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... feature=librosa.cqt)
>>> plt.plot(times, onset_env / onset_env.max(), alpha=0.8,
... label='Mean (CQT)')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.ylabel('Normalized strength')
>>> plt.yticks([])
>>> plt.axis('tight')
>>> plt.tight_layout()
"""
    if aggregate is False:
        raise ParameterError('aggregate={} cannot be False when computing full-spectrum '
                             'onset strength.'.format(aggregate))
odf_all = onset_strength_multi(y=y,
sr=sr,
S=S,
lag=lag,
max_size=max_size,
ref=ref,
detrend=detrend,
center=center,
feature=feature,
aggregate=aggregate,
channels=None,
**kwargs)
    return odf_all[0]
def post_upgrade_checks(self, upgrades):
"""Run post-upgrade checks after applying all pending upgrades.
Post checks may be used to emit warnings encountered when applying an
upgrade, but post-checks can also be used to advice the user to run
re-indexing or similar long running processes.
Post-checks may query for user-input, but should respect the
--yes-i-know option to run in an unattended mode.
All applied upgrades post-checks are executed.
:param upgrades: List of upgrades sorted in topological order.
"""
errors = []
for u in upgrades:
self._setup_log_prefix(plugin_id=u.name)
try:
u.post_upgrade()
except RuntimeError as e:
errors.append((u.name, e.args))
for check in self.global_post_upgrade:
self._setup_log_prefix(plugin_id=check.__name__)
try:
check()
except RuntimeError as e:
errors.append((check.__name__, e.args))
self._teardown_log_prefix()
        self._check_errors(errors, "Post-upgrade check for %s failed with the "
                           "following errors:")
def mint_token_if_balance_low(
token_contract: ContractProxy,
target_address: str,
min_balance: int,
fund_amount: int,
gas_limit: int,
mint_msg: str,
no_action_msg: str = None,
) -> Optional[TransactionHash]:
""" Check token balance and mint if below minimum """
balance = token_contract.contract.functions.balanceOf(target_address).call()
if balance < min_balance:
mint_amount = fund_amount - balance
log.debug(mint_msg, address=target_address, amount=mint_amount)
return token_contract.transact('mintFor', gas_limit, mint_amount, target_address)
else:
if no_action_msg:
log.debug(no_action_msg, balance=balance)
        return None
def remove_xml_element(name, tree):
""" Removes XML elements from an ElementTree content tree """
# root = tree.getroot()
remove = tree.findall(
".//{{http://soap.sforce.com/2006/04/metadata}}{}".format(name)
)
if not remove:
return tree
parent_map = {c: p for p in tree.iter() for c in p}
for elem in remove:
parent = parent_map[elem]
parent.remove(elem)
    return tree
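# Usage sketch with a small in-memory element in the Salesforce metadata namespace:
import xml.etree.ElementTree as ET

_src = ('<Package xmlns="http://soap.sforce.com/2006/04/metadata">'
        '<types><members>Foo</members><name>ApexClass</name></types></Package>')
_root = ET.fromstring(_src)
remove_xml_element('members', _root)   # an Element works here: it has findall() and iter()
print(ET.tostring(_root).decode())     # the <members> node is gone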
def where_unique(cls, ip, object_id, location):
        """ Get the record matching the unique (ip, object_id, location) combination """
return cls.query.filter_by(
ip=ip,
object_id=object_id,
            location=location).first()
def data_log_likelihood(self, successes, trials, beta):
'''Calculates the log-likelihood of a Polya tree bin given the beta values.'''
        return binom.logpmf(successes, trials, 1.0 / (1 + np.exp(-beta))).sum()
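# Worked example of the same quantity, standalone: 3 successes in 10 trials with
# beta = 0 gives p = 1 / (1 + exp(0)) = 0.5, so log(C(10,3) * 0.5**10) ≈ -2.144.
import numpy as np
from scipy.stats import binom
print(binom.logpmf(3, 10, 1.0 / (1 + np.exp(-0.0))).sum())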
def get_currentDim(self):
'''
returns the current dimensions of the object
'''
selfDim = self._dimensions.copy()
if not isinstance(selfDim,dimStr):
            if '_ndims' in selfDim:
                nself = selfDim.pop('_ndims')
            else:
                self.warning(1, 'self._dimensions does not have the _ndims key')
                nself = len(selfDim)
        else:
            nself = selfDim['_ndims']
curDim = [[key for key in selfDim.keys()],[selfDim[key] for key in selfDim.keys()]]
        return curDim, nself
def load_params(fname: str) -> Tuple[Dict[str, mx.nd.NDArray], Dict[str, mx.nd.NDArray]]:
"""
Loads parameters from a file.
:param fname: The file containing the parameters.
:return: Mapping from parameter names to the actual parameters for both the arg parameters and the aux parameters.
"""
save_dict = mx.nd.load(fname)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
"""TODO(fhieber):
temporary weight split for models with combined weight for keys & values
in transformer source attention layers. This can be removed once with the next major version change."""
if "att_enc_kv2h_weight" in name:
logger.info("Splitting '%s' parameters into separate k & v matrices.", name)
v_split = mx.nd.split(v, axis=0, num_outputs=2)
arg_params[name.replace('kv2h', "k2h")] = v_split[0]
arg_params[name.replace('kv2h', "v2h")] = v_split[1]
else:
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
    return arg_params, aux_params
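# Usage sketch (assumes MXNet is available): round-trip a small parameter file.
import mxnet as mx
mx.nd.save('params.tmp', {'arg:weight': mx.nd.ones((2, 2)),
                          'aux:moving_mean': mx.nd.zeros((2,))})
arg_params, aux_params = load_params('params.tmp')
print(sorted(arg_params), sorted(aux_params))  # ['weight'] ['moving_mean']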
def pubrec(self, mid):
"""Send PUBREC response to server."""
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.logger.info("Send PUBREC (msgid=%s)", mid)
pkt = MqttPkt()
pkt.command = NC.CMD_PUBREC
pkt.remaining_length = 2
ret = pkt.alloc()
if ret != NC.ERR_SUCCESS:
return ret
#variable header: acknowledged message id
pkt.write_uint16(mid)
        return self.packet_queue(pkt)
def row(self, data):
"""Return a formatted row for the given data."""
for column in self.column_funcs:
if callable(column):
yield column(data)
else:
                yield utils.lookup(data, *column)
def all(guideids=None, filter=None, order=None):
'''
Fetch all guides.
:param iterable guideids: Only return Guides corresponding to these ids.
:param string filter: Only return guides of this type. Choices:
installation, repair, disassembly, teardown,
technique, maintenance.
:param string order: Instead of ordering by guideid, order alphabetically.
Choices: ASC, DESC.
:rtype: generator of :class:`pyfixit.guide.Guide` objects.
'''
parameters = []
if guideids:
parameters.append('guideids=%s' % ','.join(map(str, guideids)))
if filter:
parameters.append('filter=%s' % filter)
if order:
parameters.append('order=%s' % order)
parameters = '&'.join(parameters)
offset = 0
limit = 5 # Tune this to balance memory vs. frequent network trips.
guideJSONs = []
while True:
if not guideJSONs:
url = '%s/guides?offset=%s&limit=%s&%s' \
% (API_BASE_URL, offset, limit, parameters)
response = requests.get(url)
guideJSONs = response.json()
# Are we at the end of pagination?
if not guideJSONs:
return
offset += limit
        yield Guide(guideJSONs.pop(0)['guideid'])
def iflat_tasks_wti(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the `Flow`.
Yields:
(task, work_index, task_index)
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
        return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
def build_model_classes(metadata):
"""Generate a model class for any models contained in the specified spec file."""
i = importlib.import_module(metadata)
env = get_jinja_env()
model_template = env.get_template('model.py.jinja2')
for model in i.models:
with open(model_path(model.name.lower()), 'w') as t:
            t.write(model_template.render(model_md=model))
def _cdf(self, xloc, left, right, cache):
"""
Cumulative distribution function.
Example:
>>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.5 1. 1. ]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.70710678 1. 1. ]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).fwd([0.4, 0.6, 0.8, 1.2]))
[0. 0.33333333 0.75 1. ]
>>> print(chaospy.Pow(2, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5849625 1. ]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).fwd([0.4, 0.6, 0.8, 1.2]))
[0. 0.26303441 0.67807191 1. ]
>>> print(chaospy.Pow(2, 3).fwd([7, 8, 9]))
[0. 1. 1.]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return numpy.inf
else:
assert numpy.all(left > 0), "imaginary result"
y = (numpy.log(numpy.abs(xloc) + 1.*(xloc <= 0)) /
numpy.log(numpy.abs(left)+1.*(left == 1)))
out = evaluation.evaluate_forward(right, y)
out = numpy.where(xloc <= 0, 0., out)
return out
y = numpy.sign(xloc)*numpy.abs(xloc)**(1./right)
pairs = numpy.sign(xloc**right) != -1
out1, out2 = (
evaluation.evaluate_forward(left, y, cache=cache),
evaluation.evaluate_forward(left, -y, cache=cache),
)
out = numpy.where(right < 0, 1-out1, out1-pairs*out2)
        return out
def _get_goslimids_norel(self, dagslim):
"""Get all GO slim GO IDs that do not have a relationship."""
go_slims = set()
go2obj = self.gosubdag.go2obj
for goid in dagslim:
goobj = go2obj[goid]
if not goobj.relationship:
go_slims.add(goobj.id)
        return go_slims
def split_len(s, length):
"""split string *s* into list of strings no longer than *length*"""
    return [s[i:i+length] for i in range(0, len(s), length)]
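# Example: chunk a string into pieces of at most 3 characters.
print(split_len('abcdefgh', 3))  # -> ['abc', 'def', 'gh']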
def qtePrepareToRun(self):
"""
This method is called by Qtmacs to prepare the macro for
execution.
It is probably a bad idea to overload this method as it only
administrates the macro execution and calls the ``qteRun``
method (which *should* be overloaded by the macro programmer
in order for the macro to do something).
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
"""
# Report the execution attempt.
msgObj = QtmacsMessage((self.qteMacroName(), self.qteWidget), None)
msgObj.setSignalName('qtesigMacroStart')
self.qteMain.qtesigMacroStart.emit(msgObj)
# Try to run the macro and radio the success via the
# ``qtesigMacroFinished`` signal.
try:
self.qteRun()
self.qteMain.qtesigMacroFinished.emit(msgObj)
except Exception as err:
if self.qteApplet is None:
appID = appSig = None
else:
appID = self.qteApplet.qteAppletID()
appSig = self.qteApplet.qteAppletSignature()
msg = ('Macro <b>{}</b> (called from the <b>{}</b> applet'
' with ID <b>{}</b>) did not execute properly.')
msg = msg.format(self.qteMacroName(), appSig, appID)
if isinstance(err, QtmacsArgumentError):
msg += '<br/>' + str(err)
# Irrespective of the error, log it, enable macro
# processing (in case it got disabled), and trigger the
# error signal.
self.qteMain.qteEnableMacroProcessing()
self.qteMain.qtesigMacroError.emit(msgObj)
            self.qteLogger.exception(msg, exc_info=True, stack_info=True)
def has_a_matching_perm(self, perm_list, obj=None):
"""Returns True if the user has one of the specified permissions.
If object is passed, it checks if the user has any of the required
perms for this object.
"""
# If there are no permissions to check, just return true
if not perm_list:
return True
# Check that user has at least one of the required permissions.
for perm in perm_list:
if self.has_perm(perm, obj):
return True
        return False
def login_service_description(self):
"""Login service description.
The login service description _MUST_ include the token service
description. The authentication pattern is indicated via the
profile URI which is built using self.auth_pattern.
"""
label = 'Login to ' + self.name
if (self.auth_type):
label = label + ' (' + self.auth_type + ')'
desc = {"@id": self.login_uri,
"profile": self.profile_base + self.auth_pattern,
"label": label}
if (self.header):
desc['header'] = self.header
if (self.description):
desc['description'] = self.description
        return desc
def slice_hidden(self, x):
"""Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim].
"""
x_sliced = tf.reshape(
x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])
    return x_sliced
def deactivate_mfa_device(self, user_name, serial_number):
"""
Deactivates the specified MFA device and removes it from
association with the user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
        :param serial_number: The serial number which uniquely identifies
the MFA device.
"""
params = {'UserName' : user_name,
'SerialNumber' : serial_number}
        return self.get_response('DeactivateMFADevice', params)
def _create_cifti_image(bold_file, label_file, annotation_files, gii_files,
volume_target, surface_target, tr):
"""
Generate CIFTI image in target space
Parameters
bold_file : 4D BOLD timeseries
label_file : label atlas
annotation_files : FreeSurfer annotations
gii_files : 4D BOLD surface timeseries in GIFTI format
volume_target : label atlas space
surface_target : gii_files space
    tr : repetition time (TR), in seconds
Returns
out_file : BOLD data as CIFTI dtseries
"""
label_img = nb.load(label_file)
bold_img = resample_to_img(bold_file, label_img)
bold_data = bold_img.get_data()
timepoints = bold_img.shape[3]
label_data = label_img.get_data()
# set up CIFTI information
series_map = ci.Cifti2MatrixIndicesMap((0, ),
'CIFTI_INDEX_TYPE_SERIES',
number_of_series_points=timepoints,
series_exponent=0,
series_start=0.0,
series_step=tr,
series_unit='SECOND')
# Create CIFTI brain models
idx_offset = 0
brainmodels = []
bm_ts = np.empty((timepoints, 0))
for structure, labels in CIFTI_STRUCT_WITH_LABELS.items():
if labels is None: # surface model
model_type = "CIFTI_MODEL_TYPE_SURFACE"
# use the corresponding annotation
hemi = structure.split('_')[-1]
annot = nb.freesurfer.read_annot(annotation_files[hemi == "RIGHT"])
# currently only supports L/R cortex
gii = nb.load(gii_files[hemi == "RIGHT"])
# calculate total number of vertices
surf_verts = len(annot[0])
# remove medial wall for CIFTI format
vert_idx = np.nonzero(annot[0] != annot[2].index(b'unknown'))[0]
# extract values across volumes
ts = np.array([tsarr.data[vert_idx] for tsarr in gii.darrays])
vert_idx = ci.Cifti2VertexIndices(vert_idx)
bm = ci.Cifti2BrainModel(index_offset=idx_offset,
index_count=len(vert_idx),
model_type=model_type,
brain_structure=structure,
vertex_indices=vert_idx,
n_surface_vertices=surf_verts)
bm_ts = np.column_stack((bm_ts, ts))
idx_offset += len(vert_idx)
brainmodels.append(bm)
else:
model_type = "CIFTI_MODEL_TYPE_VOXELS"
vox = []
ts = None
for label in labels:
ijk = np.nonzero(label_data == label)
ts = (bold_data[ijk] if ts is None
else np.concatenate((ts, bold_data[ijk])))
vox += [[ijk[0][ix], ijk[1][ix], ijk[2][ix]]
for ix, row in enumerate(ts)]
bm_ts = np.column_stack((bm_ts, ts.T))
vox = ci.Cifti2VoxelIndicesIJK(vox)
bm = ci.Cifti2BrainModel(index_offset=idx_offset,
index_count=len(vox),
model_type=model_type,
brain_structure=structure,
voxel_indices_ijk=vox)
idx_offset += len(vox)
brainmodels.append(bm)
volume = ci.Cifti2Volume(
bold_img.shape[:3],
ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, bold_img.affine))
brainmodels.append(volume)
# create CIFTI geometry based on brainmodels
geometry_map = ci.Cifti2MatrixIndicesMap((1, ),
'CIFTI_INDEX_TYPE_BRAIN_MODELS',
maps=brainmodels)
# provide some metadata to CIFTI matrix
meta = {
"target_surface": surface_target,
"target_volume": volume_target,
}
# generate and save CIFTI image
matrix = ci.Cifti2Matrix()
matrix.append(series_map)
matrix.append(geometry_map)
matrix.metadata = ci.Cifti2MetaData(meta)
hdr = ci.Cifti2Header(matrix)
img = ci.Cifti2Image(bm_ts, hdr)
img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES')
_, out_base, _ = split_filename(bold_file)
out_file = "{}.dtseries.nii".format(out_base)
ci.save(img, out_file)
    return os.path.join(os.getcwd(), out_file)
def _sort_by_indep(self, func='get_value', i=None, iunit=None, unit=None,
uncover=None, trail=None, linebreak=None,
sort_by_indep=None):
"""
must be called before (or within) _do_linebreak
"""
if sort_by_indep is None:
# TODO: add property of the call?
sort_by_indep = True
indep_array = self.call.i.get_value(i=i,
unit=iunit,
uncover=uncover,
trail=trail,
linebreak=False,
sort_by_indep=False)
this_array = getattr(self, func)(i=i,
unit=unit,
uncover=uncover,
trail=trail,
linebreak=False,
sort_by_indep=False)
if not (isinstance(indep_array, np.ndarray) and len(indep_array)==len(this_array)):
sort_by_indep = False
if sort_by_indep:
# TODO: it might be nice to buffer this at the call level, so making
# multiple get_value calls doesn't have to recompute the sort-order
sort_inds = indep_array.argsort()
return this_array[sort_inds]
else:
            return this_array
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
if method == 'signal_to_noise':
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/len(df_std)+df_std[neg]**2/len(df_std) )
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
ser = ser.sort_values(ascending=ascending)
return ser | 0.007526 |
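As a rough illustration of the class-mean metrics above, here is a hand-rolled computation on a tiny made-up expression table (the sample values, column names, and class assignment are invented for the example; the real function derives the class means with groupby(by=classes, axis=1)):
import numpy as np
import pandas as pd

# two genes, two 'pos' samples (p1, p2) and two 'neg' samples (n1, n2)
df = pd.DataFrame({'p1': [8.0, 2.0], 'p2': [6.0, 4.0],
                   'n1': [2.0, 4.0], 'n2': [4.0, 2.0]},
                  index=['geneA', 'geneB'])

pos_mean = df[['p1', 'p2']].mean(axis=1)  # per-gene mean of the 'pos' class
neg_mean = df[['n1', 'n2']].mean(axis=1)  # per-gene mean of the 'neg' class

diff_of_classes = pos_mean - neg_mean      # fold change on the natural scale
log2_ratio = np.log2(pos_mean / neg_mean)  # fold change on the log scale
print(diff_of_classes.sort_values(ascending=False))
print(log2_ratio.sort_values(ascending=False))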
def required_types(self):
"""Set of names of types which the Command depends on.
"""
required_types = set(x.type for x in self.params)
required_types.add(self.type)
required_types.discard(None)
return required_types | 0.007605 |
def real(self):
"""Real part of the element.
The real part can also be set using ``x.real = other``, where ``other``
is array-like or scalar.
Examples
--------
>>> space = odl.ProductSpace(odl.cn(3), odl.cn(2))
>>> x = space.element([[1 + 1j, 2, 3 - 3j],
... [-1 + 2j, -2 - 3j]])
>>> x.real
ProductSpace(rn(3), rn(2)).element([
[ 1., 2., 3.],
[-1., -2.]
])
The real part can also be set using different array-like types:
>>> x.real = space.real_space.zero()
>>> x
ProductSpace(cn(3), cn(2)).element([
[ 0.+1.j, 0.+0.j, 0.-3.j],
[ 0.+2.j, 0.-3.j]
])
>>> x.real = 1.0
>>> x
ProductSpace(cn(3), cn(2)).element([
[ 1.+1.j, 1.+0.j, 1.-3.j],
[ 1.+2.j, 1.-3.j]
])
>>> x.real = [[2, 3, 4], [5, 6]]
>>> x
ProductSpace(cn(3), cn(2)).element([
[ 2.+1.j, 3.+0.j, 4.-3.j],
[ 5.+2.j, 6.-3.j]
])
"""
real_part = [part.real for part in self.parts]
return self.space.real_space.element(real_part) | 0.00163 |
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None | 0.004386 |
def _get_header(self):
"""Parse the SWF header."""
fh = self._src
obj = _make_object("Header")
# first part of the header
obj.Signature = sign = "".join(chr(unpack_ui8(fh)) for _ in range(3))
obj.Version = self._version = unpack_ui8(fh)
obj.FileLength = file_length = unpack_ui32(fh)
# deal with compressed content
if sign[0] == 'C':
uncompressed = zlib.decompress(fh.read())
if len(uncompressed) + 8 != file_length:
raise ValueError("Problems dealing with compressed content")
fh = self._src = io.BytesIO(uncompressed)
# second part of the header
obj.FrameSize = self._get_struct_rect()
obj.FrameRate = unpack_ui16(fh)
obj.FrameCount = unpack_ui16(fh)
return obj | 0.00241 |
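The unpack_ui8/unpack_ui16/unpack_ui32 helpers are assumed to be defined elsewhere in the parser. A plausible sketch of such helpers with the standard struct module (SWF integers are little-endian); the real implementations may differ:
import struct

def unpack_ui8(fh):
    """Read one unsigned byte from a file-like object."""
    return struct.unpack("<B", fh.read(1))[0]

def unpack_ui16(fh):
    """Read an unsigned 16-bit little-endian integer."""
    return struct.unpack("<H", fh.read(2))[0]

def unpack_ui32(fh):
    """Read an unsigned 32-bit little-endian integer."""
    return struct.unpack("<I", fh.read(4))[0]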
def change_password(self, user, password, send_email=None):
"""
Service method to change a user's password.
Sends signal `password_changed`.
:param user: The :class:`User`'s password to change.
:param password: The new password.
:param send_email: Whether or not to override the config option
``SECURITY_SEND_PASSWORD_CHANGED_EMAIL`` and force
either sending or not sending an email.
"""
user.password = password
self.user_manager.save(user)
if send_email or (app.config.SECURITY_SEND_PASSWORD_CHANGED_EMAIL
and send_email is None):
self.send_mail(
_('flask_unchained.bundles.security:email_subject.password_changed_notice'),
to=user.email,
template='security/email/password_changed_notice.html',
user=user)
password_changed.send(app._get_current_object(), user=user) | 0.002962 |
def remove_file(filename, path=None):
"""
Remove file filename from path.
:param filename: Name of file to remove
:param path: Path where file is located
    :return: True if successful
:raises OSError if chdir or remove fails.
"""
cwd = os.getcwd()
try:
if path:
os.chdir(path)
except OSError:
raise
try:
os.remove(filename)
os.chdir(cwd)
return True
except OSError:
os.chdir(cwd)
raise | 0.001996 |
def allocate(self):
"""
Arrange for a unique context ID to be allocated and associated with a
route leading to the active context. In masters, the ID is generated
directly, in children it is forwarded to the master via a
:data:`mitogen.core.ALLOCATE_ID` message.
"""
self.lock.acquire()
try:
id_ = self.next_id
self.next_id += 1
return id_
finally:
self.lock.release() | 0.004098 |
def show(self, display=None):
"""Removes the display style attribute.
If a display type is provided """
self._stable = False
if not display:
self.attrs["style"].pop("display")
else:
self.attrs["style"]["display"] = display
return self | 0.006536 |
def delete(self, client=None):
"""Deletes a blob from Cloud Storage.
If :attr:`user_project` is set on the bucket, bills the API request
to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: :class:`Blob`
:returns: The blob that was just deleted.
:raises: :class:`google.cloud.exceptions.NotFound`
(propagated from
:meth:`google.cloud.storage.bucket.Bucket.delete_blob`).
"""
return self.bucket.delete_blob(
self.name, client=client, generation=self.generation
) | 0.002475 |
def create_function_f_i(self):
"""state reinitialization (reset) function"""
return ca.Function(
'f_i',
[self.t, self.x, self.y, self.m, self.p, self.c, self.pre_c, self.ng, self.nu],
[self.f_i],
['t', 'x', 'y', 'm', 'p', 'c', 'pre_c', 'ng', 'nu'], ['x_n'], self.func_opt) | 0.011905 |
def optics(cls, data, eps, minpts, ccore=False):
"""
        Constructor of OPTICS clustering algorithm
:param data: Input data that is presented as a list of points (objects), where each point is represented by list or tuple
:param eps: Connectivity radius between points, points may be connected if distance between them less than the radius
:param minpts: Minimum number of shared neighbors that is required for establishing links between points
:param amount_clusters: Optional parameter where amount of clusters that should be allocated is specified.
In case of usage 'amount_clusters' connectivity radius can be greater than real, in other words, there is place for mistake
in connectivity radius usage.
:param ccore: if True than DLL CCORE (C++ solution) will be used for solving the problem
        :return: the resulting clustering object
"""
data = cls.input_preprocess(data)
model = optics(data, eps, minpts)
return cls(model) | 0.007505 |
def parse(cls, version_string, partial=False, coerce=False):
"""Parse a version string into a Version() object.
Args:
version_string (str), the version string to parse
partial (bool), whether to accept incomplete input
coerce (bool), whether to try to map the passed in string into a
valid Version.
"""
if not version_string:
raise ValueError('Invalid empty version string: %r' % version_string)
if partial:
version_re = cls.partial_version_re
else:
version_re = cls.version_re
match = version_re.match(version_string)
if not match:
raise ValueError('Invalid version string: %r' % version_string)
major, minor, patch, prerelease, build = match.groups()
if _has_leading_zero(major):
raise ValueError("Invalid leading zero in major: %r" % version_string)
if _has_leading_zero(minor):
raise ValueError("Invalid leading zero in minor: %r" % version_string)
if _has_leading_zero(patch):
raise ValueError("Invalid leading zero in patch: %r" % version_string)
major = int(major)
minor = cls._coerce(minor, partial)
patch = cls._coerce(patch, partial)
if prerelease is None:
if partial and (build is None):
# No build info, strip here
return (major, minor, patch, None, None)
else:
prerelease = ()
elif prerelease == '':
prerelease = ()
else:
prerelease = tuple(prerelease.split('.'))
cls._validate_identifiers(prerelease, allow_leading_zeroes=False)
if build is None:
if partial:
build = None
else:
build = ()
elif build == '':
build = ()
else:
build = tuple(build.split('.'))
cls._validate_identifiers(build, allow_leading_zeroes=True)
return (major, minor, patch, prerelease, build) | 0.00286 |
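For intuition, a stripped-down, self-contained version of the same idea, splitting a SemVer string into numeric and dotted-identifier parts with a regular expression, could look like the following. This is only an illustration; it is not the library's actual version_re and skips the leading-zero and partial-version handling above:
import re

SEMVER_RE = re.compile(
    r'^(\d+)\.(\d+)\.(\d+)'        # major.minor.patch
    r'(?:-([0-9A-Za-z.-]+))?'      # optional prerelease after '-'
    r'(?:\+([0-9A-Za-z.-]+))?$'    # optional build metadata after '+'
)

def parse_semver(version_string):
    match = SEMVER_RE.match(version_string)
    if not match:
        raise ValueError('Invalid version string: %r' % version_string)
    major, minor, patch, prerelease, build = match.groups()
    return (int(major), int(minor), int(patch),
            tuple(prerelease.split('.')) if prerelease else (),
            tuple(build.split('.')) if build else ())

print(parse_semver('1.2.3-alpha.1+build.42'))
# (1, 2, 3, ('alpha', '1'), ('build', '42'))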
def run_rollouts(
env, agent, initial_observations, step_limit=None, discount_factor=1.0,
log_every_steps=None, video_writers=(), color_bar=False,
many_rollouts_from_each_env=False
):
"""Runs a batch of rollouts from given initial observations."""
assert step_limit is not None or not many_rollouts_from_each_env, (
"When collecting many rollouts from each environment, time limit must "
"be set."
)
num_dones = 0
first_dones = np.array([False] * env.batch_size)
observations = initial_observations
step_index = 0
cum_rewards = np.zeros(env.batch_size)
for (video_writer, obs_stack) in zip(video_writers, initial_observations):
for (i, ob) in enumerate(obs_stack):
debug_frame = augment_observation(
ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1),
bar_color=((0, 255, 0) if color_bar else None)
)
video_writer.write(debug_frame)
def proceed():
if step_index < step_limit:
return num_dones < env.batch_size or many_rollouts_from_each_env
else:
return False
while proceed():
act_kwargs = {}
if agent.needs_env_state:
act_kwargs["env_state"] = env.state
actions = agent.act(observations, **act_kwargs)
(observations, rewards, dones) = env.step(actions)
observations = list(observations)
now_done_indices = []
for (i, done) in enumerate(dones):
if done and (not first_dones[i] or many_rollouts_from_each_env):
now_done_indices.append(i)
first_dones[i] = True
num_dones += 1
if now_done_indices:
# Unless many_rollouts_from_each_env, reset only envs done the first time
# in this timestep to ensure that we collect exactly 1 rollout from each
# env.
reset_observations = env.reset(now_done_indices)
for (i, observation) in zip(now_done_indices, reset_observations):
observations[i] = observation
observations = np.array(observations)
cum_rewards[~first_dones] = (
cum_rewards[~first_dones] * discount_factor + rewards[~first_dones]
)
step_index += 1
for (video_writer, obs_stack, reward, cum_reward, done) in zip(
video_writers, observations, rewards, cum_rewards, first_dones
):
if done:
continue
ob = obs_stack[-1]
debug_frame = augment_observation(
ob, reward=reward, cum_reward=cum_reward,
frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None)
)
video_writer.write(debug_frame)
# TODO(afrozm): Clean this up with tf.logging.log_every_n
if log_every_steps is not None and step_index % log_every_steps == 0:
tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean())
return (observations, cum_rewards) | 0.010772 |
def _get_candidate_swap(resources, location,
l2v, vertices_resources, fixed_vertices, machine):
"""Given a chip location, select a set of vertices which would have to be
moved elsewhere to accommodate the arrival of the specified set of
resources.
Parameters
----------
resources : {resource: value, ...}
The amount of resources which are required at the specified location.
location : (x, y)
The coordinates of the chip where the resources are sought.
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
[Vertex, ...] or None
If a (possibly empty) list, gives the set of vertices which should be
removed from the specified location to make room.
If None, the situation is impossible.
"""
# The resources already available at the given location
chip_resources = machine[location]
# The set of vertices at that location
vertices = l2v[location]
# The set of vertices to be moved from the location to free up the
# specified amount of resources
to_move = []
# While there's not enough free resource, remove an arbitrary (movable)
# vertex from the chip.
i = 0
while overallocated(subtract_resources(chip_resources, resources)):
if i >= len(vertices):
# Run out of vertices to remove from this chip, thus the situation
# must be impossible.
return None
elif vertices[i] in fixed_vertices:
# Can't move fixed vertices, just skip them.
i += 1
continue
else:
# Work out the cost change when we remove the specified vertex
vertex = vertices[i]
chip_resources = add_resources(chip_resources,
vertices_resources[vertex])
to_move.append(vertex)
i += 1
return to_move | 0.000482 |
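The helpers overallocated, subtract_resources, and add_resources are defined elsewhere in the placer; conceptually they are per-key dictionary arithmetic. A rough sketch of what they might look like (an assumption for illustration, not the library's actual implementation):
def subtract_resources(available, required):
    """Per-resource difference: what remains after allocating `required`."""
    return {r: available.get(r, 0) - required.get(r, 0)
            for r in set(available) | set(required)}

def add_resources(available, freed):
    """Per-resource sum: what becomes available after freeing `freed`."""
    return {r: available.get(r, 0) + freed.get(r, 0)
            for r in set(available) | set(freed)}

def overallocated(resources):
    """True if any resource has gone negative."""
    return any(v < 0 for v in resources.values())

# e.g. a chip with 10 free cores asked to host a vertex needing 12 of them
print(overallocated(subtract_resources({'cores': 10, 'sdram': 65536},
                                       {'cores': 12, 'sdram': 1024})))  # True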
def plotfft(s, fmax, doplot=False):
""" This functions computes the fft of a signal, returning the frequency
and their magnitude values.
Parameters
----------
s: array-like
the input signal.
fmax: int
the sampling frequency.
doplot: boolean
a variable to indicate whether the plot is done or not.
Returns
-------
f: array-like
the frequency values (xx axis)
fs: array-like
the amplitude of the frequency values (yy axis)
"""
fs = abs(np.fft.fft(s))
    f = np.linspace(0, fmax / 2, len(s) // 2)
if doplot:
#pl.plot(f[1:int(len(s) / 2)], fs[1:int(len(s) / 2)])
pass
return (f[1:int(len(s) / 2)].copy(), fs[1:int(len(s) / 2)].copy()) | 0.002717 |
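A quick usage sketch, assuming the function above is available and NumPy is imported (the signal parameters are invented): sample a 5 Hz sine wave at 100 Hz and check where the magnitude spectrum peaks.
import numpy as np

fs_hz = 100                            # hypothetical sampling frequency
t = np.arange(0, 1.0, 1.0 / fs_hz)     # one second of samples
signal = np.sin(2 * np.pi * 5.0 * t)   # 5 Hz sine wave

freqs, mags = plotfft(signal, fs_hz)
print(freqs[np.argmax(mags)])          # expected to be close to 5 Hz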
def fftconv(a, b, axes=(0, 1)):
"""
Compute a multi-dimensional convolution via the Discrete Fourier
Transform. Note that the output has a phase shift relative to the
output of :func:`scipy.ndimage.convolve` with the default ``origin``
parameter.
Parameters
----------
a : array_like
Input array
b : array_like
Input array
axes : sequence of ints, optional (default (0, 1))
Axes on which to perform convolution
Returns
-------
ab : ndarray
Convolution of input arrays, a and b, along specified axes
"""
if np.isrealobj(a) and np.isrealobj(b):
fft = rfftn
ifft = irfftn
else:
fft = fftn
ifft = ifftn
dims = np.maximum([a.shape[i] for i in axes], [b.shape[i] for i in axes])
af = fft(a, dims, axes)
bf = fft(b, dims, axes)
return ifft(af * bf, dims, axes) | 0.001121 |
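The function relies on the convolution theorem: a pointwise product of spectra corresponds to a circular convolution of the signals for the `dims` chosen above, hence the phase-shift caveat in the docstring. A small one-dimensional sanity check of that identity, independent of the function itself:
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([0.5, 0.25, 0.0, 0.0])

# circular convolution computed directly from the definition
direct = np.array([sum(a[k] * b[(n - k) % len(a)] for k in range(len(a)))
                   for n in range(len(a))])

# the same result via the FFT: multiply the spectra, transform back
via_fft = np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real

print(np.allclose(direct, via_fft))  # True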
def compile_template(instance, template, additionnal_context=None):
"""
    Fill the given template with the instance's data and return the odt file
For every instance class, common values are also inserted in the context
dict (and so can be used) :
* config values
:param obj instance: the instance of a model (like Userdatas, Company)
:param template: the template object to use
    :param dict additionnal_context: A dict containing data we'd like to add to
    the py3o compilation template
    :return: a StringIO object filled with the resulting odt's content
"""
py3o_context = get_compilation_context(instance)
if additionnal_context is not None:
py3o_context.update(additionnal_context)
output_doc = StringIO()
odt_builder = Template(template, output_doc)
odt_builder.render(py3o_context)
return output_doc | 0.00224 |
def execute_command(self, command):
"""
        This method will execute the commands on the device as if you were just connected to it (it will not
        enter into any vdom). This method is not recommended unless you are 100% sure of what you are doing.
Args:
* **command** (str) -- Command to execute.
Returns:
A list of strings containing the output.
Raises:
exceptions.CommandExecutionException -- If it detects any problem with the command.
"""
logger.debug('Executing commands:\n %s' % command)
err_msg = 'Something happened when executing some commands on device'
chan = self.ssh.get_transport().open_session()
chan.settimeout(5)
chan.exec_command(command)
error_chan = chan.makefile_stderr()
output_chan = chan.makefile()
error = ''
output = ''
for e in error_chan.read():
error = error + self._read_wrapper(e)
for o in output_chan.read():
output = output + self._read_wrapper(o)
if len(error) > 0:
msg = '%s %s:\n%s\n%s' % (err_msg, self.ssh.get_host_keys().keys()[0], command, error)
logger.error(msg)
raise exceptions.CommandExecutionException(msg)
regex = re.compile('Command fail')
if len(regex.findall(output)) > 0:
msg = '%s %s:\n%s\n%s' % (err_msg, self.ssh.get_host_keys().keys()[0], command, output)
logger.error(msg)
raise exceptions.CommandExecutionException(msg)
output = output.splitlines()
# We look for the prompt and remove it
i = 0
for line in output:
current_line = line.split('#')
if len(current_line) > 1:
output[i] = current_line[1]
else:
output[i] = current_line[0]
i += 1
return output[:-1] | 0.003586 |
def createdb():
"""Create database tables from sqlalchemy models"""
manager.db.engine.echo = True
manager.db.create_all()
set_alembic_revision() | 0.00625 |
def is_marginable(self):
"""True if adding counts across this dimension axis is meaningful."""
return self.dimension_type not in {DT.CA, DT.MR, DT.MR_CAT, DT.LOGICAL} | 0.010989 |
def _read_para_unassigned(self, code, cbit, clen, *, desc, length, version):
"""Read HIP unassigned parameters.
Structure of HIP unassigned parameters [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type |C| Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
/ Contents /
/ +-+-+-+-+-+-+-+-+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 para.type Parameter Type
1 15 para.critical Critical Bit
2 16 para.length Length of Contents
4 32 para.contents Contents
- - - Padding
"""
unassigned = dict(
type=desc,
critical=cbit,
length=clen,
contents=self._read_fileng(clen),
)
plen = length - clen
if plen:
self._read_fileng(plen)
return unassigned | 0.001203 |
def encode_request(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(username)
client_message.append_str(password)
client_message.append_bool(uuid is None)
if uuid is not None:
client_message.append_str(uuid)
client_message.append_bool(owner_uuid is None)
if owner_uuid is not None:
client_message.append_str(owner_uuid)
client_message.append_bool(is_owner_connection)
client_message.append_str(client_type)
client_message.append_byte(serialization_version)
client_message.append_str(client_hazelcast_version)
client_message.update_frame_length()
return client_message | 0.002844 |
def find_version(*file_paths):
"""
read __init__.py
"""
file_path = os.path.join(*file_paths)
with open(file_path, 'r') as version_file:
line = version_file.readline()
while line:
if line.startswith('__version__'):
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
line,
re.M
)
if version_match:
return version_match.group(1)
line = version_file.readline()
raise RuntimeError('Unable to find version string.') | 0.001621 |
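The core of this is a single regular expression applied to the __version__ assignment line. A minimal demonstration on an in-memory string (the version value is hypothetical) rather than a real file:
import re

init_py = '__version__ = "0.4.2"\n'

match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", init_py, re.M)
print(match.group(1))  # 0.4.2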
def get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):
'''get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
    :param regexp: must be defined, the file filter regular expression
    :param description: optional description
    :param skip_files: optional set of file names to skip
    :param include_files: optional set of extra file names to include
    '''
    if regexp is None:
        regexp = "."
if description is None:
description = "This is a custom filter generated by the user."
custom = {"description":description,
"regexp":regexp}
# Include extra files?
if include_files is not None:
if not isinstance(include_files,set):
include_files = set(include_files)
custom['include_files'] = include_files
# Skip files?
if skip_files is not None:
if not isinstance(skip_files,set):
skip_files = set(skip_files)
custom['skip_files'] = skip_files
return custom | 0.01256 |
def refresh_tree(self, tree, items):
"""
refresh trees with current settings
Args:
tree: a QtWidgets.QTreeWidget object or a QtWidgets.QTreeView object
items: dictionary or Parameter items with which to populate the tree
show_all: boolean if true show all parameters, if false only selected ones
"""
if tree == self.tree_scripts or tree == self.tree_settings:
tree.itemChanged.disconnect()
self.fill_treewidget(tree, items)
tree.itemChanged.connect(lambda: self.update_parameters(tree))
elif tree == self.tree_gui_settings:
self.fill_treeview(tree, items) | 0.007267 |
def get_cluster_assignment(self):
"""Fetch the cluster layout in form of assignment from zookeeper"""
plan = self.get_cluster_plan()
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | 0.005764 |
def RfiltersBM(dataset,database,host=rbiomart_host):
"""
    Lists BioMart filters through an RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
ensembl=biomaRt.useDataset(dataset, mart=ensemblMart)
print(biomaRt.listFilters(ensembl)) | 0.009766 |
def pending():
"""Show the number of pending signals by signal type."""
signalbus = current_app.extensions['signalbus']
pending = []
total_pending = 0
for signal_model in signalbus.get_signal_models():
count = signal_model.query.count()
if count > 0:
pending.append((count, signal_model.__name__))
total_pending += count
if pending:
pending.sort()
max_chars = len(str(pending[-1][0]))
for n, signal_name in pending:
click.echo('{} of type "{}"'.format(str(n).rjust(max_chars), signal_name))
click.echo(25 * '-')
click.echo('Total pending: {} '.format(total_pending)) | 0.002981 |
def get_request(self, request):
"""Sets token-based auth headers."""
request.headers['authenticate'] = {
'complexType': 'PortalLoginToken',
'userId': self.user_id,
'authToken': self.auth_token,
}
return request | 0.007194 |
def sanitize_tex(original_text):
"""Sanitize TeX text.
:param original_text: the text to sanitize for LaTeX.
:type original_text: str
    :returns: the sanitized text.
Text is sanitized by following these steps:
    1. Replaces ``\\`` by ``\\textbackslash``
2. Escapes certain characters (such as ``$``, ``%``, ``_``, ``}``, ``{``,
``&`` and ``#``) by adding a backslash (*e.g.* from ``&`` to ``\\&``).
3. Replaces special characters such as ``~`` by the LaTeX equivalent
(*e.g.* from ``~`` to ``$\\sim$``).
"""
# The backslashes
sanitized_tex = original_text.replace("\\", r"\textbackslash ")
# Escaping
sanitized_tex = re.sub(r"([{}])".format("".join(_escaped_char)),
r"\\\g<1>", sanitized_tex)
# Replacing
for character, mod in _char_mod.items():
sanitized_tex = sanitized_tex.replace(character, mod)
return sanitized_tex | 0.001068 |
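_escaped_char and _char_mod are module-level tables that are not shown here. A self-contained sketch with minimal stand-in tables (their exact contents are an assumption for illustration; the real module defines richer versions):
import re

_escaped_char = ["$", "%", "_", "}", "{", "&", "#"]  # stand-in escape list
_char_mod = {"~": r"$\sim$"}                         # stand-in replacement map

def sanitize_tex_sketch(original_text):
    text = original_text.replace("\\", r"\textbackslash ")
    text = re.sub(r"([{}])".format("".join(re.escape(c) for c in _escaped_char)),
                  r"\\\g<1>", text)
    for character, mod in _char_mod.items():
        text = text.replace(character, mod)
    return text

print(sanitize_tex_sketch("50% of A_1 & B ~ C"))
# 50\% of A\_1 \& B $\sim$ C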
def get_daemon_stats(self, details=False):
"""Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
"""
# Call the base Daemon one
res = super(Broker, self).get_daemon_stats(details=details)
res.update({'name': self.name, 'type': self.type})
counters = res['counters']
counters['broks-external'] = len(self.external_broks)
counters['broks-internal'] = len(self.internal_broks)
counters['broks-arbiter'] = len(self.arbiter_broks)
counters['satellites.pollers'] = len(self.pollers)
counters['satellites.reactionners'] = len(self.reactionners)
counters['satellites.receivers'] = len(self.receivers)
return res | 0.002604 |
def submit_row(context):
"""
Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'.
Manipulates the context going into that function by hiding all of the buttons
in the submit row if the key `readonly` is set in the context.
"""
    ctx = original_submit_row(context)
    if context.get('readonly', False):
        ctx.update({
            'show_delete_link': False,
            'show_save_as_new': False,
            'show_save_and_add_another': False,
            'show_save_and_continue': False,
            'show_save': False,
        })
    return ctx | 0.003289 |
def get_enrollments(self, course_id=None, usernames=None):
"""
List all course enrollments.
Args:
course_id (str, optional): If used enrollments will be filtered to the specified
course id.
usernames (list, optional): List of usernames to filter enrollments.
Notes:
- This method returns an iterator to avoid going through the entire pagination at once.
- The :class:`Enrollments` instance returned for each generated item will not have any
course details.
Examples:
Get all enrollments for a specific course id
>>> api = EdxApi({'access_token': 'token'}, 'http://base_url')
>>> enrollments = api.enrollments.get_enrollments(course_id='course_id')
>>> for enrollment in enrollments:
do_something(enrollment)
Get all enrollments for a set of usernames
>>> api = EdxApi({'access_token': 'token'}, 'http://base_url')
>>> enrollments = api.enrollments.get_enrollments(usernames=['user1', 'user2'])
>>> for enrollment in enrollments:
do_something(enrollment)
Returns:
Generator with an instance of :class:`Enrollments` for each item.
"""
params = {}
if course_id is not None:
params['course_id'] = course_id
if usernames is not None and isinstance(usernames, list):
params['username'] = ','.join(usernames)
done = False
while not done:
enrollments, next_cursor = self._get_enrollments_list_page(params)
for enrollment in enrollments:
yield Enrollment(enrollment)
if next_cursor:
params['cursor'] = next_cursor
else:
done = True | 0.004271 |
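The generator-plus-cursor pattern used here is worth isolating: each page yields its items, and iteration only stops once the endpoint stops returning a cursor. A generic, library-independent sketch of the same pattern (fetch_page is a hypothetical stand-in for _get_enrollments_list_page):
def paginate(fetch_page, params=None):
    """Yield items from a cursor-paginated endpoint one at a time."""
    params = dict(params or {})
    while True:
        items, next_cursor = fetch_page(params)
        for item in items:
            yield item
        if not next_cursor:
            break
        params['cursor'] = next_cursor

# toy fetch_page returning three pages of fake data
def fake_fetch(params):
    pages = {None: ([1, 2], 'a'), 'a': ([3], 'b'), 'b': ([4], None)}
    return pages[params.get('cursor')]

print(list(paginate(fake_fetch)))  # [1, 2, 3, 4]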
def add(entry_point, all_entry_points, auto_write, scripts_path):
'''Add Scrim scripts for a python project'''
click.echo()
if not entry_point and not all_entry_points:
raise click.UsageError(
'Missing required option: --entry_point or --all_entry_points'
)
if not os.path.exists('setup.py'):
raise click.UsageError('No setup.py found.')
setup_data = parse_setup('setup.py')
console_scripts = get_console_scripts(setup_data)
scripts = []
if all_entry_points and console_scripts:
# Make sure our entry points start with py
for entry in console_scripts:
if not entry.startswith('py'):
click.echo('Your python entry_points must start with py.')
click.echo('Found: ' + entry)
raise click.Abort()
for entry in console_scripts:
click.echo('Found entry_point: ' + entry)
py_entry_point = entry
entry_point = entry[2:]
more_scripts = copy_templates(
entry_point,
py_entry_point,
auto_write,
scripts_path
)
for script in more_scripts:
click.echo(' Created ' + script)
scripts.extend(more_scripts)
elif entry_point:
if not entry_point.startswith('py'):
click.echo('Your python entry_points must start with py.')
raise click.Abort()
if entry_point not in console_scripts:
click.echo(entry_point + ' not found in your setups entry_points')
click.echo('You will need to add it afterward if you continue...')
click.echo('')
click.confirm('Do you want to continue?', abort=True)
click.echo('\nCreating scripts for: ' + entry_point)
py_entry_point = entry_point
entry_point = entry_point[2:]
more_scripts = copy_templates(
entry_point,
py_entry_point,
auto_write,
scripts_path
)
for script in more_scripts:
click.echo(' Created ' + script)
scripts.extend(more_scripts)
click.echo('\n\nAdd the following section to your package setup:\n')
click.echo('scripts=[')
for script in scripts:
click.echo(" '{}',".format(script))
click.echo('],') | 0.000418 |
def check_satpy(readers=None, writers=None, extras=None):
"""Check the satpy readers and writers for correct installation.
Args:
readers (list or None): Limit readers checked to those specified
writers (list or None): Limit writers checked to those specified
extras (list or None): Limit extras checked to those specified
    Prints the check results for each reader, writer, and extra import.
"""
from satpy.readers import configs_for_reader
from satpy.writers import configs_for_writer
print('Readers')
print('=======')
for reader, res in sorted(check_yaml_configs(configs_for_reader(reader=readers), 'reader').items()):
print(reader + ': ', res)
print()
print('Writers')
print('=======')
for writer, res in sorted(check_yaml_configs(configs_for_writer(writer=writers), 'writer').items()):
print(writer + ': ', res)
print()
print('Extras')
print('======')
module_names = extras if extras is not None else ('cartopy', 'geoviews')
for module_name, res in sorted(_check_import(module_names).items()):
print(module_name + ': ', res)
print() | 0.002553 |
def check_schema(self):
"""Check the schema exists and matches configuration"""
if self.valid_schema:
return
config = self.config
metadata = self.metadata()
if 'current_version' not in metadata:
raise GaugedSchemaError('Gauged schema not found, '
'try a gauged.sync()')
if metadata['current_version'] != Gauged.VERSION:
msg = 'The schema is version %s while this Gauged is version %s. '
msg += 'Try upgrading Gauged and/or running gauged_migrate.py'
msg = msg % (metadata['current_version'], Gauged.VERSION)
raise GaugedVersionMismatchError(msg)
expected_block_size = '%s/%s' % (config.block_size, config.resolution)
block_size = '%s/%s' % (metadata['block_size'], metadata['resolution'])
if block_size != expected_block_size:
msg = 'Expected %s and got %s' % (expected_block_size, block_size)
warn(msg, GaugedBlockSizeMismatch)
self.valid_schema = True | 0.001878 |
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False | 0.005545 |
def gene_variants(institute_id):
"""Display a list of SNV variants."""
page = int(request.form.get('page', 1))
institute_obj = institute_and_case(store, institute_id)
# populate form, conditional on request method
if(request.method == "POST"):
form = GeneVariantFiltersForm(request.form)
else:
form = GeneVariantFiltersForm(request.args)
variant_type = form.data.get('variant_type', 'clinical')
# check if supplied gene symbols exist
hgnc_symbols = []
non_clinical_symbols = []
not_found_symbols = []
not_found_ids = []
data = {}
if (form.hgnc_symbols.data) and len(form.hgnc_symbols.data) > 0:
is_clinical = form.data.get('variant_type', 'clinical') == 'clinical'
clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None
for hgnc_symbol in form.hgnc_symbols.data:
if hgnc_symbol.isdigit():
hgnc_gene = store.hgnc_gene(int(hgnc_symbol))
if hgnc_gene is None:
not_found_ids.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_gene['hgnc_symbol'])
elif store.hgnc_genes(hgnc_symbol).count() == 0:
not_found_symbols.append(hgnc_symbol)
elif is_clinical and (hgnc_symbol not in clinical_symbols):
non_clinical_symbols.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_symbol)
if (not_found_ids):
flash("HGNC id not found: {}".format(", ".join(not_found_ids)), 'warning')
if (not_found_symbols):
flash("HGNC symbol not found: {}".format(", ".join(not_found_symbols)), 'warning')
if (non_clinical_symbols):
flash("Gene not included in clinical list: {}".format(", ".join(non_clinical_symbols)), 'warning')
form.hgnc_symbols.data = hgnc_symbols
log.debug("query {}".format(form.data))
variants_query = store.gene_variants(query=form.data, category='snv',
variant_type=variant_type)
data = controllers.gene_variants(store, variants_query, page)
return dict(institute=institute_obj, form=form, page=page, **data) | 0.003125 |
def make_refresh_on_demand_service(injector_component):
"""
create a refresh on demand service listening to refresh order on the component admin queue
    :param injector_component: the injector_component to bind with the new refresh on demand service
:return: the created service
"""
LOGGER.debug("InjectorCachedComponentService.make_refresh_on_demand_service")
args = {
'service_q': injector_component.id,
'treatment_callback': injector_component.refresh,
'service_name': injector_component.id + " - On Demand Refreshing Service"
}
return InjectorCachedComponentService.driver.make_service(args) | 0.008547 |
def _gather_image_parts(self):
"""Load the image part collection with all the image parts in package."""
for rel in self.iter_rels():
if rel.is_external:
continue
if rel.reltype != RT.IMAGE:
continue
if rel.target_part in self.image_parts:
continue
self.image_parts.append(rel.target_part) | 0.007481 |
def validate(self, tracking_number):
"Return True if this is a valid USPS tracking number."
tracking_num = tracking_number[:-1].replace(' ', '')
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num):
if ii % 2:
odd_total += int(digit)
else:
even_total += int(digit)
total = odd_total + even_total * 3
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) | 0.003766 |
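The method applies a mod-10 check: digits in even positions (0, 2, 4, ...) are weighted by 3, digits in odd positions by 1, and the check digit is the amount needed to bring the weighted sum up to the next multiple of 10. A stand-alone sketch that derives the check digit for a hypothetical tracking-number body (the digits are invented; only the arithmetic matters):
def usps_check_digit(body):
    """Compute the trailing mod-10 check digit for a string of digits."""
    odd_total = sum(int(d) for d in body[1::2])   # positions 1, 3, 5, ...
    even_total = sum(int(d) for d in body[0::2])  # positions 0, 2, 4, ...
    total = odd_total + even_total * 3
    return (10 - total % 10) % 10

body = "94001108995621734"  # hypothetical tracking number without its check digit
print(body + str(usps_check_digit(body)))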
def _get_space_character_free_column_resolvers(self):
"""Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.common import _remove_spaces_column_name
return {_remove_spaces_column_name(k): v for k, v
in self.iteritems()} | 0.004283 |
def get_selections(fetchempty=True):
'''
Answers to debconf questions for all packages in the following format::
{'package': [['question', 'type', 'value'], ...]}
CLI Example:
.. code-block:: bash
salt '*' debconf.get_selections
'''
selections = {}
cmd = 'debconf-get-selections'
out = __salt__['cmd.run_stdout'](cmd)
lines = _unpack_lines(out)
for line in lines:
package, question, type_, value = line
if fetchempty or value:
(selections
.setdefault(package, [])
.append([question, type_, value]))
return selections | 0.001553 |
def get_identities(self, item):
""" Return the identities from an item """
field = self.get_field_author()
yield self.get_sh_identity(item, field) | 0.011696 |
def add_ap(self, id_, label=None, addPrefix=True):
""" Add id_ as an owl:AnnotationProperty"""
self.add_trip(id_, rdf.type, owl.AnnotationProperty)
if label:
self.add_trip(id_, rdfs.label, label)
if addPrefix:
prefix = ''.join([s.capitalize() for s in label.split()])
namespace = self.expand(id_)
self.add_namespace(prefix, namespace) | 0.004651 |
def get_namespace_from_name(name):
"""
can be either
<namespace>/projects/<project_name>
or
<namespace>/<project_name>
"""
if not re.match(NAMESPACE_PATTERN, name):
sys.exit(("Argument '%s' doesn't match any recognized pattern:\n"
"\tfloyd [data] init <project_or_dataset_name>\n"
"\tfloyd [data] init <namespace>/<project_or_dataset_name>\n"
"\tfloyd [data] init <namespace>/[projects|dataset]/<project_or_dataset_name>\n"
"\n Note: Argument can only contain alphanumeric, hyphen-minus '-' , underscore '_' and dot '.' characters."
) % name)
name_parts = name.split("/", 2)
if len(name_parts) > 1:
return name_parts[0], name_parts[-1]
else:
return current_username(), name | 0.003623 |
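The split logic accepts three input shapes; a small stand-alone sketch showing how each form resolves to a (namespace, name) pair (default_user is a stand-in for the current_username() lookup):
def split_name(name, default_user='current-user'):
    parts = name.split('/', 2)
    if len(parts) > 1:
        return parts[0], parts[-1]
    return default_user, name

print(split_name('alice/projects/mnist'))  # ('alice', 'mnist')
print(split_name('alice/mnist'))           # ('alice', 'mnist')
print(split_name('mnist'))                 # ('current-user', 'mnist')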
def getTopologyInfo(self, topologyName, cluster, role, environ):
"""
Returns the JSON representation of a topology
by its name, cluster, environ, and an optional role parameter.
Raises exception if no such topology is found.
"""
# Iterate over the values to filter the desired topology.
for (topology_name, _), topologyInfo in self.topologyInfos.items():
executionState = topologyInfo["execution_state"]
if (topologyName == topology_name and
cluster == executionState["cluster"] and
environ == executionState["environ"]):
# If role is specified, first try to match "role" field. If "role" field
# does not exist, try to match "submission_user" field.
if not role or executionState.get("role") == role:
return topologyInfo
if role is not None:
Log.info("Could not find topology info for topology: %s," \
"cluster: %s, role: %s, and environ: %s",
topologyName, cluster, role, environ)
else:
Log.info("Could not find topology info for topology: %s," \
"cluster: %s and environ: %s", topologyName, cluster, environ)
raise Exception("No topology found") | 0.008258 |
def _get_global_include_abs_path(self, path):
"""
        Get a value after converting it to an absolute path.
        Unlike the other parameters, validation of the `include` parameter is
        complex, so this method is called first (before other validation) to
        merge everything into one configuration.
        :param str path: You can specify either a relative or an absolute path.
:rtype: str
:return: absolute path
"""
if not os.path.isabs(path):
path = os.path.abspath(path)
if os.path.isdir(path) or path.endswith('/'):
path = os.path.join(path, '*')
return path | 0.003044 |