| text (string, 78 – 104k chars) | score (float64, 0 – 0.18) |
---|---|
def Throughput(self):
"""Combined throughput from multiplying all the components together.
Returns
-------
throughput : `~pysynphot.spectrum.TabularSpectralElement` or `None`
Combined throughput.
"""
try:
throughput = spectrum.TabularSpectralElement()
product = self._multiplyThroughputs(0)
throughput._wavetable = product.GetWaveSet()
throughput._throughputtable = product(throughput._wavetable)
throughput.waveunits = product.waveunits
throughput.name='*'.join([str(x) for x in self.components])
## throughput = throughput.resample(spectrum._default_waveset)
return throughput
except IndexError: # graph table is broken.
return None | 0.00489 |
def state(self):
"""Personsa state (e.g. Online, Offline, Away, Busy, etc)
:rtype: :class:`.EPersonaState`
"""
state = self.get_ps('persona_state', False)
return EPersonaState(state) if state else EPersonaState.Offline | 0.007722 |
def human_xor_01(X, y, model_generator, method_name):
""" XOR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 4
"""
return _human_xor(X, model_generator, method_name, False, True) | 0.00692 |
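As a reading aid, a hypothetical restatement of the scoring rule described in the docstring above (the helper name and boolean inputs are illustrative, not part of the benchmark code):
def xor_points(fever, cough):
    # +2 per symptom, +6 when exactly one of them is present ("but not both").
    points = 2 * int(fever) + 2 * int(cough)
    if fever != cough:
        points += 6
    return points
print(xor_points(True, True))    # 4: no XOR bonus when both are true
print(xor_points(True, False))   # 8: linear effect plus the XOR bonus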
def remap_index_fn(ref_file):
"""minimap2 can build indexes on the fly but will also store commons ones.
"""
index_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "minimap2")
if os.path.exists(index_dir) and os.path.isdir(index_dir):
return index_dir
else:
return os.path.dirname(ref_file) | 0.002994 |
def refresh(self):
"""
Reloads the wallet and its accounts. By default, this method is called only once,
on :class:`Wallet` initialization. When the wallet is accessed by multiple clients or
exists in multiple instances, calling `refresh()` will be necessary to update
the list of accounts.
"""
self.accounts = self.accounts or []
idx = 0
for _acc in self._backend.accounts():
_acc.wallet = self
try:
if self.accounts[idx]:
continue
except IndexError:
pass
self.accounts.append(_acc)
idx += 1 | 0.007396 |
def view_portfolio_losses(token, dstore):
"""
The losses for the full portfolio, for each realization and loss type,
extracted from the event loss table.
"""
oq = dstore['oqparam']
loss_dt = oq.loss_dt()
data = portfolio_loss(dstore).view(loss_dt)[:, 0]
rlzids = [str(r) for r in range(len(data))]
array = util.compose_arrays(numpy.array(rlzids), data, 'rlz')
# this is very sensitive to rounding errors, so I am using a low precision
return rst_table(array, fmt='%.5E') | 0.001946 |
def nice_log(x):
"""
Applies a log2 scale that also handles negative numbers (the sign is preserved).
:param x: NumPy array
"""
neg = x < 0
xi = np.log2(np.abs(x) + 1)
xi[neg] = -xi[neg]
return xi | 0.005208 |
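A minimal check of the signed log2 transform above, assuming numpy is imported as np and nice_log is in scope:
import numpy as np
x = np.array([-7.0, 0.0, 7.0])
print(nice_log(x))   # [-3.  0.  3.]  -- log2(|x| + 1) with the original sign restored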
def random(self, count=1, **kwargs):
"""
Retrieve a single random photo, given optional filters.
Note: If supplying multiple category IDs,
the resulting photos will be those that
match all of the given categories, not ones that match any category.
Note: You can’t use the collections and query parameters in the same request
Note: When supplying a count parameter
- and only then - the response will be an array of photos,
even if the value of count is 1.
All parameters are optional, and can be combined to narrow
the pool of photos from which a random one will be chosen.
:param count [integer]: The number of photos to return. (Default: 1; max: 30)
:param category: Category ID(s) to filter selection. If multiple, comma-separated. (deprecated)
:param collections: Public collection ID(s) to filter selection. If multiple, comma-separated
:param featured: Limit selection to featured photos.
:param username: Limit selection to a single user.
:param query: Limit selection to photos matching a search term.
:param w: Image width in pixels.
:param h: Image height in pixels.
:param orientation: Filter search results by photo orientation.
Valid values are landscape, portrait, and squarish.
:return: [Array] or [Photo]: An array of random Photo objects, or a single Photo.
:raise UnsplashError: If the given orientation is not in the default orientation values.
"""
kwargs.update({"count": count})
orientation = kwargs.get("orientation", None)
if orientation and orientation not in self.orientation_values:
raise UnsplashError("Invalid orientation value.")
url = "/photos/random"
result = self._get(url, params=kwargs)
return PhotoModel.parse_list(result) | 0.004215 |
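A hypothetical usage sketch; the api client object and the attribute read from the parsed PhotoModel objects are assumptions, not shown in the snippet:
photos = api.photo.random(count=3, query="mountains", orientation="landscape")
for photo in photos:
    print(photo.id)   # hypothetical attribute on the parsed PhotoModel objects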
def phonetic_i_umlaut(sound: Vowel) -> Vowel:
"""
>>> umlaut_a = OldNorsePhonology.phonetic_i_umlaut(a)
>>> umlaut_a.ipar
'ɛ'
>>> umlaut_au = OldNorsePhonology.phonetic_i_umlaut(DIPHTHONGS_IPA_class["au"])
>>> umlaut_au.ipar
'ɐy'
:param sound: the vowel to which i-umlaut is applied
:return: the umlauted vowel, or None if no rule matches
"""
if sound.is_equal(a):
return ee
elif sound.is_equal(a.lengthen()):
return ee.lengthen()
elif sound.is_equal(o):
return oee
elif sound.is_equal(o.lengthen()):
return oee.lengthen()
elif sound.is_equal(u):
return y
elif sound.is_equal(u.lengthen()):
return y.lengthen()
if sound.is_equal(DIPHTHONGS_IPA_class["au"]):
return DIPHTHONGS_IPA_class["ey"] | 0.003628 |
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
section, _, _ = self._validate_value_types(section=section)
super(ConfigParser, self).add_section(section) | 0.006431 |
def retract(args):
"""Deletes a vote for a poll."""
if not args.msg:
return "Syntax: !vote retract <pollnum>"
if not args.msg.isdigit():
return "Not A Valid Positive Integer."
response = get_response(args.session, args.msg, args.nick)
if response is None:
return "You haven't voted on that poll yet!"
args.session.delete(response)
return "Vote retracted" | 0.002463 |
def send(self, message):
"""
Deliver a message to all destinations.
The passed in message might be mutated.
@param message: A message dictionary that can be serialized to JSON.
@type message: L{dict}
"""
message.update(self._globalFields)
errors = []
for dest in self._destinations:
try:
dest(message)
except:
errors.append(sys.exc_info())
if errors:
raise _DestinationsSendError(errors) | 0.005587 |
def list_view_changed(self, widget, event, data=None):
"""
Scroll the view so that the last rows are visible.
"""
adj = self.scrolled_window.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size()) | 0.00885 |
def endure_multi(self, keys, persist_to=-1, replicate_to=-1,
timeout=5.0, interval=0.010, check_removed=False):
"""Check durability requirements for multiple keys
:param keys: The keys to check
The type of keys may be one of the following:
* Sequence of keys
* A :class:`~couchbase.result.MultiResult` object
* A ``dict`` with CAS values as the dictionary value
* A sequence of :class:`~couchbase.result.Result` objects
:return: A :class:`~.MultiResult` object
of :class:`~.OperationResult` items.
.. seealso:: :meth:`endure`
"""
return _Base.endure_multi(self, keys, persist_to=persist_to,
replicate_to=replicate_to,
timeout=timeout, interval=interval,
check_removed=check_removed) | 0.003243 |
def walker(top, names):
"""
Walks a directory and records all packages and file extensions.
"""
global packages, extensions
if any(exc in top for exc in excludes):
return
package = top[top.rfind('holoviews'):].replace(os.path.sep, '.')
packages.append(package)
for name in names:
ext = '.'.join(name.split('.')[1:])
ext_str = '*.%s' % ext
if ext and ext not in excludes and ext_str not in extensions[package]:
extensions[package].append(ext_str) | 0.001919 |
def _digits(minval, maxval):
"""Digits needed to comforatbly display values in [minval, maxval]"""
if minval == maxval:
return 3
else:
return min(10, max(2, int(1 + abs(np.log10(maxval - minval))))) | 0.004425 |
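A couple of worked values for the helper above (numpy imported as np is assumed):
print(_digits(0.0, 1.0))      # 2: a unit-wide range needs few digits
print(_digits(0.0, 0.001))    # 4: narrow ranges need more digits
print(_digits(5.0, 5.0))      # 3: equal bounds fall back to the default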
def with_stdin(self, os_path=None, skip_sub_command=False,
disk_closed_callback=None):
"""
A context manager yielding a stdin-suitable file-like object
based on the optional os_path and optionally skipping any
configured sub-command.
:param os_path: Optional path to base the file-like object
on.
:param skip_sub_command: Set True to skip any configured
sub-command filter.
:param disk_closed_callback: If the backing of the file-like
object is an actual file that will be closed,
disk_closed_callback (if set) will be called with the
on-disk path just after closing it.
"""
sub_command = None if skip_sub_command else self.stdin_sub_command
inn, path = self._get_in_and_path(
self.stdin, self.stdin_root, sub_command, os_path)
try:
if hasattr(inn, 'stdout'):
yield inn.stdout
else:
yield inn
finally:
if hasattr(inn, 'stdout'):
self._close(inn.stdout)
self._wait(inn, path)
self._close(inn)
if disk_closed_callback and path:
disk_closed_callback(path) | 0.002351 |
def get_song(self, id_):
"""Data for a specific song."""
endpoint = "songs/{id}".format(id=id_)
return self._make_request(endpoint) | 0.012903 |
def res_1to1(pst,logger=None,filename=None,plot_hexbin=False,histogram=False,**kwargs):
""" make 1-to-1 plots and also observed vs residual by observation group
Parameters
----------
pst : pyemu.Pst
logger : Logger
if None, a generic one is created. Default is None
filename : str
PDF filename to save figures to. If None, figures are returned. Default is None
kwargs : dict
optional keyword args to pass to plotting functions
TODO: color symbols by weight
"""
if logger is None:
logger=Logger('Default_Logger.log',echo=False)
logger.log("plot res_1to1")
if "ensemble" in kwargs:
try:
res=pst_utils.res_from_en(pst,kwargs['ensemble'])
except Exception as e:
logger.lraise("res_1to1: error loading ensemble file: {0}".format( str(e)))
else:
try:
res = pst.res
except:
logger.lraise("res_phi_pie: pst.res is None, couldn't find residuals file")
obs = pst.observation_data
if "grouper" in kwargs:
raise NotImplementedError()
else:
grouper = obs.groupby(obs.obgnme).groups
fig = plt.figure(figsize=figsize)
if "fig_title" in kwargs:
plt.figtext(0.5,0.5,kwargs["fig_title"])
else:
plt.figtext(0.5, 0.5, "pyemu.Pst.plot(kind='1to1')\nfrom pest control file '{0}'\n at {1}"
.format(pst.filename, str(datetime.now())), ha="center")
#if plot_hexbin:
# pdfname = pst.filename.replace(".pst", ".1to1.hexbin.pdf")
#else:
# pdfname = pst.filename.replace(".pst", ".1to1.pdf")
figs = []
ax_count = 0
for g, names in grouper.items():
logger.log("plotting 1to1 for {0}".format(g))
obs_g = obs.loc[names, :]
obs_g.loc[:, "sim"] = res.loc[names, "modelled"]
logger.statement("using control file obsvals to calculate residuals")
obs_g.loc[:,'res'] = obs_g.sim - obs_g.obsval
if "include_zero" not in kwargs or kwargs["include_zero"] is True:
obs_g = obs_g.loc[obs_g.weight > 0, :]
if obs_g.shape[0] == 0:
logger.statement("no non-zero obs for group '{0}'".format(g))
logger.log("plotting 1to1 for {0}".format(g))
continue
if ax_count % (nr * nc) == 0:
if ax_count > 0:
plt.tight_layout()
#pdf.savefig()
#plt.close(fig)
figs.append(fig)
fig = plt.figure(figsize=figsize)
axes = get_page_axes()
ax_count = 0
ax = axes[ax_count]
#if obs_g.shape[0] == 1:
# ax.scatter(list(obs_g.sim),list(obs_g.obsval),marker='.',s=30,color='b')
#else:
mx = max(obs_g.obsval.max(), obs_g.sim.max())
mn = min(obs_g.obsval.min(), obs_g.sim.min())
#if obs_g.shape[0] == 1:
mx *= 1.1
mn *= 0.9
ax.axis('square')
if plot_hexbin:
ax.hexbin(obs_g.obsval.values, obs_g.sim.values, mincnt=1, gridsize=(75, 75),
extent=(mn, mx, mn, mx), bins='log', edgecolors=None)
# plt.colorbar(ax=ax)
else:
ax.scatter([obs_g.obsval], [obs_g.sim], marker='.', s=10, color='b')
ax.plot([mn,mx],[mn,mx],'k--',lw=1.0)
xlim = (mn,mx)
ax.set_xlim(mn,mx)
ax.set_ylim(mn,mx)
ax.grid()
ax.set_xlabel("observed",labelpad=0.1)
ax.set_ylabel("simulated",labelpad=0.1)
ax.set_title("{0}) group:{1}, {2} observations".
format(abet[ax_count], g, obs_g.shape[0]), loc="left")
ax_count += 1
if histogram==False:
ax = axes[ax_count]
ax.scatter(obs_g.obsval, obs_g.res, marker='.', s=10, color='b')
ylim = ax.get_ylim()
mx = max(np.abs(ylim[0]), np.abs(ylim[1]))
if obs_g.shape[0] == 1:
mx *= 1.1
ax.set_ylim(-mx, mx)
#show a zero residuals line
ax.plot(xlim, [0,0], 'k--', lw=1.0)
meanres= obs_g.res.mean()
# show mean residuals line
ax.plot(xlim,[meanres,meanres], 'r-', lw=1.0)
ax.set_xlim(xlim)
ax.set_ylabel("residual",labelpad=0.1)
ax.set_xlabel("observed",labelpad=0.1)
ax.set_title("{0}) group:{1}, {2} observations".
format(abet[ax_count], g, obs_g.shape[0]), loc="left")
ax.grid()
ax_count += 1
else:
ax = axes[ax_count]
ax.hist(obs_g.res, 50, color='b')
meanres= obs_g.res.mean()
ax.axvline(meanres, color='r', lw=1)
b,t = ax.get_ylim()
ax.text(meanres + meanres/10,
t - t/10,
'Mean: {:.2f}'.format(meanres))
ax_count += 1
logger.log("plotting 1to1 for {0}".format(g))
for a in range(ax_count, nr * nc):
axes[a].set_axis_off()
axes[a].set_yticks([])
axes[a].set_xticks([])
plt.tight_layout()
#pdf.savefig()
#plt.close(fig)
figs.append(fig)
if filename is not None:
with PdfPages(filename) as pdf:
for fig in figs:
pdf.savefig(fig)
plt.close(fig)
logger.log("plot res_1to1")
else:
logger.log("plot res_1to1")
return figs | 0.010466 |
def parseBtop(btopString):
"""
Parse a BTOP string.
The format is described at https://www.ncbi.nlm.nih.gov/books/NBK279682/
@param btopString: A C{str} BTOP sequence.
@raise ValueError: If C{btopString} is not valid BTOP.
@return: A generator that yields a series of integers and 2-tuples of
letters, as found in the BTOP string C{btopString}.
"""
isdigit = str.isdigit
value = None
queryLetter = None
for offset, char in enumerate(btopString):
if isdigit(char):
if queryLetter is not None:
raise ValueError(
'BTOP string %r has a query letter %r at offset %d with '
'no corresponding subject letter' %
(btopString, queryLetter, offset - 1))
value = int(char) if value is None else value * 10 + int(char)
else:
if value is not None:
yield value
value = None
queryLetter = char
else:
if queryLetter is None:
queryLetter = char
else:
if queryLetter == '-' and char == '-':
raise ValueError(
'BTOP string %r has two consecutive gaps at '
'offset %d' % (btopString, offset - 1))
elif queryLetter == char:
raise ValueError(
'BTOP string %r has two consecutive identical %r '
'letters at offset %d' %
(btopString, char, offset - 1))
yield (queryLetter, char)
queryLetter = None
if value is not None:
yield value
elif queryLetter is not None:
raise ValueError(
'BTOP string %r has a trailing query letter %r with '
'no corresponding subject letter' % (btopString, queryLetter)) | 0.000505 |
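A short illustration of the generator's output for two small BTOP strings (assuming parseBtop is importable as above):
# 7 identical positions, an A->G substitution, then 39 more identical positions.
print(list(parseBtop('7AG39')))   # [7, ('A', 'G'), 39]
# Two identical positions, a C aligned against a gap in the subject, three more.
print(list(parseBtop('2C-3')))    # [2, ('C', '-'), 3]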
def fixed(self):
""" Moved to MODIFIED and not later moved to ASSIGNED """
decision = False
for record in self.history:
# Completely ignore older changes
if record["when"] < self.options.since.date:
continue
# Look for status change to MODIFIED (unless already found)
if not decision and record["when"] < self.options.until.date:
for change in record["changes"]:
if (change["field_name"] == "status"
and change["added"] == "MODIFIED"
and change["removed"] != "CLOSED"):
decision = True
# Make sure that the bug has not been later moved to ASSIGNED.
# (This would mean the issue has not been fixed properly.)
else:
for change in record["changes"]:
if (change["field_name"] == "status"
and change["added"] == "ASSIGNED"):
decision = False
return decision | 0.001838 |
def adjust_ip (self, ip=None):
"""Called to explicitely fixup an associated IP header
The function adjusts the IP header based on conformance rules
and the group address encoded in the IGMP message.
The rules are:
1. Send General Group Query to 224.0.0.1 (all systems)
2. Send Leave Group to 224.0.0.2 (all routers)
3a.Otherwise send the packet to the group address
3b.Send reports/joins to the group address
4. ttl = 1 (RFC 2236, section 2)
5. send the packet with the router alert IP option (RFC 2236, section 2)
"""
if ip is not None and ip.haslayer(IP):
if (self.type == 0x11):
if (self.gaddr == "0.0.0.0"):
ip.dst = "224.0.0.1" # IP rule 1
retCode = True
elif isValidMCAddr(self.gaddr):
ip.dst = self.gaddr # IP rule 3a
retCode = True
else:
print("Warning: Using invalid Group Address")
retCode = False
elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):
ip.dst = "224.0.0.2" # IP rule 2
retCode = True
elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):
ip.dst = self.gaddr # IP rule 3b
retCode = True
else:
print("Warning: Using invalid IGMP Type")
retCode = False
else:
print("Warning: No IGMP Group Address set")
retCode = False
if retCode == True:
ip.ttl=1 # IP Rule 4
ip.options=[IPOption_Router_Alert()] # IP rule 5
return retCode | 0.016265 |
def Boolean(value, true=(u'yes', u'1', u'true'), false=(u'no', u'0', u'false'),
encoding=None):
"""
Parse a value as a boolean.
:type value: `unicode` or `bytes`
:param value: Text value to parse.
:type true: `tuple` of `unicode`
:param true: Values to compare, ignoring case, for ``True`` values.
:type false: `tuple` of `unicode`
:param false: Values to compare, ignoring case, for ``False`` values.
:type encoding: `bytes`
:param encoding: Encoding to treat `bytes` values as, defaults to
``utf-8``.
:rtype: `bool`
:return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or
``false`` values.
"""
value = Text(value, encoding)
if value is not None:
value = value.lower().strip()
if value in true:
return True
elif value in false:
return False
return None | 0.001105 |
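A few example calls, assuming the module's Text() coercion helper (used on the first line of the function) is available:
print(Boolean(u'Yes'))     # True  -- case-insensitive match against the `true` values
print(Boolean(b'0'))       # False -- bytes are decoded (utf-8 by default) before matching
print(Boolean(u'maybe'))   # None  -- no match against either tuple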
def add_method_drop_down(self, col_number, col_label):
"""
Add drop-down-menu options for magic_method_codes columns
"""
if self.data_type == 'age':
method_list = vocab.age_methods
elif '++' in col_label:
method_list = vocab.pmag_methods
elif self.data_type == 'result':
method_list = vocab.pmag_methods
else:
method_list = vocab.er_methods
self.choices[col_number] = (method_list, True) | 0.004008 |
def run(self, text):
"""Run each regex substitution on ``text``.
Args:
text (string): the input text.
Returns:
string: text after all substitutions have been sequentially
applied.
"""
for regex in self.regexes:
text = regex.sub(self.repl, text)
return text | 0.005634 |
def _checkIdEquality(self, requestedEffect, effect):
"""
Tests whether a requested effect and an effect
present in an annotation are equal.
"""
return self._idPresent(requestedEffect) and (
effect.term_id == requestedEffect.term_id) | 0.007042 |
def setData(self, index, value, role=Qt.EditRole):
"""Qt Override."""
if index.isValid() and 0 <= index.row() < len(self.shortcuts):
shortcut = self.shortcuts[index.row()]
column = index.column()
text = from_qvariant(value, str)
if column == SEQUENCE:
shortcut.key = text
self.dataChanged.emit(index, index)
return True
return False | 0.004405 |
def list_closed_workflow_executions(domain=None, startTimeFilter=None, closeTimeFilter=None, executionFilter=None, closeStatusFilter=None, typeFilter=None, tagFilter=None, nextPageToken=None, maximumPageSize=None, reverseOrder=None):
"""
Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.list_closed_workflow_executions(
domain='string',
startTimeFilter={
'oldestDate': datetime(2015, 1, 1),
'latestDate': datetime(2015, 1, 1)
},
closeTimeFilter={
'oldestDate': datetime(2015, 1, 1),
'latestDate': datetime(2015, 1, 1)
},
executionFilter={
'workflowId': 'string'
},
closeStatusFilter={
'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT'
},
typeFilter={
'name': 'string',
'version': 'string'
},
tagFilter={
'tag': 'string'
},
nextPageToken='string',
maximumPageSize=123,
reverseOrder=True|False
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain that contains the workflow executions to list.
:type startTimeFilter: dict
:param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
:type closeTimeFilter: dict
:param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
:type executionFilter: dict
:param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
workflowId (string) -- [REQUIRED]The workflowId to match against the criteria of this filter.
:type closeStatusFilter: dict
:param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.
:type typeFilter: dict
:param typeFilter: If specified, only executions of the type specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
name (string) -- [REQUIRED]Required. Name of the workflow type.
version (string) --Version of the workflow type.
:type tagFilter: dict
:param tagFilter: If specified, only executions that have the matching tag are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.
:type nextPageToken: string
:param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged.
The configured maximumPageSize determines how many results can be returned in a single call.
:type maximumPageSize: integer
:param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
:type reverseOrder: boolean
:param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.
:rtype: dict
:return: {
'executionInfos': [
{
'execution': {
'workflowId': 'string',
'runId': 'string'
},
'workflowType': {
'name': 'string',
'version': 'string'
},
'startTimestamp': datetime(2015, 1, 1),
'closeTimestamp': datetime(2015, 1, 1),
'executionStatus': 'OPEN'|'CLOSED',
'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT',
'parent': {
'workflowId': 'string',
'runId': 'string'
},
'tagList': [
'string',
],
'cancelRequested': True|False
},
],
'nextPageToken': 'string'
}
:returns:
domain (string) -- [REQUIRED]
The name of the domain that contains the workflow executions to list.
startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
workflowId (string) -- [REQUIRED]The workflowId to match against the criteria of this filter.
closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.
typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
name (string) -- [REQUIRED]Required. Name of the workflow type.
version (string) --Version of the workflow type.
tagFilter (dict) -- If specified, only executions that have the matching tag are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.
nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged.
The configured maximumPageSize determines how many results can be returned in a single call.
maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.
"""
pass | 0.010078 |
def key_bytes(self):
"""Returns the raw signing key.
:rtype: bytes
"""
return self.key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
) | 0.006452 |
def addr_tuple(s):
"""converts a string into a tuple of (host, port)"""
if "[" in s:
# ip v6
if (s.count("[") != 1) or (s.count("]") != 1):
raise ValueError("Invalid IPv6 address!")
end = s.index("]")
if ":" not in s[end:]:
raise ValueError("IPv6 address does specify a port to use!")
host, port = s[1:end], s[end+1:]
port = int(port)
return (host, port)
else:
host, port = s.split(":")
port = int(port)
return (host, port) | 0.001855 |
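A quick sanity check of the parser for both address families:
print(addr_tuple("127.0.0.1:8080"))   # ('127.0.0.1', 8080)
print(addr_tuple("[::1]:9000"))       # ('::1', 9000)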
def pnorm(stat, stat0):
""" [P(X>pi, mu, sigma) for pi in pvalues] for normal distributed stat with
expectation value mu and std deviation sigma """
mu, sigma = mean_and_std_dev(stat0)
stat = to_one_dim_array(stat, np.float64)
args = (stat - mu) / sigma
return 1-(0.5 * (1.0 + scipy.special.erf(args / np.sqrt(2.0)))) | 0.002915 |
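The right-tail identity used above, checked in isolation (standard normal, so mu=0 and sigma=1):
import numpy as np
import scipy.special
z = np.array([0.0, 1.645])
tail = 1 - 0.5 * (1.0 + scipy.special.erf(z / np.sqrt(2.0)))
print(tail)   # approximately [0.5, 0.05] -- P(X > z) for a standard normal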
def _pythonized_comments(tokens):
"""
Similar to tokens but converts strings after a colon (:) to comments.
"""
is_after_colon = True
for token_type, token_text in tokens:
if is_after_colon and (token_type in pygments.token.String):
token_type = pygments.token.Comment
elif token_text == ':':
is_after_colon = True
elif token_type not in pygments.token.Comment:
is_whitespace = len(token_text.rstrip(' \f\n\r\t')) == 0
if not is_whitespace:
is_after_colon = False
yield token_type, token_text | 0.001645 |
def romanise(number):
"""Return the roman numeral for a number.
Note that this only works for numbers in the interval [0, 12] since at
the moment we only use it on realtime earthquakes to convert MMI values.
:param number: The number that will be romanised
:type number: float
:return: Roman numeral equivalent of the value
:rtype: str
"""
if number is None:
return ''
roman_list = ['0', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII',
'IX', 'X', 'XI', 'XII']
try:
roman = roman_list[int(number)]
except ValueError:
return None
return roman | 0.00157 |
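Two example calls (the value is truncated with int() before lookup):
print(romanise(5.7))    # 'V'
print(romanise(None))   # ''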
def get_params(self):
"""
Provides access to the model's parameters.
Works around the non-availability of graph collections in
eager mode.
:return: A list of all Variables defining the model parameters.
"""
assert tf.executing_eagerly()
out = []
# Collecting params from each layer.
for layer_name in self.layers:
out += self.get_layer_params(layer_name)
return out | 0.004619 |
def reindex(self, new_index=None, index_conf=None):
'''Rebuild the current index
This function could be useful in the case you want to change some index settings/mappings
and you don't want to lose all the entries belonging to that index.
This function is built in such a way that you can continue to use the old index name,
this is achieved using index aliases.
The old index will be cloned into a new one with the given `index_conf`.
If we are working on an alias, it is redirected to the new index.
Otherwise a brand new alias with the old index name is created in such a way that
points to the newly create index.
Keep in mind that even if you can continue to use the same index name,
the old index will be deleted.
:param new_index: Optional name for the new index. If None, a versioned name is derived from the current one (e.g. ``myindex_v2``).
:param index_conf: Configuration to be used in the new index creation.
This param will be passed directly to :py:func:`DB.create_index`
'''
alias = self.index_name if self.es.indices.exists_alias(name=self.index_name) else None
if alias:
original_index=self.es.indices.get_alias(self.index_name).popitem()[0]
else:
original_index=self.index_name
if new_index is None:
mtc = re.match(r"^.*_v(\d)*$", original_index)
if mtc:
new_index = original_index[:mtc.start(1)] + str(int(mtc.group(1)) + 1)
else:
new_index = original_index + '_v1'
log.debug("Reindexing {{ alias: '{}', original_index: '{}', new_index: '{}'}}".format(alias, original_index, new_index))
self.clone_index(new_index, index_conf=index_conf)
if alias:
log.debug("Moving alias from ['{0}' -> '{1}'] to ['{0}' -> '{2}']".format(alias, original_index, new_index))
self.es.indices.update_aliases(body={
"actions" : [
{ "remove" : { "alias": alias, "index" : original_index} },
{ "add" : { "alias": alias, "index" : new_index } }
]})
log.debug("Deleting old index: '{}'".format(original_index))
self.es.indices.delete(original_index)
if not alias:
log.debug("Crating new alias: ['{0}' -> '{1}']".format(original_index, new_index))
self.es.indices.update_aliases(body={
"actions" : [
{ "add" : { "alias": original_index, "index" : new_index } }
]}) | 0.01451 |
def get_file_samples(file_ids):
"""Get TCGA associated sample barcodes for a list of file IDs.
Params
------
file_ids : Iterable
The file IDs.
Returns
-------
`pandas.Series`
Series containing file IDs as index and corresponding sample barcodes.
"""
assert isinstance(file_ids, Iterable)
# query TCGA API to get sample barcodes associated with file IDs
payload = {
"filters":json.dumps({
"op":"in",
"content":{
"field":"files.file_id",
"value": list(file_ids),
}
}),
"fields":"file_id,cases.samples.submitter_id",
"size":10000
}
r = requests.post('https://gdc-api.nci.nih.gov/files', data=payload)
j = json.loads(r.content.decode('utf-8'))
file_samples = OrderedDict()
for hit in j['data']['hits']:
file_id = hit['file_id']
assert len(hit['cases']) == 1
case = hit['cases'][0]
assert len(case['samples']) == 1
sample = case['samples'][0]
sample_barcode = sample['submitter_id']
file_samples[file_id] = sample_barcode
df = pd.DataFrame.from_dict(file_samples, orient='index')
df = df.reset_index()
df.columns = ['file_id', 'sample_barcode']
return df | 0.007587 |
def _set_src_vtep_ip(self, v, load=False):
"""
Setter method for src_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/src_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_vtep_ip() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip-host", parent=self, choice=(u'choice-src-vtep-ip', u'case-src-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: A.B.C.D', u'alt-name': u'src-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """src_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip-host", parent=self, choice=(u'choice-src-vtep-ip', u'case-src-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: A.B.C.D', u'alt-name': u'src-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)""",
})
self.__src_vtep_ip = t
if hasattr(self, '_set'):
self._set() | 0.004255 |
def _from_matrix(cls, matrix):
"""Initialise from matrix representation
Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix
(as a numpy array) from which the quaternion's rotation should be created.
"""
try:
shape = matrix.shape
except AttributeError:
raise TypeError("Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix")
if shape == (3, 3):
R = matrix
elif shape == (4,4):
R = matrix[:-1][:,:-1] # Upper left 3x3 sub-matrix
else:
raise ValueError("Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix")
# Check matrix properties
if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3)):
raise ValueError("Matrix must be orthogonal, i.e. its transpose should be its inverse")
if not np.isclose(np.linalg.det(R), 1.0):
raise ValueError("Matrix must be special orthogonal i.e. its determinant must be +1.0")
def decomposition_method(matrix):
""" Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!
Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654
"""
x, y, z = 0, 1, 2 # indices
K = np.array([
[R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],
[R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],
[R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],
[R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]
])
K = K / 3.0
e_vals, e_vecs = np.linalg.eig(K)
print('Eigenvalues:', e_vals)
print('Eigenvectors:', e_vecs)
max_index = np.argmax(e_vals)
principal_component = e_vecs[max_index]
return principal_component
def trace_method(matrix):
"""
This code uses a modification of the algorithm described in:
https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
which is itself based on the method described here:
http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
Altered to work with the column vector convention instead of row vectors
"""
m = matrix.conj().transpose() # This method assumes row-vector and postmultiplication of that vector
if m[2, 2] < 0:
if m[0, 0] > m[1, 1]:
t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]
else:
t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]
else:
if m[0, 0] < -m[1, 1]:
t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]
else:
t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]
q = np.array(q)
q *= 0.5 / sqrt(t)
return q
return cls(array=trace_method(R)) | 0.007003 |
def add_barplot(self):
""" Generate the Samblaster bar plot. """
cats = OrderedDict()
cats['n_nondups'] = {'name': 'Non-duplicates'}
cats['n_dups'] = {'name': 'Duplicates'}
pconfig = {
'id': 'samblaster_duplicates',
'title': 'Samblaster: Number of duplicate reads',
'ylab': 'Number of reads'
}
self.add_section( plot = bargraph.plot(self.samblaster_data, cats, pconfig) ) | 0.015086 |
def day(t, now=None, format='%B %d'):
'''
Date delta compared to ``t``. You can override ``now`` to specify what date
to compare to.
You can override the date format by supplying a ``format`` parameter.
:param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
object
:param now: default ``None``, optionally a :class:`datetime.datetime`
object
:param format: default ``'%B %d'``
>>> import time
>>> print(day(time.time()))
today
>>> print(day(time.time() - 86400))
yesterday
>>> print(day(time.time() - 604800))
last week
>>> print(day(time.time() + 86400))
tomorrow
>>> print(day(time.time() + 604800))
next week
'''
t1 = _to_date(t)
t2 = _to_date(now or datetime.datetime.now())
diff = t1 - t2
secs = _total_seconds(diff)
days = abs(diff.days)
if days == 0:
return _('today')
elif days == 1:
if secs < 0:
return _('yesterday')
else:
return _('tomorrow')
elif days == 7:
if secs < 0:
return _('last week')
else:
return _('next week')
else:
return t1.strftime(format) | 0.000816 |
def init_logging(debug=False, logfile=None):
"""Initialize logging."""
loglevel = logging.DEBUG if debug else logging.INFO
logformat = '%(asctime)s %(name)s: %(levelname)s: %(message)s'
formatter = logging.Formatter(logformat)
stderr = logging.StreamHandler()
stderr.setFormatter(formatter)
root = logging.getLogger()
root.setLevel(loglevel)
root.handlers = [stderr]
if logfile:
fhandler = logging.FileHandler(logfile)
fhandler.setFormatter(formatter)
root.addHandler(fhandler) | 0.001842 |
def get_spatial_bounds(gtfs, as_dict=False):
"""
Parameters
----------
gtfs
as_dict : bool, optional
If True, return the bounds as a dict with keys lon_min, lon_max, lat_min, lat_max.
Returns
-------
lon_min: float
lon_max: float
lat_min: float
lat_max: float
"""
stats = get_stats(gtfs)
lon_min = stats['lon_min']
lon_max = stats['lon_max']
lat_min = stats['lat_min']
lat_max = stats['lat_max']
if as_dict:
return {'lon_min': lon_min, 'lon_max': lon_max, 'lat_min': lat_min, 'lat_max': lat_max}
else:
return lon_min, lon_max, lat_min, lat_max | 0.003817 |
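A usage sketch, assuming a gtfspy-style GTFS object for which get_stats() returns the lon/lat extrema:
lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(gtfs)
bounds = get_spatial_bounds(gtfs, as_dict=True)
print(bounds['lat_min'], bounds['lat_max'])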
def find_autosummary_in_files(filenames):
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
documented = []
for filename in filenames:
f = open(filename, 'r')
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, filename=filename))
f.close()
return documented | 0.002591 |
def _init_uninit_vars(self):
""" Initialize all other trainable variables, i.e. those which are uninitialized """
uninit_vars = self.sess.run(tf.report_uninitialized_variables())
vars_list = list()
for v in uninit_vars:
var = v.decode("utf-8")
vars_list.append(var)
uninit_vars_tf = [v for v in tf.global_variables() if v.name.split(':')[0] in vars_list]
self.sess.run(tf.variables_initializer(var_list=uninit_vars_tf)) | 0.008147 |
def is_effective_member(self, group_id, netid):
"""
Returns True if the netid is in the group, False otherwise.
"""
self._valid_group_id(group_id)
# GWS doesn't accept EPPNs on effective member checks, for UW users
netid = re.sub('@washington.edu', '', netid)
url = "{}/group/{}/effective_member/{}".format(self.API,
group_id,
netid)
try:
data = self._get_resource(url)
return True # 200
except DataFailureException as ex:
if ex.status == 404:
return False
else:
raise | 0.002725 |
def determine_interactive(self):
"""Determine whether we're in an interactive shell.
Sets interactivity off if appropriate.
cf http://stackoverflow.com/questions/24861351/how-to-detect-if-python-script-is-being-run-as-a-background-process
"""
try:
if not sys.stdout.isatty() or os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
self.interactive = 0
return False
except Exception:
self.interactive = 0
return False
if self.interactive == 0:
return False
return True | 0.035928 |
def get_objectives_by_search(self, objective_query, objective_search):
"""Pass through to provider ObjectiveSearchSession.get_objectives_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_objectives_by_search(objective_query, objective_search) | 0.008772 |
def getMaxISOPacketSize(self, endpoint):
"""
Get the maximum size for a single isochronous packet for given
endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
"""
result = libusb1.libusb_get_max_iso_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result | 0.005803 |
def get_inc(self):
"""
Get include directories of Visual C++.
"""
dirs = []
for part in ['', 'atlmfc']:
include = os.path.join(self.vc_dir, part, 'include')
if os.path.isdir(include):
logging.info(_('using include: %s'), include)
dirs.append(include)
else:
logging.debug(_('include not found: %s'), include)
return dirs | 0.004435 |
def determine_config_changes(self):
""" The magic: Determine what has changed since the last time.
Caller should pass the returned config to register_config_changes to persist.
"""
# 'update' here is synonymous with 'add or update'
instances = set()
new_configs = {}
meta_changes = { 'changed_instances' : set(),
'remove_instances' : [],
'remove_configs' : self.get_remove_configs() }
for config_file, stored_config in self.get_registered_configs().items():
new_config = stored_config
try:
ini_config = ConfigManager.get_ini_config(config_file, defaults=stored_config.defaults)
except (OSError, IOError) as exc:
log.warning('Unable to read %s (hint: use `rename` or `remove` to fix): %s', config_file, exc)
new_configs[config_file] = stored_config
instances.add(stored_config['instance_name'])
continue
if ini_config['instance_name'] is not None:
# instance name is explicitly set in the config
instance_name = ini_config['instance_name']
if ini_config['instance_name'] != stored_config['instance_name']:
# instance name has changed
# (removal of old instance will happen later if no other config references it)
new_config['update_instance_name'] = instance_name
meta_changes['changed_instances'].add(instance_name)
else:
# instance name is dynamically generated
instance_name = stored_config['instance_name']
if ini_config['attribs'] != stored_config['attribs']:
# Ensure that dynamically generated virtualenv is not lost
if ini_config['attribs']['virtualenv'] is None:
ini_config['attribs']['virtualenv'] = stored_config['attribs']['virtualenv']
# Recheck to see if dynamic virtualenv was the only change.
if ini_config['attribs'] != stored_config['attribs']:
self.create_virtualenv(ini_config['attribs']['virtualenv'])
new_config['update_attribs'] = ini_config['attribs']
meta_changes['changed_instances'].add(instance_name)
# make sure this instance isn't removed
instances.add(instance_name)
services = []
for service in ini_config['services']:
if service not in stored_config['services']:
# instance has a new service
if 'update_services' not in new_config:
new_config['update_services'] = []
new_config['update_services'].append(service)
meta_changes['changed_instances'].add(instance_name)
# make sure this service isn't removed
services.append(service)
for service in stored_config['services']:
if service not in services:
if 'remove_services' not in new_config:
new_config['remove_services'] = []
new_config['remove_services'].append(service)
meta_changes['changed_instances'].add(instance_name)
new_configs[config_file] = new_config
# once finished processing all configs, find any instances which have been deleted
for instance_name in self.get_registered_instances(include_removed=True):
if instance_name not in instances:
meta_changes['remove_instances'].append(instance_name)
return new_configs, meta_changes | 0.004249 |
def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values | 0.006557 |
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode) | 0.001654 |
def getDigitalChannelData(self,ChNumber):
"""
Returns an array of numbers (0 or 1) containing the values of the
digital channel status.
ChNumber: digital channel number.
"""
if not self.DatFileContent:
print "No data file content. Use the method ReadDataFile first"
return 0
if (ChNumber > self.D):
print "Digital channel number greater than the total number of channels."
return 0
# Formatting string for struct module:
str_struct = "ii%dh%dH" %(self.A, int(numpy.ceil((float(self.D)/float(16)))))
# Number of bytes per sample:
NB = 4 + 4 + self.A*2 + int(numpy.ceil((float(self.D)/float(16))))*2
# Number of samples:
N = self.getNumberOfSamples()
# Empty column vector:
values = numpy.empty((N,1))
# Number of the 16-bit word where the digital channel is. Every word contains
# 16 digital channels:
byte_number = int(numpy.ceil((ChNumber-1)/16)+1)
# Value of the digital channel. Ex. channel 1 has value 2^0=1, channel
# 2 has value 2^1 = 2, channel 3 => 2^2=4 and so on.
digital_ch_value = (1<<(ChNumber-1-(byte_number-1)*16))
# Reading the values from DatFileContent string:
for i in range(N):
data = struct.unpack(str_struct,self.DatFileContent[i*NB:(i*NB)+NB])
# The first two numbers are the sample index and timestamp.
# AND logic to extract only one channel from the 16-bit word.
# Normalize the output to 0 or 1
values[i] = (digital_ch_value & data[self.A+1+byte_number]) * 1/digital_ch_value
# Return the array.
return values | 0.010668 |
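The bit-mask arithmetic used above, shown in isolation on a hypothetical 16-bit status word:
word = 0b0000000000000101   # sample word: digital channels 1 and 3 are high
mask = 1 << (2 - 1)         # channel 2 maps to bit 1
print((word & mask) // mask)   # 0 -> channel 2 is off in this sample
mask = 1 << (3 - 1)         # channel 3 maps to bit 2
print((word & mask) // mask)   # 1 -> channel 3 is on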
def unstack(self):
"""
Unstack array and return a new BoltArraySpark via flatMap().
"""
from bolt.spark.array import BoltArraySpark
if self._rekeyed:
rdd = self._rdd
else:
rdd = self._rdd.flatMap(lambda kv: zip(kv[0], list(kv[1])))
return BoltArraySpark(rdd, shape=self.shape, split=self.split) | 0.005319 |
def _create_attach_record(self, id, timed):
"""
Create a new pivot attachment record.
"""
record = super(MorphToMany, self)._create_attach_record(id, timed)
record[self._morph_type] = self._morph_class
return record | 0.007519 |
def simplex_identify_cycle(self, t, k, l):
'''
API:
identify_cycle(self, t, k, l)
Description:
Identifies and returns to the pivot cycle, which is a list of
nodes.
Pre:
(1) t is spanning tree solution, (k,l) is the entering arc.
Input:
t: current spanning tree solution
k: tail of the entering arc
l: head of the entering arc
Returns:
List of nodes in the cycle.
'''
i = k
j = l
cycle = []
li = [k]
lj = [j]
while i is not j:
depth_i = t.get_node(i).get_attr('depth')
depth_j = t.get_node(j).get_attr('depth')
if depth_i > depth_j:
i = t.get_node(i).get_attr('pred')
li.append(i)
elif depth_i < depth_j:
j = t.get_node(j).get_attr('pred')
lj.append(j)
else:
i = t.get_node(i).get_attr('pred')
li.append(i)
j = t.get_node(j).get_attr('pred')
lj.append(j)
cycle.extend(lj)
li.pop()
li.reverse()
cycle.extend(li)
# l is beginning k is end
return cycle | 0.002336 |
def python_script_exists(package=None, module=None):
"""
Return absolute path if Python script exists (otherwise, return None)
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
try:
if package is None:
path = imp.find_module(module)[1]
else:
path = osp.join(imp.find_module(package)[1], module)+'.py'
except ImportError:
return
if not osp.isfile(path):
path += 'w'
if osp.isfile(path):
return path | 0.001792 |
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines() | 0.004329 |
def check_permission(cls, role, permission):
"""
Check if role contains permission
"""
result = permission in settings.ARCTIC_ROLES[role]
# will try to call a method with the same name as the permission
# to enable an object level permission check.
if result:
try:
return getattr(cls, permission)(role)
except AttributeError:
pass
return result | 0.00431 |
def GetSOAPHeaders(self, create_method):
"""Returns the SOAP headers required for request authorization.
Args:
create_method: The SOAP library specific method used to instantiate SOAP
objects.
Returns:
A SOAP object containing the headers.
"""
header = create_method(self._SOAP_HEADER_CLASS)
header.networkCode = self._ad_manager_client.network_code
header.applicationName = ''.join([
self._ad_manager_client.application_name,
googleads.common.GenerateLibSig(self._PRODUCT_SIG)])
return header | 0.001783 |
def expire_in(self, value):
"""
Computes :attr:`.expiration_time` when the value is set.
"""
# pylint:disable=attribute-defined-outside-init
if value:
self._expiration_time = int(time.time()) + int(value)
self._expire_in = value | 0.006826 |
def repeater(self, req, tag):
"""
Render some UI for repeating our form.
"""
repeater = inevow.IQ(self.docFactory).onePattern('repeater')
return repeater.fillSlots(
'object-description', self.parameter.modelObjectDescription) | 0.00722 |
def request_param_update(self, complete_name):
"""
Request an update of the value for the supplied parameter.
"""
self.param_updater.request_param_update(
self.toc.get_element_id(complete_name)) | 0.008403 |
def plot_file_distances(dist_matrix):
"""
Plots dist_matrix
Parameters
----------
dist_matrix: np.ndarray
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.matshow(dist_matrix, interpolation='nearest',
cmap=plt.cm.get_cmap('PuBu')) | 0.005464 |
def rm(name):
"""
Remove an existing project and its container.
"""
path = get_existing_project_path(name)
click.confirm(
'Are you sure you want to delete project %s?' % name, abort=True)
container_name = get_container_name(name)
client = docker.Client()
try:
client.inspect_container(container_name)
except docker.errors.NotFound:
pass
else:
print("Removing container %s..." % container_name)
client.remove_container(container_name, v=True, force=True)
print("Removing %s..." % path)
shutil.rmtree(path) | 0.001681 |
def _is_module_path(self, path):
"""Returns true if the passed in path is a test module path
:param path: string, the path to check, will need to start or end with the
module test prefixes or postfixes to be considered valid
:returns: boolean, True if a test module path, False otherwise
"""
ret = False
basename = os.path.basename(path)
fileroot = os.path.splitext(basename)[0]
for pf in self.module_postfixes:
if fileroot.endswith(pf):
ret = True
break
if not ret:
for pf in self.module_prefixes:
if fileroot.startswith(pf):
ret = True
break
return ret | 0.003947 |
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if (self._timestamp is not None and self._timestamp >= 0 and
self._timestamp <= self._UINT32_MAX):
self._normalized_timestamp = (
decimal.Decimal(self._timestamp) - self._HFS_TO_POSIX_BASE)
return self._normalized_timestamp | 0.006033 |
def _fix_callback_item(self, item):
'Update component identifier'
item.component_id = self._fix_id(item.component_id)
return item | 0.013072 |
def start(self):
"""Schedule the fiber to be started in the next iteration of the
event loop."""
target = getattr(self._target, '__qualname__', self._target.__name__)
self._log.debug('starting fiber {}, target {}', self.name, target)
self._hub.run_callback(self.switch) | 0.006472 |
def _clear_empty_values(args):
'''
Scrap junk data from a dict.
'''
result = {}
for param in args:
if args[param] is not None:
result[param] = args[param]
return result | 0.004717 |
def circumradius(self):
'''
Distance from the circumcenter to all the vertices in
the Triangle, float.
'''
return (self.a * self.b * self.c) / (self.area * 4) | 0.01 |
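A quick numeric check of the R = a*b*c / (4*area) formula on a 3-4-5 right triangle (area 6):
a, b, c, area = 3.0, 4.0, 5.0, 6.0
print(a * b * c / (4 * area))   # 2.5 -- half the hypotenuse, as expected for a right triangle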
def se_iban_load_map(filename: str) -> list:
"""
Loads Swedish monetary institution codes in CSV format.
:param filename: CSV file name of the BIC definitions.
Columns: Institution Name, Range Begin-Range End (inclusive), Account digits count
:return: List of (bank name, clearing code begin, clearing code end, account digits)
"""
out = []
name_repl = {
'BNP Paribas Fortis SA/NV, Bankfilial Sverige': 'BNP Paribas Fortis SA/NV',
'Citibank International Plc, Sweden Branch': 'Citibank International Plc',
'Santander Consumer Bank AS (deltar endast i Dataclearingen)': 'Santander Consumer Bank AS',
'Nordax Bank AB (deltar endast i Dataclearingen)': 'Nordax Bank AB',
'Swedbank och fristående Sparbanker, t ex Leksands Sparbank och Roslagsbanken.': 'Swedbank',
'Ålandsbanken Abp (Finland),svensk filial': 'Ålandsbanken Abp',
'SBAB deltar endast i Dataclearingen': 'SBAB',
}
with open(filename) as fp:
for row in csv.reader(fp):
if len(row) == 3:
name, series, acc_digits = row
# pprint([name, series, acc_digits])
# clean up name
name = re.sub(r'\n.*', '', name)
if name in name_repl:
name = name_repl[name]
# clean up series
ml_acc_digits = acc_digits.split('\n')
for i, ser in enumerate(series.split('\n')):
begin, end = None, None
res = re.match(r'^(\d+)-(\d+).*$', ser)
if res:
begin, end = res.group(1), res.group(2)
if begin is None:
res = re.match(r'^(\d{4}).*$', ser)
if res:
begin = res.group(1)
end = begin
if begin and end:
digits = None
try:
digits = int(acc_digits)
except ValueError:
pass
if digits is None:
try:
digits = int(ml_acc_digits[i])
except ValueError:
digits = '?'
except IndexError:
digits = '?'
out.append([name, begin, end, digits])
# print('OK!')
return out | 0.002728 |
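A hedged usage sketch with a fabricated one-row CSV; it assumes se_iban_load_map above is in scope together with its csv and re imports, and the row contents are invented purely for illustration:
import csv
import tempfile

rows = [["Example Bank AB", "1100-1199", "7"]]  # fabricated: name, clearing range, account digits
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as fp:
    csv.writer(fp).writerows(rows)
    path = fp.name

print(se_iban_load_map(path))  # [['Example Bank AB', '1100', '1199', 7]]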
def days_to_hmsm(days):
"""
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
"""
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro) | 0.001155 |
def wait(self):
'''This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(), but, the child is
technically still alive until its output is read by the parent.
This method is non-blocking if :meth:`wait` has already been called
previously or :meth:`isalive` method returns False. It simply returns
the previously determined exit status.
'''
ptyproc = self.ptyproc
with _wrap_ptyprocess_err():
# exception may occur if "Is some other process attempting
# "job control with our child pid?"
exitstatus = ptyproc.wait()
self.status = ptyproc.status
self.exitstatus = ptyproc.exitstatus
self.signalstatus = ptyproc.signalstatus
self.terminated = True
return exitstatus | 0.001955 |
def mpl_palette(name, n_colors=6):
"""Return discrete colors from a matplotlib palette.
Note that this handles the qualitative colorbrewer palettes
properly, although if you ask for more colors than a particular
    qualitative palette can provide you will get fewer colors than you
    are expecting.
Parameters
----------
name : string
name of the palette
n_colors : int
number of colors in the palette
Returns
-------
palette : list of tuples
palette colors in r, g, b format
"""
brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12,
"Pastel1": 9, "Pastel2": 8,
"Set1": 9, "Set2": 8, "Set3": 12}
if name.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(name.replace("_d", "_r"), 2))
cmap = blend_palette(pal, n_colors, as_cmap=True)
else:
cmap = getattr(mpl.cm, name)
if name in brewer_qual_pals:
bins = np.linspace(0, 1, brewer_qual_pals[name])[:n_colors]
else:
bins = np.linspace(0, 1, n_colors + 2)[1:-1]
palette = list(map(tuple, cmap(bins)[:, :3]))
return palette | 0.000853 |
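Assuming matplotlib and numpy are importable as mpl and np in the surrounding module, a qualitative Brewer palette could be requested like this (exact RGB values depend on the matplotlib version):
colors = mpl_palette("Set2", 3)
print(len(colors))   # 3
print(colors[0])     # an (r, g, b) tuple with each channel in [0, 1]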
def wait(self):
"""
Wait until all transferred events have been sent.
"""
if self.error:
raise self.error
if not self.running:
raise ValueError("Unable to send until client has been started.")
try:
self._handler.wait()
except (errors.TokenExpired, errors.AuthenticationException):
log.info("Sender disconnected due to token error. Attempting reconnect.")
self.reconnect()
except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
if shutdown.action.retry and self.auto_reconnect:
log.info("Sender detached. Attempting reconnect.")
self.reconnect()
else:
log.info("Sender detached. Shutting down.")
error = EventHubError(str(shutdown), shutdown)
self.close(exception=error)
raise error
except errors.MessageHandlerError as shutdown:
if self.auto_reconnect:
log.info("Sender detached. Attempting reconnect.")
self.reconnect()
else:
log.info("Sender detached. Shutting down.")
error = EventHubError(str(shutdown), shutdown)
self.close(exception=error)
raise error
except Exception as e:
log.info("Unexpected error occurred (%r).", e)
raise EventHubError("Send failed: {}".format(e)) | 0.002015 |
def show_label(self, text, size = None, color = None, font_desc = None):
"""display text. unless font_desc is provided, will use system's default font"""
font_desc = pango.FontDescription(font_desc or _font_desc)
if color: self.set_color(color)
if size: font_desc.set_absolute_size(size * pango.SCALE)
self.show_layout(text, font_desc) | 0.029333 |
def build(self, client,
nobuild=False,
usecache=True,
pull=False):
"""
Drives the build of the final image - get the list of steps and execute them.
Args:
client (docker.Client): docker client object that will build the image
nobuild (bool): just create dockerfiles, don't actually build the image
usecache (bool): use docker cache, or rebuild everything from scratch?
pull (bool): try to pull new versions of repository images?
"""
if not nobuild:
self.update_source_images(client,
usecache=usecache,
pull=pull)
width = utils.get_console_width()
cprint('\n' + '='*width,
color='white', attrs=['bold'])
line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % (
self.targetname, self.imagename, self.steps[-1].sourcefile)
cprint(_centered(line, width), color='blue', attrs=['bold'])
for istep, step in enumerate(self.steps):
print(colored('* Step','blue'),
colored('%d/%d' % (istep+1, len(self.steps)), 'blue', attrs=['bold']),
colored('for image', color='blue'),
colored(self.imagename, color='blue', attrs=['bold']))
if not nobuild:
if step.bust_cache:
stackkey = self._get_stack_key(istep)
if stackkey in _rebuilt:
step.bust_cache = False
step.build(client, usecache=usecache)
print(colored("* Created intermediate image", 'green'),
colored(step.buildname, 'green', attrs=['bold']),
end='\n\n')
if step.bust_cache:
_rebuilt.add(stackkey)
finalimage = step.buildname
if not nobuild:
self.finalizenames(client, finalimage)
line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)'%(
self.targetname, self.imagename, self.steps[-1].sourcefile)
cprint(_centered(line, width),
color='green', attrs=['bold'])
cprint('=' * width, color='white', attrs=['bold'], end='\n\n') | 0.005106 |
def post_periodic_filtered(values, repeat_after, block):
"""
After every *repeat_after* items, blocks the next *block* items from
*values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after*
can't be 0. For example, to block every tenth item read from an ADC::
from gpiozero import MCP3008
from gpiozero.tools import post_periodic_filtered
adc = MCP3008(channel=0)
for value in post_periodic_filtered(adc, 9, 1):
print(value)
"""
values = _normalize(values)
if repeat_after < 1:
raise ValueError("repeat_after must be 1 or larger")
if block < 1:
raise ValueError("block must be 1 or larger")
it = iter(values)
try:
while True:
for _ in range(repeat_after):
yield next(it)
for _ in range(block):
next(it)
except StopIteration:
pass | 0.001086 |
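The generator works on any iterable, not just GPIO devices, so a quick check with a plain range (assuming _normalize passes ordinary iterables through) is:
print(list(post_periodic_filtered(range(10), 2, 1)))
# [0, 1, 3, 4, 6, 7, 9] -- after every two yielded values, one is blocked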
def get_snapshots(self, si, logger, vm_uuid):
"""
        Retrieves the snapshots of a virtual machine
:param vim.ServiceInstance si: py_vmomi service instance
:param logger: Logger
:param vm_uuid: uuid of the virtual machine
"""
vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)
logger.info("Get snapshots")
snapshots = SnapshotRetriever.get_vm_snapshots(vm)
return snapshots.keys() | 0.004405 |
def read_files(*files):
"""Read files into setup"""
text = ""
for single_file in files:
content = read(single_file)
text = text + content + "\n"
return text | 0.005319 |
def pad_if_need(sz_atleast, img, mode='constant'):
# fixme : function or ....
"""
    pad img if needed to guarantee a minimum size
    :param sz_atleast: [H,W] minimum size
    :param img: image np.array [H,W, ...]
    :param mode: str, padding mode
    :return: padded image, or the input as-is if it is already large enough
"""
# sz_atleast = np.asarray(sz_atleast)
imsz = img.shape[:2] # assume img [H,W, ...]
padneed = np.asarray((sz_atleast[0] - imsz[0], sz_atleast[1] - imsz[1]))
if np.any(padneed > 0):
# need padding
padding = np.zeros((img.ndim, 2), dtype='int16')
padneed = np.maximum(padneed, 0)
        # integer division so the two sides always sum to the full pad amount
        padding[:2, 0] = padneed // 2
        padding[:2, 1] = padneed - padneed // 2
img = np.pad(img, padding, mode=mode)
return img | 0.001302 |
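A small usage sketch, assuming numpy is imported as np in the surrounding module: padding a 3x3 array up to at least 5x6 keeps the original values and zero-fills around them, while an already-large-enough image is returned unchanged:
import numpy as np

img = np.ones((3, 3))
print(pad_if_need((5, 6), img).shape)  # (5, 6)
print(pad_if_need((2, 2), img).shape)  # (3, 3) -- no padding needed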
def remove_whitespace(self, html):
"""
Clean whitespace from html
@Params
html - html source to remove whitespace from
@Returns
String html without whitespace
"""
# Does python have a better way to do exactly this?
clean_html = html
for char in ("\r", "\n", "\t"):
clean_html = clean_html.replace(char, "")
return clean_html | 0.004717 |
def login_and_store_credentials_in_session(request, *args, **kwargs):
'''Custom login view. Calls the standard Django authentication,
but on successful login, stores encrypted user credentials in
order to allow accessing the Fedora repository with the
credentials of the currently-logged in user (e.g., when the
application and Fedora share a common authentication system, such
as LDAP).
In order for :class:`~eulcore.django.fedora.server.Repository` to
pick up user credentials, you must pass the request object in (so
it will have access to the session). Example::
from eulcore.django.fedora.server import Repository
def my_view(rqst):
repo = Repository(request=rqst)
Any arguments supported by :meth:`django.contrib.auth.views.login`
can be specified and they will be passed along for the standard
login functionality.
    **This is not terribly secure. Do NOT use this method unless
you need the functionality.**
'''
response = authviews.login(request, *args, **kwargs)
if request.method == "POST" and request.user.is_authenticated():
# on successful login, encrypt and store user's password to use for fedora access
request.session[FEDORA_PASSWORD_SESSION_KEY] = encrypt(request.POST.get('password'))
return response | 0.00223 |
def reboot(name, call=None):
'''
Reboot a vagrant minion.
name
The name of the VM to reboot.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
'''
if call != 'action':
raise SaltCloudException(
'The reboot action must be called with -a or --action.'
)
my_info = _get_my_info(name)
profile_name = my_info[name]['profile']
profile = __opts__['profiles'][profile_name]
host = profile['host']
local = salt.client.LocalClient()
return local.cmd(host, 'vagrant.reboot', [name]) | 0.001712 |
def resample(data, s_freq=None, axis='time', ftype='fir', n=None):
"""Downsample the data after applying a filter.
Parameters
----------
data : instance of Data
data to downsample
s_freq : int or float
desired sampling frequency
axis : str
axis you want to apply downsample on (most likely 'time')
ftype : str
filter type to apply. The default here is 'fir', like Matlab but unlike
the default in scipy, because it works better
n : int
The order of the filter (1 less than the length for ‘fir’).
Returns
-------
instance of Data
downsampled data
"""
output = data._copy()
ratio = int(data.s_freq / s_freq)
for i in range(data.number_of('trial')):
output.data[i] = decimate(data.data[i], ratio,
axis=data.index_of(axis),
zero_phase=True)
n_samples = output.data[i].shape[data.index_of(axis)]
output.axis[axis][i] = linspace(data.axis[axis][i][0],
data.axis[axis][i][-1] +
1 / data.s_freq,
n_samples)
output.s_freq = s_freq
return output | 0.000783 |
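The core step is scipy's polyphase decimation; a standalone sketch of just that step (assuming scipy is installed) downsamples a one-second, 100 Hz signal to 50 Hz:
import numpy as np
from scipy.signal import decimate

s_freq_in, s_freq_out = 100, 50
t = np.arange(0, 1, 1 / s_freq_in)
signal = np.sin(2 * np.pi * 5 * t)            # 5 Hz sine sampled at 100 Hz
downsampled = decimate(signal, int(s_freq_in / s_freq_out), zero_phase=True)
print(signal.shape, downsampled.shape)        # (100,) (50,)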
def xml_filter(self, content):
r"""Filter and preprocess xml content
:param content: xml content
:rtype: str
"""
content = utils.strip_whitespace(content, True) if self.__options['strip'] else content.strip()
if not self.__options['encoding']:
encoding = self.guess_xml_encoding(content) or self.__encoding
self.set_options(encoding=encoding)
if self.__options['encoding'].lower() != self.__encoding:
            # transcode, then strip the XML declaration header
content = self.strip_xml_header(content.decode(self.__options['encoding'], errors=self.__options['errors']))
if self.__options['unescape']:
content = utils.html_entity_decode(content)
return content | 0.005348 |
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
"""
Return a dictionary that describes a given language tag in a specified
natural language.
See `language_name` and related methods for more specific versions of this.
The desired `language` will in fact be matched against the available
options using the matching technique that this module provides. We can
illustrate many aspects of this by asking for a description of Shavian
script (a script devised by author George Bernard Shaw), and where you
might find it, in various languages.
>>> from pprint import pprint
>>> shaw = Language.make(script='Shaw').maximize()
>>> pprint(shaw.describe('en'))
{'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
>>> pprint(shaw.describe('fr'))
{'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}
>>> pprint(shaw.describe('es'))
{'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('pt'))
{'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('uk'))
{'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}
>>> pprint(shaw.describe('arb'))
{'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}
>>> pprint(shaw.describe('th'))
{'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}
>>> pprint(shaw.describe('zh-Hans'))
{'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}
>>> pprint(shaw.describe('zh-Hant'))
{'language': '英文', 'region': '英國', 'script': '簫柏納字符'}
>>> pprint(shaw.describe('ja'))
{'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}
When we don't have a localization for the language, we fall back on
'und', which just shows the language codes.
>>> pprint(shaw.describe('lol'))
{'language': 'en', 'region': 'GB', 'script': 'Shaw'}
Wait, is that a real language?
>>> pprint(Language.get('lol').maximize().describe())
{'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
"""
names = {}
if self.language:
names['language'] = self.language_name(language, min_score)
if self.script:
names['script'] = self.script_name(language, min_score)
if self.region:
names['region'] = self.region_name(language, min_score)
if self.variants:
names['variants'] = self.variant_names(language, min_score)
return names | 0.002911 |
def relabel(self, qubits: Qubits) -> 'Density':
"""Return a copy of this state with new qubits"""
return Density(self.vec.tensor, qubits, self._memory) | 0.011976 |
def subselect(self, obj):
"""
Filter a dict of hyperparameter settings to only those keys defined
        in this HyperparameterDefaults.
"""
return dict(
(key, value) for (key, value)
in obj.items()
if key in self.defaults) | 0.006803 |
def data_in_db(db_data, user_data):
"""Validate db data in user data.
Args:
db_data (str): The data store in Redis.
user_data (list): The user provided data.
Returns:
bool: True if the data passed validation.
"""
if isinstance(user_data, list):
if db_data in user_data:
return True
return False | 0.004902 |
def _get_fuzzy_tc_matches(text, full_text, options):
'''
Get the options that match the full text, then from each option
return only the individual words which have not yet been matched
which also match the text being tab-completed.
'''
print("text: {}, full: {}, options: {}".format(text, full_text, options))
# get the options which match the full text
matching_options = _get_fuzzy_matches(full_text, options)
# need to only return the individual words which:
# - match the 'text'
# - are not exclusively matched by other input in full_text
# - when matched, still allows all other input in full_text to be matched
# get the input tokens
input_tokens = full_text.split()
# remove one instance of the text to be matched
    input_tokens.remove(text)  # list.remove() mutates in place and returns None
# track the final matches:
final_matches = []
# find matches per option
for option in options:
option_tokens = option.split()
# get tokens which match the text
matches = [t for t in option_tokens if text in t]
# get input tokens which match one of the matches
input_tokens_which_match = [t for t in input_tokens for m in matches if t in m]
# if any input token ONLY matches a match, remove that match
for token in input_tokens_which_match:
token_matches = [t for t in option_tokens if token in t]
if len(token_matches) == 1:
match = token_matches[0]
if match in matches:
matches.remove(match)
# for the remaining matches, if the input tokens can be fuzzily matched without
# the match, it's ok to return it.
for match in matches:
# copy option tokens
option_tokens_minus_match = option_tokens[:]
# remove the match
option_tokens_minus_match.remove(match)
option_minus_match = ' '.join(option_tokens_minus_match)
if _get_fuzzy_matches(' '.join(input_tokens), [option_minus_match]):
if match not in final_matches:
final_matches.append(match)
return final_matches | 0.001841 |
def parse_headers_link(headers):
"""Returns the parsed header links of the response, if any."""
header = CaseInsensitiveDict(headers).get('link')
l = {}
if header:
links = parse_link(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l | 0.005917 |
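Assuming parse_link splits an RFC 5988 Link header into dicts with url and rel keys (as the requests library's parser does), a typical pagination header parses roughly like this:
headers = {
    "Link": '<https://api.example.com/items?page=2>; rel="next", '
            '<https://api.example.com/items?page=5>; rel="last"',
}
links = parse_headers_link(headers)
print(links["next"]["url"])  # https://api.example.com/items?page=2
print(links["last"]["url"])  # https://api.example.com/items?page=5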
def clean_time_slots(self):
"""Clean up all unused timeslots.
.. warning:: This can and will take time for larger tiers.
When you want to do a lot of operations on a lot of tiers please unset
the flags for cleaning in the functions so that the cleaning is only
performed afterwards.
"""
ts = ((a[0], a[1]) for t in self.tiers.values() for a in t[0].values())
for a in {a for b in ts for a in b} ^ set(self.timeslots):
del(self.timeslots[a]) | 0.003861 |
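The deletion relies on a set symmetric difference: assuming every slot referenced by an annotation is also a key of self.timeslots, used ^ all leaves exactly the unused keys. A tiny illustration of the idiom:
all_slots = {"ts1", "ts2", "ts3", "ts4"}
used_slots = {"ts1", "ts3"}        # slots referenced by some annotation
unused = used_slots ^ all_slots    # equals all_slots - used_slots because used is a subset
print(sorted(unused))              # ['ts2', 'ts4']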
def handle_connack(self):
"""Handle incoming CONNACK command."""
self.logger.info("CONNACK reveived")
ret, flags = self.in_packet.read_byte()
if ret != NC.ERR_SUCCESS:
self.logger.error("error read byte")
return ret
# useful for v3.1.1 only
session_present = flags & 0x01
ret, retcode = self.in_packet.read_byte()
if ret != NC.ERR_SUCCESS:
return ret
evt = event.EventConnack(retcode, session_present)
self.push_event(evt)
if retcode == NC.CONNECT_ACCEPTED:
self.state = NC.CS_CONNECTED
return NC.ERR_SUCCESS
elif retcode >= 1 and retcode <= 5:
return NC.ERR_CONN_REFUSED
else:
return NC.ERR_PROTOCOL | 0.007273 |
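For reference, the two bytes read here form the MQTT 3.1.1 CONNACK variable header: an acknowledge-flags byte whose bit 0 is the session-present flag, followed by a return code where 0 means accepted. A minimal standalone decode, with names chosen here purely for illustration:
def decode_connack(payload: bytes):
    # payload is the 2-byte CONNACK variable header
    flags, retcode = payload[0], payload[1]
    session_present = bool(flags & 0x01)
    return session_present, retcode, retcode == 0

print(decode_connack(b"\x01\x00"))  # (True, 0, True)   -- session resumed, connection accepted
print(decode_connack(b"\x00\x05"))  # (False, 5, False)  -- refused: not authorized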
def read_only(self, value):
"""
Setter for **self.__read_only** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("read_only", value)
self.__read_only = value | 0.008721 |
def print_warning(cls):
"""Print a missing progress bar warning if it was not printed.
"""
if not cls.warning:
cls.warning = True
print('Can\'t create progress bar:', str(TQDM_IMPORT_ERROR),
file=sys.stderr) | 0.007326 |
def _dump_cml_molecule(f, molecule):
"""Dump a single molecule to a CML file
Arguments:
| ``f`` -- a file-like object
| ``molecule`` -- a Molecule instance
"""
extra = getattr(molecule, "extra", {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in extra.items())
f.write(" <molecule id='%s' %s>\n" % (molecule.title, attr_str))
f.write(" <atomArray>\n")
atoms_extra = getattr(molecule, "atoms_extra", {})
for counter, number, coordinate in zip(range(molecule.size), molecule.numbers, molecule.coordinates/angstrom):
atom_extra = atoms_extra.get(counter, {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in atom_extra.items())
f.write(" <atom id='a%i' elementType='%s' x3='%s' y3='%s' z3='%s' %s />\n" % (
counter, periodic[number].symbol, coordinate[0], coordinate[1],
coordinate[2], attr_str,
))
f.write(" </atomArray>\n")
if molecule.graph is not None:
bonds_extra = getattr(molecule, "bonds_extra", {})
f.write(" <bondArray>\n")
for edge in molecule.graph.edges:
bond_extra = bonds_extra.get(edge, {})
attr_str = " ".join("%s='%s'" % (key, value) for key, value in bond_extra.items())
i1, i2 = edge
f.write(" <bond atomRefs2='a%i a%i' %s />\n" % (i1, i2, attr_str))
f.write(" </bondArray>\n")
f.write(" </molecule>\n") | 0.004772 |
def setShowLanguage(self, state):
"""
Sets the display mode for this widget to the inputed mode.
:param state | <bool>
"""
if state == self._showLanguage:
return
self._showLanguage = state
self.setDirty() | 0.013115 |
def calculate_size(name, listener_flags, local_only):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += INT_SIZE_IN_BYTES
data_size += BOOLEAN_SIZE_IN_BYTES
return data_size | 0.003922 |