text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def list_cidr_ips(cidr):
'''
Get a list of IP addresses from a CIDR.
CLI example::
salt myminion netaddress.list_cidr_ips 192.168.0.0/20
'''
ips = netaddr.IPNetwork(cidr)
return [six.text_type(ip) for ip in list(ips)] | 0.003984 |
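# Usage sketch for list_cidr_ips: the surrounding module is assumed to import
# netaddr and six at module level (as Salt's netaddress module does); a /30 is
# used here only to keep the output short.
print(list_cidr_ips('192.168.0.0/30'))
# ['192.168.0.0', '192.168.0.1', '192.168.0.2', '192.168.0.3']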
def get_existing_path(path, topmost_path=None):
"""Get the longest parent path in `path` that exists.
If `path` exists, it is returned.
Args:
path (str): Path to test
topmost_path (str): Do not test this path or above
Returns:
str: Existing path, or None if no path was found.
"""
prev_path = None
if topmost_path:
topmost_path = os.path.normpath(topmost_path)
while True:
if os.path.exists(path):
return path
path = os.path.dirname(path)
if path == prev_path:
return None
if topmost_path and os.path.normpath(path) == topmost_path:
return None
prev_path = path | 0.00141 |
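# A runnable sketch of get_existing_path's walk-up behaviour; tempfile provides a
# real existing directory, and the child path below it is deliberately missing.
import os, tempfile
base = tempfile.mkdtemp()
missing = os.path.join(base, 'a', 'b', 'c')           # does not exist
print(get_existing_path(missing) == base)             # True: longest existing parent
print(get_existing_path(missing, topmost_path=base))  # None: base itself is excluded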
def positional_filter(positional_filters, title=''):
'''
a method to construct a conditional filter function to test positional arguments
:param positional_filters: dictionary or list of dictionaries with query criteria
    :param title: string with the name of the calling function to use in error messages
:return: callable for filter_function
NOTE: query criteria architecture
each item in the path filters argument must be a dictionary
which is composed of integer-value key names that represent the
index value of the positional segment to test and key values
with the dictionary of conditional operators used to test the
string value in the indexed field of the record.
eg. positional_filters = [ { 0: { 'must_contain': [ '^lab' ] } } ]
this example filter looks at the first segment of each key string
in the collection for a string value which starts with the
characters 'lab'. as a result, it will match both the following:
lab/unittests/1473719695.2165067.json
'laboratory20160912.json'
NOTE: the filter function uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the positional_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
each query criteria uses the architecture of query declaration in
the jsonModel.query method
    NOTE: the function will lazy load a dictionary input
positional_filters:
[ { 0: { conditional operators }, 1: { conditional_operators }, ... } ]
conditional operators:
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
'equal_to': '',
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
'''
# define help text
if not title:
title = 'positional_filter'
filter_arg = '%s(positional_filters=[...])' % title
# construct path_filter model
filter_schema = {
'schema': {
'byte_data': False,
'discrete_values': [ '' ],
'excluded_values': [ '' ],
'equal_to': '',
'greater_than': '',
'less_than': '',
'max_length': 0,
'max_value': '',
'min_length': 0,
'min_value': '',
'must_contain': [ '' ],
'must_not_contain': [ '' ],
'contains_either': [ '' ]
},
'components': {
'.discrete_values': {
'required_field': False
},
'.excluded_values': {
'required_field': False
},
'.must_contain': {
'required_field': False
},
'.must_not_contain': {
'required_field': False
},
'.contains_either': {
'required_field': False
}
}
}
from jsonmodel.validators import jsonModel
filter_model = jsonModel(filter_schema)
# lazy load path dictionary
if isinstance(positional_filters, dict):
positional_filters = [ positional_filters ]
# validate input
if not isinstance(positional_filters, list):
raise TypeError('%s must be a list.' % filter_arg)
for i in range(len(positional_filters)):
if not isinstance(positional_filters[i], dict):
raise TypeError('%s item %s must be a dictionary.' % (filter_arg, i))
for key, value in positional_filters[i].items():
_key_name = '%s : {...}' % key
if not isinstance(key, int):
raise TypeError('%s key name must be an int.' % filter_arg.replace('...', _key_name))
elif not isinstance(value, dict):
raise TypeError('%s key value must be a dictionary' % filter_arg.replace('...', _key_name))
filter_model.validate(value)
# construct segment value model
segment_schema = { 'schema': { 'segment_value': 'string' } }
segment_model = jsonModel(segment_schema)
# construct filter function
def filter_function(*args):
max_index = len(args) - 1
for filter in positional_filters:
criteria_match = True
for key, value in filter.items():
if key > max_index:
criteria_match = False
break
segment_criteria = { '.segment_value': value }
segment_data = { 'segment_value': args[key] }
if not segment_model.query(segment_criteria, segment_data):
criteria_match = False
break
if criteria_match:
return True
return False
return filter_function | 0.006119 |
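# Hedged usage sketch for positional_filter: the returned callable takes the positional
# segments as separate arguments, and jsonmodel's query operators (here must_contain
# with a regex) are assumed to perform the matching as the validator above expects.
match_lab = positional_filter([{0: {'must_contain': ['^lab']}}])
print(match_lab('lab', 'unittests', '1473719695.2165067.json'))   # True
print(match_lab('laboratory20160912.json'))                       # True
print(match_lab('prod', 'unittests', '1473719695.2165067.json'))  # False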
def calibrate_percentile_ranks(
self,
peptides=None,
num_peptides_per_length=int(1e5),
alleles=None,
bins=None):
"""
Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration
"""
if bins is None:
bins = to_ic50(numpy.linspace(1, 0, 1000))
if alleles is None:
alleles = self.supported_alleles
if peptides is None:
peptides = []
lengths = range(
self.supported_peptide_lengths[0],
self.supported_peptide_lengths[1] + 1)
for length in lengths:
peptides.extend(
random_peptides(num_peptides_per_length, length))
encoded_peptides = EncodableSequences.create(peptides)
for (i, allele) in enumerate(alleles):
predictions = self.predict(encoded_peptides, allele=allele)
transform = PercentRankTransform()
transform.fit(predictions, bins=bins)
self.allele_to_percent_rank_transform[allele] = transform
return encoded_peptides | 0.00142 |
def get_report(self, ledger_id, report_id):
"""Get report info
Arguments:
ledger_id:
Id for ledger for report
report_id:
Report id assigned by mCASH
"""
return self.do_req('GET',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/report/' +
report_id + '/').json() | 0.004556 |
def class_in_progress(stack=None):
"""True if currently inside a class definition, else False."""
if stack is None:
stack = inspect.stack()
for frame in stack:
statement_list = frame[4]
if statement_list is None:
continue
if statement_list[0].strip().startswith('class '):
return True
return False | 0.00271 |
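# Sketch of the intended behaviour of class_in_progress (run from a file so that
# inspect.stack() can see source context lines): while a class body executes, the
# calling frame's current statement starts with 'class ', so the check returns True.
class Example(object):
    flag = class_in_progress()            # True during class-body execution
print(Example.flag, class_in_progress())  # True False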
def main(output):
"""
Generate a c7n-org subscriptions config file
"""
client = SubscriptionClient(Session().get_credentials())
subs = [sub.serialize(True) for sub in client.subscriptions.list()]
results = []
for sub in subs:
sub_info = {
'subscription_id': sub['subscriptionId'],
'name': sub['displayName']
}
results.append(sub_info)
print(
yaml.safe_dump(
{'subscriptions': results},
default_flow_style=False),
file=output) | 0.001828 |
def list_healthchecks(self, service_id, version_number):
"""List all of the healthchecks for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number))
return map(lambda x: FastlyHealthCheck(self, x), content) | 0.021127 |
def update_time_login(u_name):
'''
Update the login time for user.
'''
entry = TabMember.update(
time_login=tools.timestamp()
).where(
TabMember.user_name == u_name
)
entry.execute() | 0.007634 |
def items(self):
"""D.items() -> a set-like object providing a view on D's items"""
keycol = self._keycol
for row in self.__iter__():
yield (row[keycol], dict(row)) | 0.01 |
def sync_update_price_info(self):
"""Update current price info."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.update_price_info())
loop.run_until_complete(task) | 0.009524 |
def __run_blast_select_loop(input_file, popens, fields):
'''
Run the select(2) loop to handle blast I/O to the given set of Popen
objects.
Yields records back that have been read from blast processes.
'''
def make_nonblocking(f):
fl = fcntl.fcntl(f.fileno(), fcntl.F_GETFL)
fl |= os.O_NONBLOCK
fcntl.fcntl(f.fileno(), fcntl.F_SETFL, fl)
rfds = set()
wfds = set()
fd_map = {}
for p in popens:
make_nonblocking(p.stdout)
rfds.add(p.stdout.fileno())
fd_map[p.stdout.fileno()] = {
'popen': p,
'query_buffer': '',
'result_buffer': ''}
make_nonblocking(p.stdin)
wfds.add(p.stdin.fileno())
fd_map[p.stdin.fileno()] = fd_map[p.stdout.fileno()]
while len(rfds) + len(wfds) > 0:
# XXX: Should we be tracking excepted file descriptors as well?
rl, wl, _ = select.select(rfds, wfds, [])
# For each of our readable blast processes, read response
# records and emit them
for fd in rl:
rs = fd_map[fd]['result_buffer']
rbuf = os.read(fd, select.PIPE_BUF)
# The blast process has finished emitting records. Stop
# attempting to read from or write to it. If we have
# excess data in our result_buffer, c'est la vie.
if rbuf == '':
p = fd_map[fd]['popen']
rfds.remove(p.stdout.fileno())
p.stdout.close()
if not p.stdin.closed:
wfds.remove(p.stdin.fileno())
p.stdin.close()
continue
rs += rbuf
while True:
rec, rs = __read_single_query_result(rs, fields)
if rec is None:
break
yield rec
fd_map[fd]['result_buffer'] = rs
# For each of our writable blast processes, grab a new query
# sequence and send it off to them.
for fd in wl:
qs = fd_map[fd]['query_buffer']
if not qs:
ql = __read_single_fasta_query_lines(input_file)
# No more input records available. Close the pipe to
# signal this to the blast process.
if ql is None:
p = fd_map[fd]['popen']
wfds.remove(p.stdin.fileno())
p.stdin.close()
continue
qs = ''.join(ql)
# XXX: For some reason, despite select(2) indicating that
# this file descriptor is writable, writes can fail
# with EWOULDBLOCK. Handle this gracefully.
try:
written = os.write(fd, qs)
qs = qs[written:]
            except OSError as e:
assert e.errno == errno.EWOULDBLOCK
fd_map[fd]['query_buffer'] = qs | 0.000677 |
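# Standalone sketch of the non-blocking trick used by make_nonblocking above.
# On Python 3, a read that would block raises BlockingIOError (EWOULDBLOCK)
# instead of returning an empty string.
import os, fcntl
r, w = os.pipe()
flags = fcntl.fcntl(r, fcntl.F_GETFL)
fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
    os.read(r, 1024)              # nothing has been written yet
except BlockingIOError:
    print('read would block, so the loop should select() and try again later')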
def print_code_table(self, out=sys.stdout):
"""
Print code table overview
"""
out.write(u'bits code (value) symbol\n')
for symbol, (bitsize, value) in sorted(self._table.items()):
out.write(u'{b:4d} {c:10} ({v:5d}) {s!r}\n'.format(
b=bitsize, v=value, s=symbol, c=bin(value)[2:].rjust(bitsize, '0')
)) | 0.007673 |
def plot1d(self, grid=None, size=64, limits=None, weight=None, figsize=None, f="identity", axes=None, xlabel=None, ylabel=None, **kwargs):
"""Plot the subspace using sane defaults to get a quick look at the data.
        :param grid: A 1d numpy array with the counts; if None it will be calculated using the limits provided and Subspace.histogram
        :param size: Passed to Subspace.histogram
        :param limits: Limits for the subspace in the form [[xmin, xmax]]; if None it will be calculated using Subspace.limits_sigma
:param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
:param xlabel: String for label on x axis (may contain latex)
:param ylabel: Same for y axis
:param kwargs: extra argument passed to ...,
"""
import pylab
f = _parse_f(f)
limits = self.limits(limits)
assert self.dimension == 1, "can only plot 1d, not %s" % self.dimension
if limits is None:
limits = self.limits_sigma()
if grid is None:
grid = self.histogram(limits=limits, size=size, weight=weight)
if figsize is not None:
pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
if axes is None:
axes = pylab.gca()
# if xlabel:
pylab.xlabel(xlabel or self.expressions[0])
# if ylabel:
# pylab.ylabel(ylabel or self.expressions[1])
pylab.ylabel("counts" or ylabel)
# axes.set_aspect(aspect)
N = len(grid)
xmin, xmax = limits[0]
return pylab.plot(np.arange(N) / (N - 1.0) * (xmax - xmin) + xmin, f(grid,), drawstyle="steps", **kwargs) | 0.005263 |
def get_virtual_transactions(blockchain_name, blockchain_opts, first_block_height, last_block_height, tx_filter=None, **hints):
"""
Get the sequence of virtualchain transactions from a particular blockchain over a given range of block heights.
Returns a list of tuples in the format of [(block height, [txs])], where
each tx in [txs] is the parsed transaction. The parsed transaction will conform to... # TODO write a spec for this
Each transaction has at least the following fields:
`version`: the version of the transaction
`txindex`: the index into the block where this tx occurs
`ins`: a list of transaction inputs, where each member is a dict with:
`outpoint`: a dict of {'hash': txid of transaction that fed it in, 'index': the index into the feeder tx's outputs list}
`script`: the signature script for this input
`outs`: a list of transaction outputs, where each member is a dict with:
`value`: the amount of currency units spent (in the fundamental units of the chain)
`script`: the spending script for this input
`senders`: a list of information in 1-to-1 correspondence with each input regarding the transactions that funded it:
`value`: the amount of currency units sent (in fundamental units of the chain)
`script_pubkey`: the spending script for the sending transaction
Returns [(block height, [txs])] on success
Returns None on error.
Raises ValueError on unknown blockchain
"""
if blockchain_name == 'bitcoin':
return get_bitcoin_virtual_transactions(blockchain_opts, first_block_height, last_block_height, tx_filter=tx_filter, **hints)
else:
raise ValueError("Unknown blockchain {}".format(blockchain_name)) | 0.007345 |
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape) | 0.001059 |
def future(self,in_days=None,in_hours=None,in_minutes=None,in_seconds=None):
"""
        Return the forecast timestep at the given offset (days, hours, minutes and/or seconds) from now
"""
future = None
# Initialize variables to 0
dd, hh, mm, ss = [0 for i in range(4)]
if (in_days != None):
dd = dd + in_days
if (in_hours != None):
hh = hh + in_hours
if (in_minutes != None):
mm = mm + in_minutes
if (in_seconds != None):
ss = ss + in_seconds
# Set the hours, minutes and seconds from now (minus the days)
dnow = datetime.datetime.utcnow() # Now
d = dnow + \
datetime.timedelta(hours=hh, minutes=mm, seconds = ss)
# Time from midnight
for_total_seconds = d - \
d.replace(hour=0, minute=0, second=0, microsecond=0)
# Convert into minutes since midnight
try:
msm = for_total_seconds.total_seconds()/60.
except:
# For versions before 2.7
msm = self.timedelta_total_seconds(for_total_seconds)/60.
if (dd<len(self.days)):
for timestep in self.days[dd].timesteps:
if timestep.name >= msm:
future = timestep
return future
else:
            print('ERROR: requested date is outside the forecast range selected, ' + str(len(self.days)))
return False | 0.010526 |
def _execute_command(cmd, at_time=None):
'''
Helper function to execute the command
:param str cmd: the command to run
:param str at_time: If passed, the cmd will be scheduled.
Returns: bool
'''
if at_time:
cmd = 'echo \'{0}\' | at {1}'.format(cmd, _cmd_quote(at_time))
return not bool(__salt__['cmd.retcode'](cmd, python_shell=True)) | 0.002653 |
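# Illustration of the command string composed when at_time is given; Salt's
# _cmd_quote is assumed to behave like shlex.quote, which it wraps in practice.
from shlex import quote as _cmd_quote
cmd = "echo '{0}' | at {1}".format('touch /tmp/flag', _cmd_quote('now + 5 minutes'))
print(cmd)   # echo 'touch /tmp/flag' | at 'now + 5 minutes'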
def GetHostMemKernOvhdMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemKernOvhdMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | 0.014184 |
def get_params(self):
'''Get the parameters for this object. Returns as a dict.'''
out = {}
out['__class__'] = self.__class__
out['params'] = dict(steps=[])
for name, step in self.steps:
out['params']['steps'].append([name, step.get_params(deep=True)])
return out | 0.006116 |
def _check_params(ignored_variables, ignored_interactions):
"""Helper for sample methods"""
if ignored_variables is None:
ignored_variables = set()
elif not isinstance(ignored_variables, abc.Container):
ignored_variables = set(ignored_variables)
if ignored_interactions is None:
ignored_interactions = set()
elif not isinstance(ignored_interactions, abc.Container):
ignored_interactions = set(ignored_interactions)
return ignored_variables, ignored_interactions | 0.001923 |
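# Quick sketch of the normalisation above (the module is assumed to do
# `from collections import abc`): None becomes an empty set, a non-Container
# iterable such as a generator is materialised into a set, and real containers
# pass through unchanged.
ignored_vars, ignored_inters = _check_params(None, (i for i in [(0, 1), (1, 2)]))
print(ignored_vars)     # set()
print(ignored_inters)   # {(0, 1), (1, 2)} (a set; ordering may differ)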
def choose_coord_units(header):
"""Return the appropriate key code for the units value for the axes by
examining the FITS header.
"""
cunit = header['CUNIT1']
match = re.match(r'^deg\s*$', cunit)
if match:
return 'degree'
# raise WCSError("Don't understand units '%s'" % (cunit))
return 'degree' | 0.002976 |
def is_in_expr(expr, find):
"""Returns True if `find` is a subtree of `expr`."""
return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find)) | 0.019108 |
def addTab(self, tab):
"""
Adds a new tab to this panel bar.
:param tab | <XViewPanelItem> || <str>
        :return     <XViewPanelItem>
"""
if not isinstance(tab, XViewPanelItem):
tab = XViewPanelItem(tab, self)
tab.setFixedHeight(self.height())
index = len(self.items())
self.layout().insertWidget(index, tab)
self.setCurrentIndex(index)
return tab | 0.004505 |
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None):
"""Wraps Django's reverse to prepend the correct locale."""
prefixer = get_url_prefix()
if prefixer:
prefix = prefix or '/'
url = django_reverse(viewname, urlconf, args, kwargs, prefix)
if prefixer:
url = prefixer.fix(url)
# Ensure any unicode characters in the URL are escaped.
return iri_to_uri(url) | 0.002375 |
def sound_speed(self, value):
"""Sets the sound speed in m/s. Default is 1525.0. If this function is
called, `sound_speed_mode` will be set to fixed."""
if not self.sound_speed_mode:
self.sound_speed_mode = 1
self.pdx.SoundSpeed = float(value) | 0.006969 |
def remove_groups(self, group_list):
"""Remove groups from the model.
Members of each group are not removed
from the model (i.e. metabolites, reactions, and genes in the group
stay in the model after any groups containing them are removed).
Parameters
----------
group_list : list
A list of `cobra.Group` objects to remove from the model.
"""
if isinstance(group_list, string_types) or \
hasattr(group_list, "id"):
warn("need to pass in a list")
group_list = [group_list]
for group in group_list:
# make sure the group is in the model
if group.id not in self.groups:
LOGGER.warning("%r not in %r. Ignored.", group, self)
else:
self.groups.remove(group)
group._model = None | 0.002242 |
def dump(self):
""" dump extracted data into a single hdf5file,
:return: None
:Example:
        >>> # dump data into an hdf5 formatted file
>>> datafields = ['s', 'Sx', 'Sy', 'enx', 'eny']
>>> datascript = 'sddsprintdata.sh'
>>> datapath = './tests/tracking'
>>> hdf5file = './tests/tracking/test.h5'
>>> A = DataExtracter('test.sig', *datafields)
>>> A.setDataScript(datascript)
>>> A.setDataPath (datapath)
>>> A.setH5file (hdf5file)
>>> A.extractData().dump()
>>>
>>> # read dumped file
>>> fd = h5py.File(hdf5file, 'r')
>>> d_s = fd['s'][:]
>>> d_sx = fd['Sx'][:]
>>>
>>> # plot dumped data
>>> import matplotlib.pyplot as plt
>>> plt.figure(1)
>>> plt.plot(d_s, d_sx, 'r-')
>>> plt.xlabel('$s$')
>>> plt.ylabel('$\sigma_x$')
>>> plt.show()
Just like the following figure shows:
.. image:: ../../images/test_DataExtracter.png
:width: 400px
"""
f = h5py.File(self.h5file, 'w')
for i, k in enumerate(self.kwslist):
v = self.h5data[:, i]
dset = f.create_dataset(k, shape=v.shape, dtype=v.dtype)
dset[...] = v
f.close() | 0.002257 |
def ligas(self, query):
""" Retorna o resultado da busca ao Cartola por um determinado termo de pesquisa.
Args:
query (str): Termo para utilizar na busca.
Returns:
Uma lista de instâncias de cartolafc.Liga, uma para cada liga contento o termo utilizado na busca.
"""
url = '{api_url}/ligas'.format(api_url=self._api_url)
data = self._request(url, params=dict(q=query))
return [Liga.from_dict(liga_info) for liga_info in data] | 0.007874 |
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
"""
Put a local GPG key into a blockchain ID's global account.
If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
Return {'error': ...} on error
"""
if key_name is not None:
assert is_valid_keyname(key_name)
if key_server is None:
key_server = DEFAULT_KEY_SERVER
if gpghome is None:
gpghome = get_default_gpg_home()
put_res = {}
extra_fields = {}
key_data = None
if key_name is not None:
extra_fields = {'keyName': key_name}
if key_url is None:
gpg = gnupg.GPG( homedir=gpghome )
if use_key_server:
# replicate key data to default server first
res = gpg.send_keys( key_server, key_id )
if len(res.data) > 0:
# error
log.error("GPG failed to upload key '%s'" % key_id)
log.error("GPG error:\n%s" % res.stderr)
                return {'error': 'Failed to replicate GPG key to default keyserver'}
key_data = gpg.export_keys( [key_id] )
if immutable:
# replicate to immutable storage
immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
if 'error' in immutable_result:
return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
else:
put_res['transaction_hash'] = immutable_result['transaction_hash']
put_res['zonefile_hash'] = immutable_result['zonefile_hash']
key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
else:
# replicate to mutable storage
mutable_name = key_name
if key_name is None:
mutable_name = key_id
mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in mutable_result:
return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
if 'error' in put_account_res:
return put_account_res
else:
put_account_res.update( put_res )
put_account_res['key_url'] = key_url
put_account_res['key_id'] = key_id
return put_account_res | 0.012833 |
def get_user_id(self, user_id, mount_point='app-id', wrap_ttl=None):
"""GET /auth/<mount_point>/map/user-id/<user_id>
:param user_id:
:type user_id:
:param mount_point:
:type mount_point:
:param wrap_ttl:
:type wrap_ttl:
:return:
:rtype:
"""
path = '/v1/auth/{0}/map/user-id/{1}'.format(mount_point, user_id)
return self._adapter.get(path, wrap_ttl=wrap_ttl).json() | 0.004329 |
def _construct_message(self):
"""Build the message params."""
self.message["text"] = ""
if self.from_:
self.message["text"] += "From: " + self.from_ + "\n"
if self.subject:
self.message["text"] += "Subject: " + self.subject + "\n"
self.message["text"] += self.body
self._add_attachments() | 0.00554 |
def _set_interface_type(self, v, load=False):
"""
Setter method for interface_type, mapped from YANG variable /mpls_state/rsvp/interfaces/interface_type (dcm-interface-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_type() directly.
YANG Description: MPLS RSVP interface type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-interface-type-unknown': {'value': 1}, u'dcm-interface-type-loopback': {'value': 7}, u'dcm-interface-type-ve': {'value': 6}, u'dcm-interface-type-ethernet': {'value': 2}, u'dcm-interface-type-fiber-channel': {'value': 8}, u'dcm-interface-type-port-channel': {'value': 5}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='dcm-interface-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_type must be of a type compatible with dcm-interface-type""",
'defined-type': "brocade-mpls-operational:dcm-interface-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-interface-type-unknown': {'value': 1}, u'dcm-interface-type-loopback': {'value': 7}, u'dcm-interface-type-ve': {'value': 6}, u'dcm-interface-type-ethernet': {'value': 2}, u'dcm-interface-type-fiber-channel': {'value': 8}, u'dcm-interface-type-port-channel': {'value': 5}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='dcm-interface-type', is_config=False)""",
})
self.__interface_type = t
if hasattr(self, '_set'):
self._set() | 0.004052 |
def pitch(ax, ay, az):
'''Angle of x-axis relative to ground (theta)
Args
----
ax: ndarray
x-axis acceleration values
ay: ndarray
y-axis acceleration values
az: ndarray
z-axis acceleration values
Returns
-------
pitch: ndarray
Pitch angle in radians
'''
import numpy
    # arctan2 keeps the sign of ax across quadrants; note that numpy.arctan(ax, out)
    # would treat the second argument as an output array, which is not the intent here
    return numpy.arctan2(ax, numpy.sqrt(ay**2 + az**2)) | 0.002119 |
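# Worked check of the pitch formula (hypothetical g-normalised sample): with ax = 0.5
# and sqrt(ay**2 + az**2) = sqrt(0.75) ~ 0.866, pitch = atan2(0.5, 0.866) ~ 30 degrees.
import numpy
print(numpy.degrees(pitch(numpy.array([0.5]), numpy.array([0.0]), numpy.array([0.866]))))  # ~[30.]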
def logToFile(path, level=logging.INFO):
"""
Create a log handler that logs to the given file.
"""
logger = logging.getLogger()
logger.setLevel(level)
formatter = logging.Formatter(
'%(asctime)s %(name)s %(levelname)s %(message)s')
handler = logging.FileHandler(path)
handler.setFormatter(formatter)
logger.addHandler(handler) | 0.002703 |
def h2o_median_absolute_error(y_actual, y_predicted):
"""
Median absolute error regression loss
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:returns: median absolute error loss (best is 0.0)
"""
ModelBase._check_targets(y_actual, y_predicted)
return (y_predicted - y_actual).abs().median() | 0.00266 |
def ObjectModifiedEventHandler(obj, event):
"""Object has been modified
"""
# only snapshot supported objects
if not supports_snapshots(obj):
return
# take a new snapshot
take_snapshot(obj, action="edit")
# reindex the object in the auditlog catalog
reindex_object(obj) | 0.003205 |
def resize(image, x, y, stretch=False, top=None, left=None, mode='RGB',
resample=None):
"""Return an image resized."""
if x <= 0:
raise ValueError('x must be greater than zero')
if y <= 0:
raise ValueError('y must be greater than zero')
from PIL import Image
resample = Image.ANTIALIAS if resample is None else resample
if not isinstance(resample, numbers.Number):
try:
resample = getattr(Image, resample.upper())
except:
raise ValueError("(1) Didn't understand resample=%s" % resample)
if not isinstance(resample, numbers.Number):
raise ValueError("(2) Didn't understand resample=%s" % resample)
size = x, y
if stretch:
return image.resize(size, resample=resample)
result = Image.new(mode, size)
ratios = [d1 / d2 for d1, d2 in zip(size, image.size)]
if ratios[0] < ratios[1]:
new_size = (size[0], int(image.size[1] * ratios[0]))
else:
new_size = (int(image.size[0] * ratios[1]), size[1])
image = image.resize(new_size, resample=resample)
if left is None:
box_x = int((x - new_size[0]) / 2)
elif left:
box_x = 0
else:
box_x = x - new_size[0]
if top is None:
box_y = int((y - new_size[1]) / 2)
elif top:
box_y = 0
else:
box_y = y - new_size[1]
result.paste(image, box=(box_x, box_y))
return result | 0.001382 |
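# Usage sketch for resize with Pillow; the surrounding module is assumed to import
# numbers, and resample is passed by name so the example does not depend on
# Image.ANTIALIAS, which newer Pillow releases have removed.
from PIL import Image
src = Image.new('RGB', (400, 200), 'red')
letterboxed = resize(src, 100, 100, resample='bicubic')               # aspect kept, centred
stretched = resize(src, 100, 100, stretch=True, resample='bicubic')   # aspect ignored
print(letterboxed.size, stretched.size)                               # (100, 100) (100, 100)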
def bird(zenith, airmass_relative, aod380, aod500, precipitable_water,
ozone=0.3, pressure=101325., dni_extra=1364., asymmetry=0.85,
albedo=0.2):
"""
Bird Simple Clear Sky Broadband Solar Radiation Model
Based on NREL Excel implementation by Daryl R. Myers [1, 2].
Bird and Hulstrom define the zenith as the "angle between a line to
the sun and the local zenith". There is no distinction in the paper
between solar zenith and apparent (or refracted) zenith, but the
relative airmass is defined using the Kasten 1966 expression, which
requires apparent zenith. Although the formulation for calculated
zenith is never explicitly defined in the report, since the purpose
was to compare existing clear sky models with "rigorous radiative
transfer models" (RTM) it is possible that apparent zenith was
obtained as output from the RTM. However, the implentation presented
in PVLIB is tested against the NREL Excel implementation by Daryl
Myers which uses an analytical expression for solar zenith instead
of apparent zenith.
Parameters
----------
zenith : numeric
Solar or apparent zenith angle in degrees - see note above
airmass_relative : numeric
Relative airmass
aod380 : numeric
Aerosol optical depth [cm] measured at 380[nm]
aod500 : numeric
Aerosol optical depth [cm] measured at 500[nm]
precipitable_water : numeric
Precipitable water [cm]
ozone : numeric
Atmospheric ozone [cm], defaults to 0.3[cm]
pressure : numeric
Ambient pressure [Pa], defaults to 101325[Pa]
dni_extra : numeric
Extraterrestrial radiation [W/m^2], defaults to 1364[W/m^2]
asymmetry : numeric
Asymmetry factor, defaults to 0.85
albedo : numeric
Albedo, defaults to 0.2
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi', 'direct_horizontal'`` in [W/m^2].
See also
--------
pvlib.atmosphere.bird_hulstrom80_aod_bb
pvlib.atmosphere.relativeairmass
References
----------
[1] R. E. Bird and R. L Hulstrom, "A Simplified Clear Sky model for Direct
and Diffuse Insolation on Horizontal Surfaces" SERI Technical Report
SERI/TR-642-761, Feb 1981. Solar Energy Research Institute, Golden, CO.
[2] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", pp. 46-51 CRC Press (2013)
`NREL Bird Clear Sky Model <http://rredc.nrel.gov/solar/models/clearsky/>`_
`SERI/TR-642-761 <http://rredc.nrel.gov/solar/pubs/pdfs/tr-642-761.pdf>`_
`Error Reports <http://rredc.nrel.gov/solar/models/clearsky/error_reports.html>`_
"""
etr = dni_extra # extraradiation
ze_rad = np.deg2rad(zenith) # zenith in radians
airmass = airmass_relative
# Bird clear sky model
am_press = atmosphere.get_absolute_airmass(airmass, pressure)
t_rayleigh = (
np.exp(-0.0903 * am_press ** 0.84 * (
1.0 + am_press - am_press ** 1.01
))
)
am_o3 = ozone*airmass
t_ozone = (
1.0 - 0.1611 * am_o3 * (1.0 + 139.48 * am_o3) ** -0.3034 -
0.002715 * am_o3 / (1.0 + 0.044 * am_o3 + 0.0003 * am_o3 ** 2.0)
)
t_gases = np.exp(-0.0127 * am_press ** 0.26)
am_h2o = airmass * precipitable_water
t_water = (
1.0 - 2.4959 * am_h2o / (
(1.0 + 79.034 * am_h2o) ** 0.6828 + 6.385 * am_h2o
)
)
bird_huldstrom = atmosphere.bird_hulstrom80_aod_bb(aod380, aod500)
t_aerosol = np.exp(
-(bird_huldstrom ** 0.873) *
(1.0 + bird_huldstrom - bird_huldstrom ** 0.7088) * airmass ** 0.9108
)
taa = 1.0 - 0.1 * (1.0 - airmass + airmass ** 1.06) * (1.0 - t_aerosol)
rs = 0.0685 + (1.0 - asymmetry) * (1.0 - t_aerosol / taa)
id_ = 0.9662 * etr * t_aerosol * t_water * t_gases * t_ozone * t_rayleigh
ze_cos = np.where(zenith < 90, np.cos(ze_rad), 0.0)
id_nh = id_ * ze_cos
ias = (
etr * ze_cos * 0.79 * t_ozone * t_gases * t_water * taa *
(0.5 * (1.0 - t_rayleigh) + asymmetry * (1.0 - (t_aerosol / taa))) / (
1.0 - airmass + airmass ** 1.02
)
)
gh = (id_nh + ias) / (1.0 - albedo * rs)
diffuse_horiz = gh - id_nh
# TODO: be DRY, use decorator to wrap methods that need to return either
# OrderedDict or DataFrame instead of repeating this boilerplate code
irrads = OrderedDict()
irrads['direct_horizontal'] = id_nh
irrads['ghi'] = gh
irrads['dni'] = id_
irrads['dhi'] = diffuse_horiz
if isinstance(irrads['dni'], pd.Series):
irrads = pd.DataFrame.from_dict(irrads)
return irrads | 0.000418 |
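# Hedged single-point sketch of calling bird(): scalar inputs, a plausible relative
# airmass hard-coded rather than computed, and the module-level np/pd/atmosphere
# imports of pvlib's clearsky module assumed to be in place.
out = bird(zenith=30.0, airmass_relative=1.15, aod380=0.15, aod500=0.10,
           precipitable_water=1.5)
print(float(out['ghi']), float(out['dni']), float(out['dhi']))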
def growth_coefficients(start_date, end_date, ref_date, alpha, samples):
"""
Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0).
a growth rate alpha
t0 start date
t' end date
y' output
y0 start value
"""
start_offset = 0
if ref_date < start_date:
offset_delta = rdelta.relativedelta(start_date, ref_date)
start_offset = offset_delta.months + 12 * offset_delta.years
start_date = ref_date
end_offset = 0
if ref_date > end_date:
offset_delta = rdelta.relativedelta(ref_date, end_date)
end_offset = offset_delta.months + 12 * offset_delta.years
end_date = ref_date
delta_ar = rdelta.relativedelta(ref_date, start_date)
ar = delta_ar.months + 12 * delta_ar.years
delta_br = rdelta.relativedelta(end_date, ref_date)
br = delta_br.months + 12 * delta_br.years
# we place the ref point on the lower interval (delta_ar + 1) but let it start from 0
# in turn we let the upper interval start from 1
g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float)
h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float)
g = np.flipud(g)
# now join the two arrays
a = np.vstack((g, h))
if start_offset > 0:
a = a[start_offset:]
if end_offset > 0:
a = a[:-end_offset]
return a | 0.003432 |
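# Small worked example for growth_coefficients (numpy and dateutil's relativedelta are
# assumed to be imported in the module as np and rdelta, exactly as the body expects):
import datetime
start = datetime.date(2020, 1, 1)
end = datetime.date(2020, 7, 1)
coeffs = growth_coefficients(start, end, start, 0.12, samples=1)
print(coeffs.shape)    # (7, 1): one row per month from t0 through t'
print(coeffs[0, 0])    # 1.0 at the reference month, compounding monthly afterwards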
def update_items(self, ocean_backend, enrich_backend):
"""Retrieve the commits not present in the original repository and delete
the corresponding documents from the raw and enriched indexes"""
fltr = {
'name': 'origin',
'value': [self.perceval_backend.origin]
}
logger.debug("[update-items] Checking commits for %s.", self.perceval_backend.origin)
git_repo = GitRepository(self.perceval_backend.uri, self.perceval_backend.gitpath)
try:
current_hashes = set([commit for commit in git_repo.rev_list()])
except Exception as e:
logger.error("Skip updating branch info for repo %s, git rev-list command failed: %s", git_repo.uri, e)
return
raw_hashes = set([item['data']['commit']
for item in ocean_backend.fetch(ignore_incremental=True, _filter=fltr)])
hashes_to_delete = list(raw_hashes.difference(current_hashes))
to_process = []
for _hash in hashes_to_delete:
to_process.append(_hash)
if len(to_process) != MAX_BULK_UPDATE_SIZE:
continue
# delete documents from the raw index
self.remove_commits(to_process, ocean_backend.elastic.index_url,
'data.commit', self.perceval_backend.origin)
# delete documents from the enriched index
self.remove_commits(to_process, enrich_backend.elastic.index_url,
'hash', self.perceval_backend.origin)
to_process = []
if to_process:
# delete documents from the raw index
self.remove_commits(to_process, ocean_backend.elastic.index_url,
'data.commit', self.perceval_backend.origin)
# delete documents from the enriched index
self.remove_commits(to_process, enrich_backend.elastic.index_url,
'hash', self.perceval_backend.origin)
logger.debug("[update-items] %s commits deleted from %s with origin %s.",
len(hashes_to_delete), ocean_backend.elastic.anonymize_url(ocean_backend.elastic.index_url),
self.perceval_backend.origin)
logger.debug("[update-items] %s commits deleted from %s with origin %s.",
len(hashes_to_delete), enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url),
self.perceval_backend.origin)
# update branch info
self.delete_commit_branches(enrich_backend)
self.add_commit_branches(git_repo, enrich_backend) | 0.004115 |
def _read_bytes_from_framed_body(self, b):
"""Reads the requested number of bytes from a streaming framed message body.
:param int b: Number of bytes to read
:returns: Bytes read from source stream and decrypted
:rtype: bytes
"""
plaintext = b""
final_frame = False
_LOGGER.debug("collecting %d bytes", b)
while len(plaintext) < b and not final_frame:
_LOGGER.debug("Reading frame")
frame_data, final_frame = deserialize_frame(
stream=self.source_stream, header=self._header, verifier=self.verifier
)
_LOGGER.debug("Read complete for frame %d", frame_data.sequence_number)
if frame_data.sequence_number != self.last_sequence_number + 1:
raise SerializationError("Malformed message: frames out of order")
self.last_sequence_number += 1
aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
content_type=self._header.content_type, is_final_frame=frame_data.final_frame
)
associated_data = assemble_content_aad(
message_id=self._header.message_id,
aad_content_string=aad_content_string,
seq_num=frame_data.sequence_number,
length=len(frame_data.ciphertext),
)
plaintext += decrypt(
algorithm=self._header.algorithm,
key=self._derived_data_key,
encrypted_data=frame_data,
associated_data=associated_data,
)
plaintext_length = len(plaintext)
_LOGGER.debug("bytes collected: %d", plaintext_length)
if final_frame:
_LOGGER.debug("Reading footer")
self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier)
return plaintext | 0.00468 |
def licenses(source=DEFAULT_LICENSE_FILE):
'''Feed the licenses from a JSON file'''
if source.startswith('http'):
json_licenses = requests.get(source).json()
else:
with open(source) as fp:
json_licenses = json.load(fp)
if len(json_licenses):
log.info('Dropping existing licenses')
License.drop_collection()
for json_license in json_licenses:
flags = []
for field, flag in FLAGS_MAP.items():
if json_license.get(field, False):
flags.append(flag)
license = License.objects.create(
id=json_license['id'],
title=json_license['title'],
url=json_license['url'] or None,
maintainer=json_license['maintainer'] or None,
flags=flags,
active=json_license.get('active', False),
alternate_urls=json_license.get('alternate_urls', []),
alternate_titles=json_license.get('alternate_titles', []),
)
log.info('Added license "%s"', license.title)
try:
License.objects.get(id=DEFAULT_LICENSE['id'])
except License.DoesNotExist:
License.objects.create(**DEFAULT_LICENSE)
log.info('Added license "%s"', DEFAULT_LICENSE['title'])
success('Done') | 0.000776 |
def from_name(cls, name):
"""Create an author by name, automatically populating the hash."""
return Author(name=name, sha512=cls.hash_name(name)) | 0.01227 |
def get_blocks_in_grimm_from_breakpoint_graph(bg):
"""
    :param bg: a breakpoint graph that contains all the information
:type bg: ``bg.breakpoint_graph.BreakpointGraph``
:return: list of strings, which represent genomes present in breakpoint graph as orders of blocks and is compatible with GRIMM format
"""
result = []
genomes = bg.get_overall_set_of_colors()
for genome in genomes:
genome_graph = bg.get_genome_graph(color=genome)
genome_blocks_orders = genome_graph.get_blocks_order()
blocks_orders = genome_blocks_orders[genome]
if len(blocks_orders) > 0:
result.append(">{genome_name}".format(genome_name=genome.name))
for chr_type, blocks_order in blocks_orders:
string = " ".join(value if sign == "+" else sign + value for sign, value in blocks_order)
string += " {chr_type}".format(chr_type=chr_type)
result.append(string)
return result | 0.003846 |
def earningsTodayDF(token='', version=''):
    '''Returns earnings that will be reported today as two arrays: before the open (bto) and after market close (amc).
Each array contains an object with all keys from earnings, a quote object, and a headline key.
https://iexcloud.io/docs/api/#earnings-today
Updates at 9am, 11am, 12pm UTC daily
Args:
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = earningsToday(token, version)
z = []
for k in x:
ds = x[k]
for d in ds:
d['when'] = k
z.extend(ds)
df = pd.io.json.json_normalize(z)
if not df.empty:
df.drop_duplicates(inplace=True)
_toDatetime(df)
_reindex(df, 'symbol')
return df | 0.00375 |
def update_discount_promotion_by_id(cls, discount_promotion_id, discount_promotion, **kwargs):
"""Update DiscountPromotion
Update attributes of DiscountPromotion
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_discount_promotion_by_id(discount_promotion_id, discount_promotion, async=True)
>>> result = thread.get()
:param async bool
:param str discount_promotion_id: ID of discountPromotion to update. (required)
:param DiscountPromotion discount_promotion: Attributes of discountPromotion to update. (required)
:return: DiscountPromotion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_discount_promotion_by_id_with_http_info(discount_promotion_id, discount_promotion, **kwargs)
else:
(data) = cls._update_discount_promotion_by_id_with_http_info(discount_promotion_id, discount_promotion, **kwargs)
return data | 0.006683 |
def get_package_version(self):
"""
Get the version of the package
        :return: the version string reported by ``setup.py --version``
"""
output = subprocess.check_output([
'{}'.format(self.python),
'setup.py',
'--version',
]).decode()
return output.rstrip() | 0.006873 |
def show(uuid):
'''
Show manifest of a given image
uuid : string
uuid of image
CLI Example:
.. code-block:: bash
salt '*' imgadm.show e42f8c84-bbea-11e2-b920-078fab2aab1f
salt '*' imgadm.show plexinc/pms-docker:plexpass
'''
ret = {}
if _is_uuid(uuid) or _is_docker_uuid(uuid):
cmd = 'imgadm show {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode, res['stderr'])
else:
ret = salt.utils.json.loads(res['stdout'])
else:
ret['Error'] = "{} is not a valid uuid.".format(uuid)
return ret | 0.001381 |
def _get_fstype_from_parser(self, fstype=None):
"""Load fstype information from the parser instance."""
if fstype:
self.fstype = fstype
elif self.index in self.disk.parser.fstypes:
self.fstype = self.disk.parser.fstypes[self.index]
elif '*' in self.disk.parser.fstypes:
self.fstype = self.disk.parser.fstypes['*']
elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None:
self.fstype = "?" + self.disk.parser.fstypes['?']
else:
self.fstype = ""
if self.fstype in VOLUME_SYSTEM_TYPES:
self.volumes.vstype = self.fstype
self.fstype = 'volumesystem'
# convert fstype from string to a FileSystemType object
if not isinstance(self.fstype, filesystems.FileSystemType):
if self.fstype.startswith("?"):
fallback = FILE_SYSTEM_TYPES[self.fstype[1:]]
self.fstype = filesystems.FallbackFileSystemType(fallback)
else:
self.fstype = FILE_SYSTEM_TYPES[self.fstype] | 0.002712 |
def callback_save_indices():
    '''Save calibration indices from the Bokeh text inputs'''
import datetime
import os
import pylleo
import yamlord
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, 'cal.yml')
param = (param_select.value).lower().replace('-','_')
region = region_select.value
start = int(start_input.value)
end = int(end_input.value)
msg = '''
Updated calibration times for:<br>
<b>{}/{}</b>
<br>
<br>
        start index: {}<br>
end index: {}<br>
'''.format(param, region, start, end)
output_window.text = output_template.format(msg)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Generalize for Class-ifying
cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
yamlord.write_yaml(cal_dict, cal_yaml_path)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save the indices to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None | 0.003094 |
def logs(self):
""" Returns the list of :class:`~explauto.experiment.log.ExperimentLog`.
        .. note:: The logs will be returned as a vector if repeat was set to one in the :meth:`~explauto.experiment.pool.ExperimentPool.run` method; otherwise it will be a matrix where each row represents the n repetitions of an experiment.
"""
if not hasattr(self, '_logs'):
raise ValueError('You have to run the pool of experiments first!')
logs = self._logs.reshape(-1) if self._logs.shape[1] == 1 else self._logs
return deepcopy(logs) | 0.008666 |
def getSplits(self, login, tableName, maxSplits):
"""
Parameters:
- login
- tableName
- maxSplits
"""
self.send_getSplits(login, tableName, maxSplits)
return self.recv_getSplits() | 0.004673 |
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in vars(self.parsed_data).iteritems():
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader()
sub_parser.load_config(self.extra_args)
self.config._merge(sub_parser.config)
self.extra_args = sub_parser.extra_args | 0.004184 |
def stream(self, date_created_from=values.unset, date_created_to=values.unset,
limit=None, page_size=None):
"""
Streams ExecutionInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time.
:param datetime date_created_to: Only show Executions that started before this this ISO8601 date-time.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
date_created_from=date_created_from,
date_created_to=date_created_to,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | 0.008526 |
def get_widget(name):
"""
Give back a widget class according to his name.
"""
for widget in registry:
if widget.__name__ == name:
return widget
raise WidgetNotFound(
_('The widget %s has not been registered.') % name) | 0.003774 |
def noise_get_turbulence(
n: tcod.noise.Noise,
f: Sequence[float],
oc: float,
typ: int = NOISE_DEFAULT,
) -> float:
"""Return the turbulence noise sampled from the ``f`` coordinate.
Args:
n (Noise): A Noise instance.
f (Sequence[float]): The point to sample the noise from.
typ (int): The noise algorithm to use.
        oc (float): The number of octaves (level of detail). Should be more than 1.
Returns:
float: The sampled noise value.
"""
return float(
lib.TCOD_noise_get_turbulence_ex(
n.noise_c, ffi.new("float[4]", f), oc, typ
)
) | 0.001605 |
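# Hedged usage sketch with python-tcod: the wrapper above lives in tcod's libtcodpy
# compatibility layer, so NOISE_DEFAULT, lib and ffi are assumed to be in scope there.
import tcod.noise
noise = tcod.noise.Noise(dimensions=2)
print(noise_get_turbulence(noise, [0.5, 0.5], 4.0))   # a float, roughly in [0, 1]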
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line | 0.001155 |
def get_account_invitation(self, account_id, invitation_id, **kwargs): # noqa: E501
"""Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) # noqa: E501
return data | 0.001439 |
def _words_by_salience_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None, least_to_most=False):
"""Return words in `vocab` ordered by saliency score."""
saliency = get_word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths)
return _words_by_score(vocab, saliency, least_to_most=least_to_most, n=n) | 0.008798 |
def creation_date(self, value):
"""
The date on which the bond was issued.
        :param value: the issue date, as a date object or a parseable date string
:return:
"""
self._creation_date = parse(value).date() if isinstance(value, type_check) else value | 0.012346 |
def locate(self, pattern):
'''Find sequences matching a pattern.
:param pattern: Sequence for which to find matches.
:type pattern: str
:returns: Indices of pattern matches.
:rtype: list of ints
'''
if self.circular:
if len(pattern) >= 2 * len(self):
raise ValueError('Search pattern longer than searchable ' +
'sequence.')
seq = self + self[:len(pattern) - 1]
return super(NucleicAcid, seq).locate(pattern)
else:
return super(NucleicAcid, self).locate(pattern) | 0.003205 |
def get_variables_by_attributes(self, **kwargs):
""" Returns variables that match specific conditions.
* Can pass in key=value parameters and variables are returned that
contain all of the matches. For example,
>>> # Get variables with x-axis attribute.
>>> vs = nc.get_variables_by_attributes(axis='X')
>>> # Get variables with matching "standard_name" attribute.
>>> nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity')
* Can pass in key=callable parameter and variables are returned if the
callable returns True. The callable should accept a single parameter,
the attribute value. None is given as the attribute value when the
attribute does not exist on the variable. For example,
>>> # Get Axis variables.
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
>>> # Get variables that don't have an "axis" attribute.
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v is None)
>>> # Get variables that have a "grid_mapping" attribute.
>>> vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)
"""
vs = []
has_value_flag = False
for vname in self.variables:
var = self.variables[vname]
for k, v in kwargs.items():
if callable(v):
has_value_flag = v(getattr(var, k, None))
if has_value_flag is False:
break
elif hasattr(var, k) and getattr(var, k) == v:
has_value_flag = True
else:
has_value_flag = False
break
if has_value_flag is True:
vs.append(self.variables[vname])
return vs | 0.003203 |
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
from spyder.widgets.fileswitcher import FileSwitcher
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index) | 0.004073 |
def show(format, full):
""" Print current allocation to the console. """
# load asset allocation
app = AppAggregate()
app.logger = logger
model = app.get_asset_allocation()
if format == "ascii":
formatter = AsciiFormatter()
elif format == "html":
        formatter = HtmlFormatter()
else:
raise ValueError(f"Unknown formatter {format}")
# formatters can display stock information with --full
output = formatter.format(model, full=full)
print(output) | 0.001965 |
def save(self):
"""
Saves the object. If the object has not been saved before (i.e. it's
new), then a new object is created. Otherwise, any changes are
submitted to the API.
"""
if not self.id:
save_method = Panoptes.client().post
force_reload = False
else:
if not self.modified_attributes:
return
if not self._loaded:
self.reload()
save_method = Panoptes.client().put
force_reload = True
response, response_etag = save_method(
self.url(self.id),
json={self._api_slug: self._savable_dict(
modified_attributes=self.modified_attributes
)},
etag=self.etag
)
raw_resource_response = response[self._api_slug][0]
self.set_raw(raw_resource_response, response_etag)
if force_reload:
self._loaded = False
return response | 0.002 |
def lookup_api_key_info():
"""Given a dbapi cursor, lookup all the api keys and their information."""
info = {}
with db_connect() as conn:
with conn.cursor() as cursor:
cursor.execute(ALL_KEY_INFO_SQL_STMT)
for row in cursor.fetchall():
id, key, name, groups = row
user_id = "api_key:{}".format(id)
info[key] = dict(id=id, user_id=user_id,
name=name, groups=groups)
return info | 0.001976 |
def hosted_number_orders(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
"""
if self._hosted_number_orders is None:
self._hosted_number_orders = HostedNumberOrderList(self)
return self._hosted_number_orders | 0.009804 |
def red(numbers):
"""Encode the deltas to reduce entropy."""
line = 0
deltas = []
for value in numbers:
deltas.append(value - line)
line = value
return b64encode(compress(b''.join(chr(i).encode('latin1') for i in deltas))).decode('latin1') | 0.059055 |
def main(_):
"""Run the sample attack"""
eps = FLAGS.max_epsilon / 255.0
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
with tf.Graph().as_default():
x_input = tf.placeholder(tf.float32, shape=batch_shape)
noisy_images = x_input + eps * tf.sign(tf.random_normal(batch_shape))
x_output = tf.clip_by_value(noisy_images, 0.0, 1.0)
with tf.Session(FLAGS.master) as sess:
for filenames, images in load_images(FLAGS.input_dir, batch_shape):
out_images = sess.run(x_output, feed_dict={x_input: images})
save_images(out_images, filenames, FLAGS.output_dir) | 0.009615 |
def _sighash_anyone_can_pay(self, index, copy_tx, sighash_type):
'''
int, byte-like, Tx, int -> bytes
Applies SIGHASH_ANYONECANPAY procedure.
Should be called by another SIGHASH procedure.
Not on its own.
https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
'''
# The txCopy input vector is resized to a length of one.
copy_tx_ins = [copy_tx.tx_ins[index]]
copy_tx = copy_tx.copy(tx_ins=copy_tx_ins)
return self._sighash_final_hashing(
copy_tx, sighash_type | shared.SIGHASH_ANYONECANPAY) | 0.003226 |
def explore(node):
""" Given a node, explores on relatives, siblings and children
:param node: GraphNode from which to explore
:return: set of explored GraphNodes
"""
explored = set()
explored.add(node)
dfs(node, callback=lambda n: explored.add(n))
return explored | 0.003378 |
def noninteractive_changeset_update(self, fqn, template, old_parameters,
parameters, stack_policy, tags,
**kwargs):
"""Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using noninterative changeset provider mode "
"for %s.", fqn)
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
self.deal_with_changeset_stack_policy(fqn, stack_policy)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
) | 0.00242 |
def _returnPD(self, code, tablename, **kwargs):
"""
private function to take a sas code normally to create a table, generate pandas data frame and cleanup.
:param code: string of SAS code
:param tablename: the name of the SAS Data Set
:param kwargs:
:return: Pandas Data Frame
"""
libref = kwargs.get('libref','work')
ll = self.sas._io.submit(code)
check, errorMsg = self._checkLogForError(ll['LOG'])
if not check:
raise ValueError("Internal code execution failed: " + errorMsg)
if isinstance(tablename, str):
pd = self.sas.sasdata2dataframe(tablename, libref)
self.sas._io.submit("proc delete data=%s.%s; run;" % (libref, tablename))
elif isinstance(tablename, list):
pd = dict()
for t in tablename:
# strip leading '_' from names and capitalize for dictionary labels
if self.sas.exist(t, libref):
pd[t.replace('_', '').capitalize()] = self.sas.sasdata2dataframe(t, libref)
self.sas._io.submit("proc delete data=%s.%s; run;" % (libref, t))
else:
raise SyntaxError("The tablename must be a string or list %s was submitted" % str(type(tablename)))
return pd | 0.007593 |
def set_cfg_value(config, section, option, value):
"""Set configuration value."""
if isinstance(value, list):
value = '\n'.join(value)
config[section][option] = value | 0.005376 |
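A quick usage sketch, assuming config behaves like a configparser.ConfigParser with the target section already present; list values are flattened to one newline-separated string. Section and option names here are made up.
import configparser

config = configparser.ConfigParser()
config["server"] = {}
set_cfg_value(config, "server", "hosts", ["alpha.example.com", "beta.example.com"])
# config["server"]["hosts"] is now "alpha.example.com\nbeta.example.com"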
def get_command_line(self):
"""Returns the command line for the job."""
        # In Python 2, the stored command line is unicode, which must be encoded to bytes before unpickling;
        # in Python 3, the command line is bytes, which can be unpickled directly.
return loads(self.command_line) if isinstance(self.command_line, bytes) else loads(self.command_line.encode()) | 0.00813 |
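A small round-trip illustration of the storage convention the comment describes, assuming loads/dumps come from pickle as the comment suggests; the job arguments are made up.
from pickle import dumps, loads

stored = dumps(["run_job", "--input", "data.csv"])      # bytes under Python 3
assert loads(stored) == ["run_job", "--input", "data.csv"]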
def ma_fitplane(bma, gt=None, perc=(2,98), origmask=True):
"""Fit a plane to values in input array
"""
if gt is None:
gt = [0, 1, 0, 0, 0, -1]
#Filter, can be useful to remove outliers
if perc is not None:
from pygeotools.lib import filtlib
bma_f = filtlib.perc_fltr(bma, perc)
else:
bma_f = bma
#Get indices
x_f, y_f = get_xy_ma(bma_f, gt, origmask=origmask)
#Regardless of desired output (origmask True or False), for fit, need to limit to valid pixels only
bma_f_mask = np.ma.getmaskarray(bma_f)
#Create xyz stack, needed for SVD
xyz = np.vstack((np.ma.array(x_f, mask=bma_f_mask).compressed(), \
np.ma.array(y_f, mask=bma_f_mask).compressed(), bma_f.compressed())).T
#coeff = fitPlaneSVD(xyz)
coeff = fitPlaneLSQ(xyz)
print(coeff)
vals = coeff[0]*x_f + coeff[1]*y_f + coeff[2]
resid = bma_f - vals
return vals, resid, coeff | 0.013757 |
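The fitPlaneLSQ helper used above is not included in the snippet; the sketch below shows what a least-squares plane fit of that shape might look like — a hypothetical reconstruction, not the library's actual implementation.
import numpy as np

def fitPlaneLSQ(xyz):
    # Fit z = a*x + b*y + c to an N x 3 array of (x, y, z) samples.
    A = np.column_stack((xyz[:, 0], xyz[:, 1], np.ones(len(xyz))))
    coeff, _, _, _ = np.linalg.lstsq(A, xyz[:, 2], rcond=None)
    return coeff   # (a, b, c), matching how ma_fitplane evaluates the plane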
def getCatalogFile(catalog_dir, mc_source_id):
"""
Inputs:
catalog_dir = string corresponding to directory containing the stellar catalog infiles
mc_source_id = integer corresponding the target MC_SOURCE_ID value
Outputs:
catalog_infile = string corresponding to filename of stellar catalog containing mc_source_id
"""
catalog_infiles = sorted(glob.glob(catalog_dir + '/*catalog*.fits'))
mc_source_id_array = []
catalog_infile_index_array = []
for ii, catalog_infile in enumerate(catalog_infiles):
mc_source_id_min = int(os.path.basename(catalog_infile).split('.')[0].split('mc_source_id_')[-1].split('-')[0])
mc_source_id_max = int(os.path.basename(catalog_infile).split('.')[0].split('mc_source_id_')[-1].split('-')[1])
        assert (mc_source_id_max > mc_source_id_min) & (mc_source_id_min >= 1), 'Found invalid MC_SOURCE_ID values in filenames'
mc_source_id_array.append(np.arange(mc_source_id_min, mc_source_id_max + 1))
catalog_infile_index_array.append(np.tile(ii, 1 + (mc_source_id_max - mc_source_id_min)))
mc_source_id_array = np.concatenate(mc_source_id_array)
catalog_infile_index_array = np.concatenate(catalog_infile_index_array)
assert len(mc_source_id_array) == len(np.unique(mc_source_id_array)), 'Found non-unique MC_SOURCE_ID values in filenames'
assert np.in1d(mc_source_id, mc_source_id_array), 'Requested MC_SOURCE_ID value not among files'
mc_source_id_index = np.nonzero(mc_source_id == mc_source_id_array)[0]
return catalog_infiles[catalog_infile_index_array[mc_source_id_index]] | 0.006173 |
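A worked example of the filename convention the parsing above assumes; the file name itself is hypothetical.
name = "stellar_catalog_mc_source_id_000001-000100.fits"
stem = name.split('.')[0].split('mc_source_id_')[-1]    # "000001-000100"
lo, hi = (int(x) for x in stem.split('-'))              # (1, 100)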
def get_items_of_reminder_per_page(self, reminder_id, per_page=1000, page=1):
"""
Get items of reminder per page
:param reminder_id: the reminder id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=REMINDER_ITEMS,
per_page=per_page,
page=page,
params={'reminder_id': reminder_id},
) | 0.003914 |
def get_args(get_item):
"""Parse env, key, default out of input dict.
Args:
get_item: dict. contains keys env/key/default
Returns:
(env, key, has_default, default) tuple, where
env: str. env var name.
key: str. save env value to this context key.
has_default: bool. True if default specified.
default: the value of default, if specified.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: If env or key not found in get_config.
"""
if not isinstance(get_item, dict):
raise ContextError('envGet must contain a list of dicts.')
env = get_item.get('env', None)
if not env:
raise KeyNotInContextError(
'context envGet[env] must exist in context for envGet.')
key = get_item.get('key', None)
if not key:
raise KeyNotInContextError(
'context envGet[key] must exist in context for envGet.')
if 'default' in get_item:
has_default = True
default = get_item['default']
else:
has_default = False
default = None
return (env, key, has_default, default) | 0.000844 |
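Worked examples, with made-up env/key names:
env, key, has_default, default = get_args(
    {'env': 'HOME', 'key': 'homeDir', 'default': '/tmp'})
# -> ('HOME', 'homeDir', True, '/tmp')
get_args({'env': 'HOME'})         # raises KeyNotInContextError: 'key' missing
get_args(['not', 'a', 'dict'])    # raises ContextError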
def is_orthogonal(dicoms, log_details=False):
"""
Validate that volume is orthonormal
:param dicoms: check that we have a volume without skewing
"""
first_image_orient1 = numpy.array(dicoms[0].ImageOrientationPatient)[0:3]
first_image_orient2 = numpy.array(dicoms[0].ImageOrientationPatient)[3:6]
first_image_pos = numpy.array(dicoms[0].ImagePositionPatient)
last_image_pos = numpy.array(dicoms[-1].ImagePositionPatient)
first_image_dir = numpy.cross(first_image_orient1, first_image_orient2)
first_image_dir /= numpy.linalg.norm(first_image_dir)
combined_dir = last_image_pos - first_image_pos
combined_dir /= numpy.linalg.norm(combined_dir)
if not numpy.allclose(first_image_dir, combined_dir, rtol=0.05, atol=0.05) \
and not numpy.allclose(first_image_dir, -combined_dir, rtol=0.05, atol=0.05):
if log_details:
logger.warning('Orthogonality check failed: non cubical image')
logger.warning('---------------------------------------------------------')
logger.warning(first_image_dir)
logger.warning(combined_dir)
logger.warning('---------------------------------------------------------')
return False
return True | 0.003956 |
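A toy check with stand-in slice objects (the real inputs are pydicom datasets): an axial stack whose positions advance along the row x column normal passes the test.
from types import SimpleNamespace
slices = [SimpleNamespace(ImageOrientationPatient=[1, 0, 0, 0, 1, 0],
                          ImagePositionPatient=[0, 0, float(z)])
          for z in range(5)]
print(is_orthogonal(slices))   # True: slice direction matches the plane normal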
def _on_loop_end(self, variables):
"""
performs on-loop-end actions like callbacks
variables contains local namespace variables.
Parameters
        ----------
variables : dict of available variables
Returns
-------
None
"""
for callback in self.callbacks:
if hasattr(callback, 'on_loop_end'):
self.logs_[str(callback)].append(callback.on_loop_end(**variables)) | 0.006383 |
def _cmp_date(self):
"""Returns Calendar date used for comparison.
Use the earliest date out of all CalendarDates in this instance,
or some date in the future if there are no CalendarDates (e.g.
when Date is a phrase).
"""
dates = sorted(val for val in self.kw.values()
if isinstance(val, CalendarDate))
if dates:
return dates[0]
# return date very far in the future
return CalendarDate() | 0.004032 |
def Downsampled(cls, stats, interval=None):
"""Constructs a copy of given stats but downsampled to given interval.
Args:
stats: A `ClientStats` instance.
interval: A downsampling interval.
Returns:
A downsampled `ClientStats` instance.
"""
interval = interval or cls.DEFAULT_SAMPLING_INTERVAL
result = cls(stats)
result.cpu_samples = cls._Downsample(
kind=CpuSample, samples=stats.cpu_samples, interval=interval)
result.io_samples = cls._Downsample(
kind=IOSample, samples=stats.io_samples, interval=interval)
return result | 0.001678 |
def ssgsea(data, gene_sets, outdir="ssGSEA_", sample_norm_method='rank', min_size=15, max_size=2000,
permutation_num=0, weighted_score_type=0.25, scale=True, ascending=False, processes=1,
figsize=(7,6), format='pdf', graph_num=20, no_plot=False, seed=None, verbose=False):
"""Run Gene Set Enrichment Analysis with single sample GSEA tool
:param data: Expression table, pd.Series, pd.DataFrame, GCT file, or .rnk file format.
    :param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input as GSEA.
:param outdir: Results output directory.
:param str sample_norm_method: "Sample normalization method. Choose from {'rank', 'log', 'log_rank'}. Default: rank.
1. 'rank': Rank your expression data, and transform by 10000*rank_dat/gene_numbers
2. 'log' : Do not rank, but transform data by log(data + exp(1)), while data = data[data<1] =1.
3. 'log_rank': Rank your expression data, and transform by log(10000*rank_dat/gene_numbers+ exp(1))
4. 'custom': Do nothing, and use your own rank value to calculate enrichment score.
see here: https://github.com/GSEA-MSigDB/ssGSEAProjection-gpmodule/blob/master/src/ssGSEAProjection.Library.R, line 86
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 2000.
:param int permutation_num: Number of permutations for significance computation. Default: 0.
    :param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 0.25.
:param bool scale: If True, normalize the scores by number of genes in the gene sets.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of Processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [7,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If equals to True, no figure will be drawn. Default: False.
:param seed: Random seed. expect an integer. Default:None.
:param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Return a ssGSEA obj.
All results store to a dictionary, access enrichment score by obj.resultsOnSamples,
and normalized enrichment score by obj.res2d.
if permutation_num > 0, additional results contain::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes, if permutation_num >0}
"""
ss = SingleSampleGSEA(data, gene_sets, outdir, sample_norm_method, min_size, max_size,
permutation_num, weighted_score_type, scale, ascending,
processes, figsize, format, graph_num, no_plot, seed, verbose)
ss.run()
return ss | 0.008009 |
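A minimal sketch of the 'rank' sample normalization described in the docstring (10000 * rank / number_of_genes), written independently here as an illustration rather than taken from the package; the rank method used is an assumption.
import pandas as pd

def rank_normalize(expr: pd.DataFrame) -> pd.DataFrame:
    # Rank genes within each sample (column), then rescale as described above.
    ranked = expr.rank(axis=0, method="average")
    return 10000 * ranked / expr.shape[0]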
def queue(self, name, url=None, method=None, reservation_sid=None,
post_work_activity_sid=None, **kwargs):
"""
Create a <Queue> element
:param name: Queue name
:param url: Action URL
:param method: Action URL method
:param reservation_sid: TaskRouter Reservation SID
:param post_work_activity_sid: TaskRouter Activity SID
:param kwargs: additional attributes
:returns: <Queue> element
"""
return self.nest(Queue(
name,
url=url,
method=method,
reservation_sid=reservation_sid,
post_work_activity_sid=post_work_activity_sid,
**kwargs
)) | 0.004178 |
def make_primitive_extrapolate_ends(cas_coords, smoothing_level=2):
"""Generates smoothed helix primitives and extrapolates lost ends.
Notes
-----
From an input list of CA coordinates, the running average is
calculated to form a primitive. The smoothing_level dictates how
many times to calculate the running average. A higher
smoothing_level generates a 'smoother' primitive - i.e. the
points on the primitive more closely fit a smooth curve in R^3.
Each time the smoothing level is increased by 1, a point is lost
from either end of the primitive. To correct for this, the primitive
is extrapolated at the ends to approximate the lost values. There
is a trade-off then between the smoothness of the primitive and
its accuracy at the ends.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
smoothing_level : int
Number of times to run the averaging.
Returns
-------
final_primitive : list(numpy.array)
Each array has length 3.
"""
try:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level)
except ValueError:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level - 1)
# if returned smoothed primitive is too short, lower the smoothing
# level and try again.
if len(smoothed_primitive) < 3:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level - 1)
final_primitive = []
for ca in cas_coords:
prim_dists = [distance(ca, p) for p in smoothed_primitive]
closest_indices = sorted([x[0] for x in sorted(
enumerate(prim_dists), key=lambda k: k[1])[:3]])
a, b, c = [smoothed_primitive[x] for x in closest_indices]
ab_foot = find_foot(a, b, ca)
bc_foot = find_foot(b, c, ca)
ca_foot = (ab_foot + bc_foot) / 2
final_primitive.append(ca_foot)
return final_primitive | 0.000476 |
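find_foot and distance are assumed helpers not shown in the snippet; a plausible find_foot — the foot of the perpendicular dropped from a point onto the line through two others — might look like this:
import numpy as np

def find_foot(a, b, p):
    # Project p onto the infinite line through a and b.
    a, b, p = (np.asarray(x, dtype=float) for x in (a, b, p))
    ab = b - a
    t = np.dot(p - a, ab) / np.dot(ab, ab)
    return a + t * ab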
def can_revert(self):
"""
Return True if we can revert the draft version of the document to the
currently published version.
"""
if self.can_publish:
with self.published_context():
return self.count(Q._uid == self._uid) > 0
return False | 0.006369 |
def bokehjssrcdir(self):
''' The absolute path of the BokehJS source code in the installed
Bokeh source tree.
'''
if self._is_dev or self.debugjs:
bokehjssrcdir = abspath(join(ROOT_DIR, '..', 'bokehjs', 'src'))
if isdir(bokehjssrcdir):
return bokehjssrcdir
return None | 0.005698 |
def setDecel(self, vehID, decel):
"""setDecel(string, double) -> None
Sets the preferred maximal deceleration in m/s^2 for this vehicle.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_DECEL, vehID, decel) | 0.007273 |
def _validate_entries(self, processes, callback=None):
"""
Verify that the actual file contents match the recorded hashes stored in the manifest files
"""
errors = list()
if os.name == 'posix':
worker_init = posix_multiprocessing_worker_initializer
else:
worker_init = None
args = ((self.path,
self.normalized_filesystem_names.get(rel_path, rel_path),
hashes,
self.algorithms) for rel_path, hashes in self.entries.items())
try:
if processes == 1:
count = 0
hash_results = []
totalHashes = len(self.entries.items())
for i in args:
hash_results.append(_calc_hashes(i))
count += 1
if callback:
if not callback(count, totalHashes):
raise BaggingInterruptedError("Bag validation interrupted!")
else:
pool = None
try:
pool = multiprocessing.Pool(processes if processes else None, initializer=worker_init)
hash_results = pool.map(_calc_hashes, args)
finally:
if pool:
pool.terminate()
except BaggingInterruptedError:
raise
# Any unhandled exceptions are probably fatal
except:
LOGGER.error(_("Unable to calculate file hashes for %s"), self)
raise
for rel_path, f_hashes, hashes in hash_results:
for alg, computed_hash in f_hashes.items():
stored_hash = hashes[alg]
if stored_hash.lower() != computed_hash:
e = ChecksumMismatch(rel_path, alg, stored_hash.lower(), computed_hash)
LOGGER.warning(force_unicode(e))
errors.append(e)
if errors:
raise BagValidationError(_("Bag validation failed"), errors) | 0.003396 |
def as_instruction(self, specification):
"""Convert the specification into an instruction
:param specification: a specification with a key
:data:`knittingpattern.Instruction.TYPE`
The instruction is not added.
.. seealso:: :meth:`add_instruction`
"""
instruction = self._instruction_class(specification)
type_ = instruction.type
if type_ in self._type_to_instruction:
instruction.inherit_from(self._type_to_instruction[type_])
return instruction | 0.00369 |
def _generate_provenance(self):
"""Function to generate provenance at the end of the IF."""
# noinspection PyTypeChecker
hazard = definition(
self._provenance['hazard_keywords']['hazard'])
exposures = [
definition(layer.keywords['exposure']) for layer in self.exposures
]
# InaSAFE
set_provenance(
self._provenance, provenance_impact_function_name, self.name)
set_provenance(
self._provenance,
provenance_analysis_extent,
self._analysis_extent.asWkt())
set_provenance(
self._provenance,
provenance_analysis_question,
get_multi_exposure_analysis_question(hazard, exposures))
set_provenance(
self._provenance,
provenance_data_store_uri,
self.datastore.uri_path)
# Map title
set_provenance(self._provenance, provenance_map_title, self.name)
# CRS
set_provenance(
self._provenance, provenance_crs, self._crs.authid())
# Debug mode
set_provenance(
self._provenance, provenance_debug_mode, self.debug_mode)
self._provenance_ready = True | 0.00161 |
def get_results():
"""Parse all search result pages."""
base = "http://www.smackjeeves.com/search.php?submit=Search+for+Webcomics&search_mode=webcomics&comic_title=&special=all&last_update=3&style_all=on&genre_all=on&format_all=on&sort_by=2&start=%d"
session = requests.Session()
# store info in a dictionary {name -> url, number of comics, adult flag, bounce flag}
res = {}
# a search for an empty string returned 286 result pages
result_pages = 286
print("Parsing", result_pages, "search result pages...", file=sys.stderr)
for i in range(0, result_pages):
print(i+1, file=sys.stderr, end=" ")
handle_url(base % (i*12), session, res)
save_result(res, json_file) | 0.004178 |
def update1(self, key: str, data: np.ndarray, size: int) -> None:
""" Update one entry in specific record in datastore """
print(data)
if key in self.get_keys():
self.data[key][data[0]] = data
else:
newdata = np.zeros((size, 6))
newdata[data[0]] = data
self.data[key] = newdata | 0.005602 |
def parse(
text = None,
humour = 75
):
"""
    Parse input text using various triggers, some returning text and some
    engaging functions. A trigger returns text or True if it fires, and False
    if it does not. If no triggers fire, return False; if one trigger fires,
    return the value it produced; if multiple triggers fire, return a list of
    the values they produced.
Options such as humour engage or disengage various triggers.
"""
triggers = []
# general
if humour >= 75:
triggers.extend([
trigger_keyphrases(
text = text,
keyphrases = [
"image"
],
response = "http://i.imgur.com/MiqrlTh.jpg"
),
trigger_keyphrases(
text = text,
keyphrases = [
"sup",
"hi"
],
response = "sup home bean"
),
trigger_keyphrases(
text = text,
keyphrases = [
"thanks",
"thank you"
],
response = "you're welcome, boo ;)"
)
])
# information
triggers.extend([
trigger_keyphrases(
text = text,
keyphrases = [
"where are you",
"IP",
"I.P.",
"IP address",
"I.P. address",
"ip address"
],
function = report_IP
),
trigger_keyphrases(
text = text,
keyphrases = [
"how are you",
"are you well",
"status"
],
function = report_system_status,
kwargs = {"humour": humour}
),
trigger_keyphrases(
text = text,
keyphrases = [
"heartbeat"
],
function = heartbeat_message
),
trigger_keyphrases(
text = text,
keyphrases = [
"METAR"
],
function = report_METAR,
kwargs = {"text": text}
),
trigger_keyphrases(
text = text,
keyphrases = [
"TAF"
],
            function = report_TAF,
kwargs = {"text": text}
),
trigger_keyphrases(
text = text,
keyphrases = [
"rain"
],
            function = report_rain_times,
kwargs = {"text": text}
)
])
# actions
triggers.extend([
trigger_keyphrases(
text = text,
keyphrases = [
"command",
"run command",
"engage command",
"execute command"
],
response = command()
),
trigger_keyphrases(
text = text,
keyphrases = [
"restart"
],
function = restart,
confirm = True,
confirmation_prompt = "Do you want to restart this "
"program? (y/n)",
confirmation_feedback_confirm = "confirm restart",
confirmation_feedback_deny = "deny restart"
)
])
if any(triggers):
responses = [response for response in triggers if response]
if len(responses) > 1:
return responses
else:
return responses[0]
else:
return False | 0.027946 |
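parse() is built on a trigger_keyphrases() helper that the snippet does not define; the sketch below covers only the response/function/kwargs paths exercised above (the confirm-related arguments are accepted but ignored), so treat it as an illustration rather than the real helper.
def trigger_keyphrases(text=None, keyphrases=None, response=None,
                       function=None, kwargs=None, **_ignored):
    # Fire when any keyphrase appears in the text; otherwise report False.
    if not text or not any(phrase in text for phrase in keyphrases):
        return False
    if function is not None:
        return function(**kwargs) if kwargs else function()
    return response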
def get(self, endpoint, params=None):
"""Send an HTTP GET request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param params: URL parameters.
:type params: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
response = self._session.get(
url=self._url + endpoint,
params=params,
timeout=self._timeout
)
return self._handle_response(response) | 0.003419 |
def svd_to_stream(uvectors, stachans, k, sampling_rate):
"""
Convert the singular vectors output by SVD to streams.
One stream will be generated for each singular vector level,
for all channels. Useful for plotting, and aiding seismologists thinking
of waveforms!
    :type uvectors: list
    :param uvectors: List of :class:`numpy.ndarray` Singular vectors
:type stachans: list
:param stachans: List of station.channel Strings
:type k: int
:param k: Number of streams to return = number of SV's to include
:type sampling_rate: float
:param sampling_rate: Sampling rate in Hz
:returns:
svstreams, List of :class:`obspy.core.stream.Stream`, with
svStreams[0] being composed of the highest rank singular vectors.
"""
svstreams = []
for i in range(k):
svstream = []
for j, stachan in enumerate(stachans):
if len(uvectors[j]) <= k:
warnings.warn('Too few traces at %s for a %02d dimensional '
'subspace. Detector streams will not include '
                              'this channel.' % ('.'.join((stachan[0],
                                                           stachan[1])), k))
else:
svstream.append(Trace(uvectors[j][i],
header={'station': stachan[0],
'channel': stachan[1],
'sampling_rate': sampling_rate}))
svstreams.append(Stream(svstream))
return svstreams | 0.00063 |
def generate_random_sframe(num_rows, column_codes, random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
    d: dictionary with between 0 and 10 string keys from a
       pool of 100 unique keys, and random 0-1 values.
    D: dictionary with between 0 and 100 string keys from a
       pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, False, 0)
X.__materialize__()
return X | 0.006673 |
def readTupleQuotes(self, symbol, start, end):
''' read quotes as tuple '''
if end is None:
            end = sys.maxsize  # sys.maxint no longer exists under Python 3
session=self.getReadSession()()
try:
rows=session.query(Quote).filter(and_(Quote.symbol == symbol,
Quote.time >= int(start),
Quote.time < int(end)))
finally:
self.getReadSession().remove()
return rows | 0.015504 |