text (stringlengths 78-104k) | score (float64, 0-0.18) |
---|---|
def construct(key_data, algorithm=None):
"""
Construct a Key object for the given algorithm with the given
key_data.
"""
# Allow for pulling the algorithm off of the passed in jwk.
if not algorithm and isinstance(key_data, dict):
algorithm = key_data.get('alg', None)
if not algorithm:
raise JWKError('Unable to find an algorithm for key: %s' % key_data)
key_class = get_key(algorithm)
if not key_class:
raise JWKError('Unable to find an algorithm for key: %s' % key_data)
return key_class(key_data, algorithm) | 0.001739 |
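A hedged usage sketch (the key material below is a made-up placeholder; the dict layout follows the JWK convention the function reads 'alg' from):
# Hypothetical example -- 'c2VjcmV0' is just base64url for "secret".
hmac_key = construct({'kty': 'oct', 'alg': 'HS256', 'k': 'c2VjcmV0'})
# Equivalent call with the algorithm passed explicitly instead of via 'alg':
hmac_key = construct({'kty': 'oct', 'k': 'c2VjcmV0'}, algorithm='HS256')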
def get_changes(self, name, *args):
"""Return a list of changes for the named refactoring action.
Changes are dictionaries describing a single action to be
taken for the refactoring to be successful.
A change has an action and possibly a type. In the description
below, the action is before the slash and the type after it.
change: Change file contents
- file: The path to the file to change
- contents: The new contents for the file
- diff: A unified diff showing the changes introduced
create/file: Create a new file
- file: The file to create
create/directory: Create a new directory
- path: The directory to create
move/file: Rename a file
- source: The path to the source file
- destination: The path to the destination file name
move/directory: Rename a directory
- source: The path to the source directory
- destination: The path to the destination directory name
delete/file: Delete a file
- file: The file to delete
delete/directory: Delete a directory
- path: The directory to delete
"""
if not name.startswith("refactor_"):
raise ValueError("Bad refactoring name {0}".format(name))
method = getattr(self, name)
if not method.refactor_notes.get('available', True):
raise RuntimeError("Method not available")
return method(*args) | 0.001337 |
def delete_certificate(ctx, slot, management_key, pin):
"""
Delete a certificate.
Delete a certificate from a slot on the YubiKey.
"""
controller = ctx.obj['controller']
_ensure_authenticated(ctx, controller, pin, management_key)
controller.delete_certificate(slot) | 0.003401 |
def default_listener(col_attr, default):
"""Establish a default-setting listener."""
@event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
def init_scalar(target, value, dict_):
if default.is_callable:
# the callable of ColumnDefault always accepts a context argument
value = default.arg(None)
elif default.is_scalar:
value = default.arg
else:
raise NotImplementedError(
"Can't invoke pre-default for a SQL-level column default")
dict_[col_attr.key] = value
return value | 0.001642 |
def expand_role(self, role):
"""Expand an IAM role name into an ARN.
If the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full
ARN and return it.
Args:
role (str): An AWS IAM role (either name or full ARN).
Returns:
str: The corresponding AWS IAM role ARN.
"""
if '/' in role:
return role
else:
return self.boto_session.resource('iam').Role(role).arn | 0.005792 |
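A hedged usage sketch (the account ID, role name, and `session` object are made up): a bare role name is expanded through IAM, while a string already containing '/' is treated as a full ARN and returned unchanged.
# 'session' is assumed to be an instance of the class defining expand_role.
arn = session.expand_role('MyTrainingRole')
# -> e.g. 'arn:aws:iam::123456789012:role/MyTrainingRole', looked up via boto3
arn = session.expand_role('arn:aws:iam::123456789012:role/MyTrainingRole')
# -> returned as-is, since the string already contains '/'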
def inspect(self):
"""Inspect device requests and sensors, update model
Returns
-------
Tornado future that resolves with:
model_changes : Nested AttrDict or None
Contains sets of added/removed request/sensor names
Example structure:
{'requests': {
'added': set(['req1', 'req2']),
'removed': set(['req10', 'req20'])}
'sensors': {
'added': set(['sens1', 'sens2']),
'removed': set(['sens10', 'sens20'])}
}
If there are no changes, keys may be omitted. If an item is in both
the 'added' and 'removed' sets, that means it changed.
If neither request nor sensor changes are present, None is returned
instead of a nested structure.
"""
timeout_manager = future_timeout_manager(self.sync_timeout)
sensor_index_before = copy.copy(self._sensors_index)
request_index_before = copy.copy(self._requests_index)
try:
request_changes = yield self.inspect_requests(
timeout=timeout_manager.remaining())
sensor_changes = yield self.inspect_sensors(
timeout=timeout_manager.remaining())
except Exception:
# Ensure atomicity of sensor and request updates; if one
# fails, the other should act as if it has failed too.
self._sensors_index = sensor_index_before
self._requests_index = request_index_before
raise
model_changes = AttrDict()
if request_changes:
model_changes.requests = request_changes
if sensor_changes:
model_changes.sensors = sensor_changes
if model_changes:
raise Return(model_changes) | 0.001085 |
def validate(self, processes=1, fast=False, completeness_only=False, callback=None):
"""Checks the structure and contents are valid.
If you supply the parameter fast=True the Payload-Oxum (if present) will
be used to check that the payload files are present and accounted for,
instead of re-calculating fixities and comparing them against the
manifest. By default validate() will re-calculate fixities (fast=False).
"""
self._validate_structure()
self._validate_bagittxt()
self._validate_fetch()
self._validate_contents(processes=processes, fast=fast, completeness_only=completeness_only, callback=callback)
return True | 0.008427 |
def plot(self, axis, ith_plot, total_plots, limits):
"""
Plot the histogram as a whole over all groups.
Do not plot as individual groups like other plot types.
"""
print(self.plot_type_str.upper() + " plot")
print("%5s %9s %s" % ("id", " #points", "group"))
for idx, group in enumerate(self.groups):
print("%5s %9s %s" % (idx + 1, len(self.groups[group]), group))
print('')
datasets = []
colors = []
minx = np.inf
maxx = -np.inf
for idx, group in enumerate(self.groups):
x = date2num([logevent.datetime
for logevent in self.groups[group]])
minx = min(minx, min(x))
maxx = max(maxx, max(x))
datasets.append(x)
color, marker = self.color_map(group)
colors.append(color)
if total_plots > 1:
# if more than one plot, move histogram to twin axis on the right
twin_axis = axis.twinx()
twin_axis.set_ylabel(self.ylabel)
axis.set_zorder(twin_axis.get_zorder() + 1) # put ax ahead of ax2
axis.patch.set_visible(False) # hide the 'canvas'
axis = twin_axis
n_bins = max(1, int((maxx - minx) * 24. * 60. * 60. / self.bucketsize))
if n_bins > 1000:
# warning for too many buckets
print("warning: %i buckets, will take a while to render. "
"consider increasing --bucketsize." % n_bins)
n, bins, artists = axis.hist(datasets, bins=n_bins, align='mid',
log=self.logscale,
histtype="barstacked"
if self.barstacked else "bar",
color=colors, edgecolor="none",
linewidth=0, alpha=0.8, picker=True,
label=map(str, self.groups.keys()))
# scale current y-axis to match min and max values
axis.set_ylim(np.min(n), np.max(n))
# add meta-data for picking
if len(self.groups) > 1:
for g, group in enumerate(self.groups.keys()):
for i in range(len(artists[g])):
artists[g][i]._mt_plot_type = self
artists[g][i]._mt_group = group
artists[g][i]._mt_n = n[g][i]
if self.barstacked:
artists[g][i]._mt_n -= (n[g - 1][i] if g > 0 else 0)
artists[g][i]._mt_bin = bins[i]
else:
for i in range(len(artists)):
artists[i]._mt_plot_type = self
artists[i]._mt_group = group
artists[i]._mt_n = n[i]
artists[i]._mt_bin = bins[i]
return artists | 0.000696 |
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose files need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files | 0.005025 |
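A brief usage sketch (the directory path is hypothetical; os.path.expanduser lets '~' work here):
# Collect all JPG/PNG files directly under the folder, with full paths prepended.
images = list_files('~/datasets/images', suffix=('.jpg', '.png'), prefix=True)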
def log_player_trades_with_port(self, player, to_port, port, to_player):
"""
:param player: catan.game.Player
:param to_port: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param port: catan.board.Port
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
"""
self._log('{0} trades '.format(player.color))
# to_port items
self._log('[')
for i, (num, res) in enumerate(to_port):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log(' to port {0} for '.format(port.type.value))
# to_player items
self._log('[')
for i, (num, res) in enumerate(to_player):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log('\n') | 0.004107 |
def np_lst_sq_xval(vecMdl, aryFuncChnk, aryIdxTrn, aryIdxTst):
"""Least squares fitting in numpy with cross-validation.
"""
varNumXval = aryIdxTrn.shape[-1]
varNumVoxChnk = aryFuncChnk.shape[-1]
# pre-allocate ary to collect cross-validation
# error for every xval fold
aryResXval = np.empty((varNumVoxChnk,
varNumXval),
dtype=np.float32)
# loop over cross-validation folds
for idxXval in range(varNumXval):
# Get pRF time course models for trn and tst:
vecMdlTrn = vecMdl[aryIdxTrn[:, idxXval], :]
vecMdlTst = vecMdl[aryIdxTst[:, idxXval], :]
# Get functional data for trn and tst:
aryFuncChnkTrn = aryFuncChnk[
aryIdxTrn[:, idxXval], :]
aryFuncChnkTst = aryFuncChnk[
aryIdxTst[:, idxXval], :]
# Numpy linalg.lstsq is used to calculate the
# parameter estimates of the current model:
vecTmpPe = np.linalg.lstsq(vecMdlTrn,
aryFuncChnkTrn,
rcond=-1)[0]
# calculate model prediction time course
aryMdlPrdTc = np.dot(vecMdlTst, vecTmpPe)
# calculate residual sum of squares between
# test data and model prediction time course
aryResXval[:, idxXval] = np.sum(
(np.subtract(aryFuncChnkTst,
aryMdlPrdTc))**2, axis=0)
return aryResXval | 0.000679 |
def get_assessment_taken_ids_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentTaken Ids`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.id.IdList) - list of assessment taken ``Ids``
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
id_list = []
for assessment_taken in self.get_assessments_taken_by_banks(bank_ids):
id_list.append(assessment_taken.get_id())
return IdList(id_list) | 0.00365 |
def get_column(columns, column_tys, index):
"""
Get column corresponding to passed-in index from ptr returned
by groupBySum.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column's data type
index (int): index of selected column
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
columns_var = weld_obj.update(columns, tys=WeldVec(column_tys), override=False)
if isinstance(columns, WeldObject):
columns_var = columns.obj_id
weld_obj.dependencies[columns_var] = columns
weld_template = """
map(
%(columns)s,
|elem: %(ty)s| elem.$%(index)s
)
"""
weld_obj.weld_code = weld_template % {"columns": columns_var,
"ty": column_tys,
"index": index}
return weld_obj | 0.002075 |
def MAVOL_serial(self, days, rev=0):
""" see make_serial()
Convert the trading-volume moving average into a list; see def make_serial() for the data format.
"""
return self.make_serial(self.stock_vol, days, rev=rev) | 0.02924 |
def complete(self):
"""
Complete current task
:return:
:rtype: requests.models.Response
"""
return self._post_request(
data='',
endpoint=self.ENDPOINT + '/' + str(self.id) + '/complete'
) | 0.007491 |
def reshape_fortran(tensor, shape):
"""The missing Fortran reshape for mx.NDArray
Parameters
----------
tensor : NDArray
source tensor
shape : NDArray
desired shape
Returns
-------
output : NDArray
reordered result
"""
return tensor.T.reshape(tuple(reversed(shape))).T | 0.002994 |
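A small numpy sketch (not part of the library) showing why the transpose / reversed-shape reshape / transpose trick reproduces a column-major (Fortran-order) reshape; mxnet NDArrays follow the same row-major layout assumed here:
import numpy as np
a = np.arange(6).reshape(2, 3)                    # [[0, 1, 2], [3, 4, 5]]
shape = (3, 2)
trick = a.T.reshape(tuple(reversed(shape))).T     # same steps as reshape_fortran
assert np.array_equal(trick, a.reshape(shape, order='F'))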
def fnr(y, z):
"""False negative rate `fn / (fn + tp)`
"""
tp, tn, fp, fn = contingency_table(y, z)
return fn / (fn + tp) | 0.007299 |
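A self-contained sketch of the same rate (binary 0/1 labels assumed; the library's contingency_table helper is replaced by inline counting):
import numpy as np
def fnr_sketch(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    tp = np.sum((y_true == 1) & (y_pred == 1))  # true positives
    fn = np.sum((y_true == 1) & (y_pred == 0))  # false negatives
    return fn / (fn + tp)
print(fnr_sketch([1, 1, 0, 1], [1, 0, 0, 0]))  # 2 of 3 positives missed -> 0.666...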
def find_free_prefix(self, auth, vrf, args):
""" Finds free prefixes in the sources given in `args`.
* `auth` [BaseAuth]
AAA options.
* `vrf` [vrf]
Full VRF-dict specifying in which VRF the prefix should be
unique.
* `args` [find_free_prefix_args]
Arguments to the find free prefix function.
Returns a list of dicts.
Prefixes can be found in two ways: from a pool or from a prefix.
From a pool
The `args` argument is set to a dict with key :attr:`from-pool` set to a
pool spec. This is the pool from which the prefix will be assigned.
Also the key :attr:`family` needs to be set to the address family (integer
4 or 6) of the requested prefix. Optionally, also the key
:attr:`prefix_length` can be added to the `attr` argument, and will then
override the default prefix length.
Example::
args = {
'from-pool': { 'name': 'CUSTOMER-' },
'family': 6,
'prefix_length': 64
}
From a prefix
Instead of specifying a pool, a prefix which will be searched
for new prefixes can be specified. In `args`, the key
:attr:`from-prefix` is set to a list of prefixes you want to
allocate from and the key :attr:`prefix_length` is set to
the wanted prefix length.
Example::
args = {
'from-prefix': ['192.0.2.0/24'],
'prefix_length': 27
}
The key :attr:`count` can also be set in the `args` argument to specify
how many prefixes that should be returned. If omitted, the default
value is 1000.
The internal backend function :func:`find_free_prefix` is used
internally by the :func:`add_prefix` function to find available
prefixes from the given sources. It's also exposed over XML-RPC,
please see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.find_free_prefix` for full
understanding.
"""
# input sanity
if type(args) is not dict:
raise NipapInputError("invalid input, please provide dict as args")
# TODO: find good default value for max_num
# TODO: let max_num be configurable from configuration file
max_count = 1000
if 'count' in args:
if int(args['count']) > max_count:
raise NipapValueError("count over the maximum result size")
else:
args['count'] = 1
if 'from-pool' in args:
if 'from-prefix' in args:
raise NipapInputError("specify 'from-pool' OR 'from-prefix'")
if 'family' not in args:
raise NipapMissingInputError("'family' must be specified with 'from-pool' mode")
try:
assert int(args['family']) in [ 4, 6 ]
except (TypeError, AssertionError):
raise NipapValueError("incorrect family specified, must be 4 or 6")
elif 'from-prefix' in args:
if type(args['from-prefix']) is not list:
raise NipapInputError("from-prefix should be a list")
if 'from-pool' in args:
raise NipapInputError("specify 'from-pool' OR 'from-prefix'")
if 'prefix_length' not in args:
raise NipapMissingInputError("'prefix_length' must be specified with 'from-prefix'")
if 'family' in args:
raise NipapExtraneousInputError("'family' is superfluous when in 'from-prefix' mode")
# determine prefixes
prefixes = []
wpl = 0
if 'from-pool' in args:
# extract prefixes from
pool_result = self.list_pool(auth, args['from-pool'])
self._logger.debug(args)
if pool_result == []:
raise NipapNonExistentError("Non-existent pool specified")
for p in pool_result[0]['prefixes']:
if self._get_afi(p) == int(args['family']):
prefixes.append(p)
if len(prefixes) == 0:
raise NipapInputError('No prefixes of family %s in pool' % unicode(args['family']))
if 'prefix_length' not in args:
if int(args['family']) == 4:
wpl = pool_result[0]['ipv4_default_prefix_length']
else:
wpl = pool_result[0]['ipv6_default_prefix_length']
afi = None
if 'from-prefix' in args:
for prefix in args['from-prefix']:
prefix_afi = self._get_afi(prefix)
if afi is None:
afi = prefix_afi
elif afi != prefix_afi:
raise NipapInputError("mixing of address-family is not allowed for 'from-prefix' arg")
prefixes.append(prefix)
if 'prefix_length' in args:
try:
wpl = int(args['prefix_length'])
except ValueError:
raise NipapValueError("prefix length must be integer")
# sanity check the wanted prefix length
if afi == 4:
if wpl < 0 or wpl > 32:
raise NipapValueError("the specified wanted prefix length argument must be between 0 and 32 for ipv4")
elif afi == 6:
if wpl < 0 or wpl > 128:
raise NipapValueError("the specified wanted prefix length argument must be between 0 and 128 for ipv6")
# build SQL
params = {}
# TODO: this makes me want to piss my pants
# we should really write a patch to psycopg2 or something to
# properly adapt an python list of texts with values looking
# like prefixes to a postgresql array of inets
sql_prefix = ' UNION '.join('SELECT %(prefix' + unicode(prefixes.index(p)) + ')s AS prefix' for p in prefixes)
for p in prefixes:
params['prefix' + unicode(prefixes.index(p))] = unicode(p)
damp = 'SELECT array_agg((prefix::text)::inet) FROM (' + sql_prefix + ') AS a'
sql = """SELECT * FROM find_free_prefix(%(vrf_id)s, (""" + damp + """), %(prefix_length)s, %(max_result)s) AS prefix"""
v = self._get_vrf(auth, vrf or {}, '')
params['vrf_id'] = v['id']
params['prefixes'] = prefixes
params['prefix_length'] = wpl
params['max_result'] = args['count']
self._execute(sql, params)
res = list()
for row in self._curs_pg:
res.append(unicode(row['prefix']))
return res | 0.002927 |
def _idle(self):
"""
since imaplib doesn't support IMAP4r1 IDLE, we'll do it by hand
"""
socket = None
try:
# build a new command tag (Xnnn) as bytes:
self.command_tag = (self.command_tag + 1) % 1000
command_tag = b"X" + bytes(str(self.command_tag).zfill(3), "ascii")
# make sure we have selected anything before idling:
directories = self.mailbox.split(",")
self.connection.select(directories[0])
socket = self.connection.socket()
# send IDLE command and check response:
socket.write(command_tag + b" IDLE\r\n")
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'IDLE' in time")
if not response.lower().startswith("+ idling"):
raise imaplib.IMAP4.abort("While initializing IDLE: " + str(response))
# wait for changes (EXISTS, EXPUNGE, etc.):
socket.settimeout(self.cache_timeout)
while True:
try:
response = socket.read(4096).decode("ascii")
if response.upper().startswith("* OK"):
continue # ignore '* OK Still here'
else:
break
except socket_error: # IDLE timed out
break
finally: # terminate IDLE command gracefully
if socket is None:
return
socket.settimeout(self.read_timeout)
socket.write(b"DONE\r\n") # important! Can't query IMAP again otherwise
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'DONE' in time")
# sometimes, more messages come in between reading and DONEing; so read them again:
if response.startswith("* "):
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort(
"Server sent more continuations, but no 'DONE' ack"
)
expected_response = (command_tag + b" OK").decode("ascii")
if not response.lower().startswith(expected_response.lower()):
raise imaplib.IMAP4.abort("While terminating IDLE: " + response) | 0.003129 |
def write(self, group_id, handle):
'''Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
desc = self.desc.encode('utf-8')
handle.write(struct.pack('bb', len(name), -group_id))
handle.write(name)
handle.write(struct.pack('<h', 3 + len(desc)))
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
for param in self.params.values():
param.write(group_id, handle) | 0.002886 |
def AddArguments(cls, argument_group):
"""Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--fields', dest='fields', type=str, action='store',
default=cls._DEFAULT_FIELDS, help=(
'Defines which fields should be included in the output.'))
argument_group.add_argument(
'--additional_fields', dest='additional_fields', type=str,
action='store', default='', help=(
'Defines extra fields to be included in the output, in addition to'
' the default fields, which are {0:s}.'.format(
cls._DEFAULT_FIELDS)))
argument_group.add_argument(
'--timestamp_format', dest='timestamp_format', type=str,
action='store', default=cls._DEFAULT_TIMESTAMP_FORMAT, help=(
'Set the timestamp format that will be used in the datetime '
'column of the XLSX spreadsheet.')) | 0.000836 |
def execute_withdrawal(self, withdrawal_params, private_key):
"""
This function is to sign the message generated from the create withdrawal function and submit it to the
blockchain for transfer from the smart contract to the owner's address.
Execution of this function is as follows::
execute_withdrawal(withdrawal_params=create_withdrawal, private_key=kp)
The expected return result for this function is as follows::
{
'event_type': 'withdrawal',
'amount': -100000,
'asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
'status': 'confirming',
'id': '96e5f797-435b-40ab-9085-4e95c6749218',
'blockchain': 'neo',
'reason_code': 9,
'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
'transaction_hash': None,
'created_at': '2018-08-05T10:03:58.885Z',
'updated_at': '2018-08-05T10:03:59.828Z',
'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82'
}
:param withdrawal_params: Dictionary from the create withdrawal function to sign and submit to the blockchain.
:type withdrawal_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign the withdrawal message.
:type private_key: KeyPair or str
:return: Dictionary with the status of the withdrawal request and blockchain transaction details.
"""
withdrawal_id = withdrawal_params['id']
api_params = self.sign_execute_withdrawal_function[self.blockchain](withdrawal_params, private_key)
return self.request.post(path='/withdrawals/{}/broadcast'.format(withdrawal_id), json_data=api_params) | 0.004915 |
def update_xml_element(self):
"""
Updates the xml element contents to match the instance contents.
:returns: Updated XML element.
:rtype: lxml.etree._Element
"""
if not hasattr(self, 'xml_element'):
self.xml_element = etree.Element(self.name, nsmap=NSMAP)
for element in self.xml_element:
self.xml_element.remove(element)
self.xml_element.tail = ''
self.xml_element.text = self.convert_html_to_xml()
return self.xml_element | 0.003745 |
def get_2q_nodes(self):
"""Deprecated. Use twoQ_gates()."""
warnings.warn('The method get_2q_nodes() is being replaced by twoQ_gates(). '
'Returning a list of data_dicts is also deprecated, twoQ_gates() '
'returns a list of DAGNodes.',
DeprecationWarning, 2)
two_q_nodes = []
for node in self._multi_graph.nodes():
if node.type == 'op' and len(node.qargs) == 2:
two_q_nodes.append(node.data_dict)
return two_q_nodes | 0.007273 |
def _process_params(self, params):
""" Converts Unicode/lists/booleans inside HTTP parameters """
processed_params = {}
for key, value in params.items():
processed_params[key] = self._process_param_value(value)
return processed_params | 0.007168 |
def get_outcome_probs(self):
"""
Parses a wavefunction (array of complex amplitudes) and returns a dictionary of
outcomes and associated probabilities.
:return: A dict with outcomes as keys and probabilities as values.
:rtype: dict
"""
outcome_dict = {}
qubit_num = len(self)
for index, amplitude in enumerate(self.amplitudes):
outcome = get_bitstring_from_index(index, qubit_num)
outcome_dict[outcome] = abs(amplitude) ** 2
return outcome_dict | 0.005464 |
def _on_read_only_error(self, command, future):
"""Invoked when a Redis node returns an error indicating it's in
read-only mode. It will use the ``INFO REPLICATION`` command to
attempt to find the master server and failover to that, reissuing
the command to that server.
:param command: The command that was being executed
:type command: tredis.client.Command
:param future: The execution future
:type future: tornado.concurrent.Future
"""
failover_future = concurrent.TracebackFuture()
def on_replication_info(_):
common.maybe_raise_exception(failover_future)
LOGGER.debug('Failover closing current read-only connection')
self._closing = True
database = self._connection.database
self._connection.close()
self._connected.clear()
self._connect_future = concurrent.Future()
info = failover_future.result()
LOGGER.debug('Failover connecting to %s:%s', info['master_host'],
info['master_port'])
self._connection = _Connection(
info['master_host'], info['master_port'], database, self._read,
self._on_closed, self.io_loop, self._clustering)
# When the connection is re-established, re-run the command
self.io_loop.add_future(
self._connect_future,
lambda f: self._connection.execute(
command._replace(connection=self._connection), future))
# Use the normal connection processing flow when connecting
self.io_loop.add_future(self._connection.connect(),
self._on_connected)
if self._clustering:
command.connection.set_readonly(True)
LOGGER.debug('%s is read-only, need to failover to new master',
command.connection.name)
cmd = Command(
self._build_command(['INFO', 'REPLICATION']), self._connection,
None, common.format_info_response)
self.io_loop.add_future(failover_future, on_replication_info)
cmd.connection.execute(cmd, failover_future) | 0.000891 |
def as_manager(cls, obj):
"""
Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
If obj is None, the manager is initialized from the user config file.
"""
if isinstance(obj, cls): return obj
if obj is None: return cls.from_user_config()
if is_string(obj):
if os.path.exists(obj):
return cls.from_file(obj)
else:
return cls.from_string(obj)
elif isinstance(obj, collections.abc.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj)) | 0.008571 |
def main():
"""Main entry point for script."""
parser = argparse.ArgumentParser(
description='Auto-generate a RESTful API service '
'from an existing database.'
)
parser.add_argument(
'URI',
help='Database URI in the format '
'postgresql+psycopg2://user:password@host/database')
parser.add_argument(
'-d',
'--debug',
help='Turn on debug logging',
action='store_true',
default=False)
parser.add_argument(
'-p',
'--port',
help='Port for service to listen on',
default=5000)
parser.add_argument(
'-l',
'--local-only',
help='Only provide service on localhost (will not be accessible'
' from other machines)',
action='store_true',
default=False)
parser.add_argument(
'-r',
'--read-only',
help='Make all database resources read-only (i.e. only the HTTP GET method is supported)',
action='store_true',
default=False)
parser.add_argument(
'-s',
'--schema',
help='Use this named schema instead of default',
default=None)
args = parser.parse_args()
app = get_app(args.URI, read_only=args.read_only, schema=args.schema)
if args.debug:
app.config['DEBUG'] = True
if args.local_only:
host = '127.0.0.1'
else:
host = '0.0.0.0'
app.config['SECRET_KEY'] = '42'
app.run(host=host, port=int(args.port)) | 0.001974 |
def unmarshal(self, v):
"""
Convert a date in "2012-12-13" format to a :class:`datetime.date` object.
"""
if not isinstance(v, date):
# 2012-12-13
v = datetime.strptime(v, "%Y-%m-%d").date()
return v | 0.011407 |
def clean_locks(root=None):
'''
Remove unused locks that do not currently (with regard to repositories
used) lock any package.
root
Operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.clean_locks
'''
LCK = "removed"
out = {LCK: 0}
locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
if not os.path.exists(locks):
return out
for node in __zypper__(root=root).xml.call('cl').getElementsByTagName("message"):
text = node.childNodes[0].nodeValue.lower()
if text.startswith(LCK):
out[LCK] = text.split(" ")[1]
break
return out | 0.004237 |
def libvlc_video_get_chapter_description(p_mi, i_title):
'''Get the description of available chapters for specific title.
@param p_mi: the media player.
@param i_title: selected title.
@return: list containing description of available chapter for title i_title.
'''
f = _Cfunctions.get('libvlc_video_get_chapter_description', None) or \
_Cfunction('libvlc_video_get_chapter_description', ((1,), (1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
return f(p_mi, i_title) | 0.007286 |
def _on_decisions_event(self, event=None, **kwargs):
"""Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel.
"""
if not self.ran_ready_function:
logger.warning('ignoring decision from {} before when_all_players_ready: {}'.format(event.participant.code, event.value))
return
with track('_on_decisions_event'):
self.group_decisions[event.participant.code] = event.value
self._group_decisions_updated = True
self.save(update_fields=['group_decisions', '_group_decisions_updated'])
if not self.num_subperiods() and not self.rate_limit():
self.send('group_decisions', self.group_decisions) | 0.004635 |
def get_parameters(self):
"""
Parse DockerRunBuilder options and create object with properties for docker-py run command
:return: DockerContainerParameters
"""
import argparse
parser = argparse.ArgumentParser(add_help=False)
# without parameter
parser.add_argument("-i", "--interactive", action="store_true", dest="stdin_open")
parser.add_argument("-d", "--detach", action="store_true", dest="detach")
parser.add_argument("-t", "--tty", action="store_true", dest="tty")
parser.add_argument("--init", action="store_true", dest="init")
parser.add_argument("--privileged", action="store_true", dest="privileged")
parser.add_argument("-P", "--publish-all", action="store_true", dest="publish_all_ports")
parser.add_argument("--read-only", action="store_true", dest="read_only")
parser.add_argument("--rm", action="store_true", dest="remove")
# string parameter
parser.add_argument("--entrypoint", action="store", dest="entrypoint")
parser.add_argument("-h", "--hostname", action="store", dest="hostname")
parser.add_argument("--name", action="store", dest="name")
parser.add_argument("--ipc", action="store", dest="ipc_mode")
parser.add_argument("--isolation", action="store", dest="isolation")
parser.add_argument("--mac-address", action="store", dest="mac_address")
parser.add_argument("-m", "--memory", action="store", dest="mem_limit")
parser.add_argument("--network", action="store", dest="network")
parser.add_argument("--platform", action="store", dest="platform")
parser.add_argument("--runtime", action="store", dest="runtime")
parser.add_argument("--stop-signal", action="store", dest="stop_signal")
parser.add_argument("-u", "--user", action="store", dest="user")
parser.add_argument("-w", "--workdir", action="store", dest="working_dir")
# int parameter
parser.add_argument("--pids-limit", action="store", dest="pids_limit", type=int)
# list parameter
parser.add_argument("-e", "--env", action="append", dest="env_variables")
parser.add_argument("--cap-add", action="append", dest="cap_add")
parser.add_argument("--cap-drop", action="append", dest="cap_drop")
parser.add_argument("--device", action="append", dest="devices")
parser.add_argument("--dns", action="append", dest="dns")
parser.add_argument("--group-add", action="append", dest="group_add")
parser.add_argument("--mount", action="append", dest="mounts")
parser.add_argument("-v", "--volume", action="append", dest="volumes")
# dict parameter
parser.add_argument("-l", "--label", action="append", dest="labels")
parser.add_argument("-p", "--publish", action="append", dest="port_mappings")
# health
parser.add_argument("--health-cmd", action="store", dest="health_cmd")
parser.add_argument("--health-interval", action="store", dest="health_interval", type=int)
parser.add_argument("--health-retries", action="store", dest="health_retries", type=int)
parser.add_argument("--health-timeout", action="store", dest="health_timeout", type=int)
parser.add_argument("--no-healthcheck", action="store_true", dest="no_healthcheck")
args, _ = parser.parse_known_args(args=self.options)
command = self.arguments
options_dict = vars(args)
# create Healthcheck object
if not options_dict.pop("no_healthcheck", None):
options_dict["healthcheck"] = Healthcheck(
test=options_dict.pop("health_cmd", None),
interval=options_dict.pop("health_interval", None),
timeout=options_dict.pop("health_timeout", None),
retries=options_dict.pop("health_retries", None)
)
else:
options_dict['healthcheck'] = None
# parse dictionary
# {'name': 'separator'}
with_dictionary_parameter = {'labels': '='}
for name, separator in with_dictionary_parameter.items():
if options_dict[name] is not None:
dictionary = {}
for item in options_dict[name]:
try:
key, value = item.split(separator)
dictionary[key] = value
except ValueError:
dictionary = options_dict[name]
raise ConuException('Wrong format of dictionary: {name}'.format(name=name))
break
options_dict[name] = dictionary
# parse ports
# create dictionary according to https://docker-py.readthedocs.io/en/stable/containers.html
if options_dict['port_mappings'] is not None:
dictionary = {}
for port_string in options_dict['port_mappings']:
colon_count = port_string.count(':')
if colon_count == 2:
split_array = port_string.split(':')
if split_array[1] == '':
# format - ip::containerPort
# create dictionary - {'1111/tcp': ('127.0.0.1', None)}
dictionary[split_array[2]] = (split_array[0], None)
else:
# format - ip:hostPort:containerPort
# create dictionary - {'1111/tcp': ('127.0.0.1', 1111)}
dictionary[split_array[2]] = (split_array[0], int(split_array[1]))
elif colon_count == 1:
# format - hostPort:containerPort
# create dictionary - {'2222/tcp': 3333}
split_array = port_string.split(':')
dictionary[split_array[1]] = int(split_array[0])
elif colon_count == 0:
# format - containerPort
# create dictionary - {'2222/tcp': None}
dictionary[port_string] = None
else:
raise ConuException('Wrong format of port mappings')
options_dict['port_mappings'] = dictionary
container_parameters = DockerContainerParameters(cap_add=options_dict['cap_add'],
cap_drop=options_dict['cap_drop'],
command=command, detach=options_dict['detach'],
devices=options_dict['devices'], dns=options_dict['dns'],
entrypoint=options_dict['entrypoint'],
env_variables=options_dict['env_variables'],
group_add=options_dict['group_add'],
healthcheck=options_dict['healthcheck'],
hostname=options_dict['hostname'],
init=options_dict['init'],
ipc_mode=options_dict['ipc_mode'],
isolation=options_dict['isolation'],
labels=options_dict['labels'],
mac_address=options_dict['mac_address'],
mem_limit=options_dict['mem_limit'],
mounts=options_dict['mounts'],
name=options_dict['name'],
network=options_dict['network'],
pids_limit=options_dict['pids_limit'],
platform=options_dict['platform'],
port_mappings=options_dict['port_mappings'],
privileged=options_dict['privileged'],
publish_all_ports=options_dict['publish_all_ports'],
read_only=options_dict['read_only'],
remove=options_dict['remove'],
runtime=options_dict['runtime'],
stdin_open=options_dict['stdin_open'],
stop_signal=options_dict['stop_signal'],
tty=options_dict['tty'],
user=options_dict['user'],
volumes=options_dict['volumes'],
working_dir=options_dict['working_dir']
)
return container_parameters | 0.005762 |
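For reference, a hedged sketch (made-up ports and addresses) of the docker-py-style port-mapping dict shapes that the '-p' parsing above produces:
port_mappings = {
    '1111/tcp': ('127.0.0.1', None),   # from 'ip::containerPort'
    '2222/tcp': ('127.0.0.1', 2222),   # from 'ip:hostPort:containerPort'
    '3333/tcp': 4444,                  # from 'hostPort:containerPort'
    '5555/tcp': None,                  # from a bare 'containerPort'
}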
def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
build_pxe_default
/provisioning_templates/build_pxe_default
clone
/provisioning_templates/clone
revision
/provisioning_templates/revision
``super`` is called otherwise.
"""
if which in ('build_pxe_default', 'clone', 'revision'):
prefix = 'self' if which == 'clone' else 'base'
return '{0}/{1}'.format(
super(ProvisioningTemplate, self).path(prefix),
which
)
return super(ProvisioningTemplate, self).path(which) | 0.002725 |
def process_flagged_blocks(self, content: str) -> str:
'''Replace flagged blocks either with their contents or nothing, depending on the value
of ``FOLIANT_FLAGS`` environment variable and ``flags`` config value.
:param content: Markdown content
:returns: Markdown content without flagged blocks
'''
def _sub(flagged_block):
options = self.get_options(flagged_block.group('options'))
required_flags = {
flag.lower()
for flag in re.split(self._flag_delimiters, options.get('flags', ''))
if flag
} | {
f'target:{target.lower()}'
for target in re.split(self._flag_delimiters, options.get('targets', ''))
if target
} | {
f'backend:{backend.lower()}'
for backend in re.split(self._flag_delimiters, options.get('backends', ''))
if backend
}
env_flags = {
flag.lower()
for flag in re.split(self._flag_delimiters, getenv(self._flags_envvar, ''))
if flag
}
config_flags = {flag.lower() for flag in self.options['flags']}
set_flags = env_flags \
| config_flags \
| {f'target:{self.context["target"]}', f'backend:{self.context["backend"]}'}
kind = options.get('kind', 'all')
if (kind == 'all' and required_flags <= set_flags) \
or (kind == 'any' and required_flags & set_flags) \
or (kind == 'none' and not required_flags & set_flags):
return flagged_block.group('body').strip()
else:
return ''
return self.pattern.sub(_sub, content) | 0.004942 |
def get_branding(self):
"""Gets a branding, such as an image or logo, expressed using the ``Asset`` interface.
return: (osid.repository.AssetList) - a list of assets
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
mgr = self.my_osid_object._get_provider_manager('REPOSITORY')
lookup_session = mgr.get_asset_lookup_session()
lookup_session.use_federated_repository_view()
return lookup_session.get_assets_by_ids(self.get_branding_ids()) | 0.00519 |
def get_build_controller(self, controller_id):
"""GetBuildController.
Gets a controller
:param int controller_id:
:rtype: :class:`<BuildController> <azure.devops.v5_0.build.models.BuildController>`
"""
route_values = {}
if controller_id is not None:
route_values['controllerId'] = self._serialize.url('controller_id', controller_id, 'int')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='5.0',
route_values=route_values)
return self._deserialize('BuildController', response) | 0.007123 |
def postinit(self, expr=None, globals=None, locals=None):
"""Do some setup after initialisation.
:param expr: The expression to be executed.
:type expr: NodeNG or None
:param globals: The globals dictionary to execute with.
:type globals: NodeNG or None
:param locals: The locals dictionary to execute with.
:type locals: NodeNG or None
"""
self.expr = expr
self.globals = globals
self.locals = locals | 0.004073 |
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items than the current categories or do not validate as categories
See Also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat | 0.000605 |
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = getattr(self._tar_info, 'mtime', None)
if timestamp is None:
return None
return dfdatetime_posix_time.PosixTime(timestamp=timestamp) | 0.011029 |
def create_packages_archive(packages, filename):
"""
Create a tar archive which will contain the files for the packages listed in packages.
"""
import tarfile
tar = tarfile.open(filename, "w")
def add(src, dst):
logger.debug('adding to tar: %s -> %s', src, dst)
tar.add(src, dst)
def add_files_for_package(sub_package_path, root_package_path, root_package_name):
for root, dirs, files in os.walk(sub_package_path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if not f.endswith(".pyc") and not f.startswith("."):
add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)
for package in packages:
# Put a submodule's entire package in the archive. This is the
# magic that usually packages everything you need without
# having to attach packages/modules explicitly
if not getattr(package, "__path__", None) and '.' in package.__name__:
package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')
n = package.__name__.replace(".", "/")
if getattr(package, "__path__", None):
# TODO: (BUG) picking only the first path does not
# properly deal with namespaced packages in different
# directories
p = package.__path__[0]
if p.endswith('.egg') and os.path.isfile(p):
raise RuntimeError('egg files not supported!!!')
# Add the entire egg file
# p = p[:p.find('.egg') + 4]
# add(dereference(p), os.path.basename(p))
else:
# include __init__ files from parent projects
root = []
for parent in package.__name__.split('.')[0:-1]:
root.append(parent)
module_name = '.'.join(root)
directory = '/'.join(root)
add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
directory + "/__init__.py")
add_files_for_package(p, p, n)
# include egg-info directories that are parallel:
for egg_info_path in glob.glob(p + '*.egg-info'):
logger.debug(
'Adding package metadata to archive for "%s" found at "%s"',
package.__name__,
egg_info_path
)
add_files_for_package(egg_info_path, p, n)
else:
f = package.__file__
if f.endswith("pyc"):
f = f[:-3] + "py"
if n.find(".") == -1:
add(dereference(f), os.path.basename(f))
else:
add(dereference(f), n + ".py")
tar.close() | 0.002401 |
def get_band_gap(self):
"""
Returns band gap data.
Returns:
A dict {"energy","direct","transition"}:
"energy": band gap energy
"direct": A boolean telling if the gap is direct or not
"transition": kpoint labels of the transition (e.g., "\\Gamma-X")
"""
if self.is_metal():
return {"energy": 0.0, "direct": False, "transition": None}
cbm = self.get_cbm()
vbm = self.get_vbm()
result = dict(direct=False, energy=0.0, transition=None)
result["energy"] = cbm["energy"] - vbm["energy"]
if (cbm["kpoint"].label is not None and cbm["kpoint"].label == vbm[
"kpoint"].label) \
or np.linalg.norm(cbm["kpoint"].cart_coords
- vbm["kpoint"].cart_coords) < 0.01:
result["direct"] = True
result["transition"] = "-".join(
[str(c.label) if c.label is not None else
str("(") + ",".join(["{0:.3f}".format(c.frac_coords[i])
for i in range(3)])
+ str(")") for c in [vbm["kpoint"], cbm["kpoint"]]])
return result | 0.001671 |
def __start_datanode(self, job):
"""
Launches the Hadoop datanode.
:param job: The underlying job.
"""
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw"],
parameters=[self.masterIP])[:-1] | 0.009079 |
def content_preview(self, request):
"""
Admin view to preview Entry.content in HTML,
useful when using markups to write entries.
"""
data = request.POST.get('data', '')
entry = self.model(content=data)
return TemplateResponse(
request, 'admin/zinnia/entry/preview.html',
{'preview': entry.html_content}) | 0.005222 |
def got(self, *args, **kwargs):
"""Does `.request` match the given :ref:`message spec <message spec>`?
>>> s = MockupDB(auto_ismaster=True)
>>> port = s.run()
>>> s.got(timeout=0) # No request enqueued.
False
>>> from pymongo import MongoClient
>>> client = MongoClient(s.uri)
>>> future = go(client.db.command, 'foo')
>>> s.got('foo')
True
>>> s.got(OpMsg('foo', namespace='db'))
True
>>> s.got(OpMsg('foo', key='value'))
False
>>> s.ok()
>>> future() == {'ok': 1}
True
>>> s.stop()
"""
timeout = kwargs.pop('timeout', self._request_timeout)
end = time.time() + timeout
matcher = make_matcher(*args, **kwargs)
while not self._stopped:
try:
# Short timeout so we notice if the server is stopped.
request = self._request_q.peek(timeout=timeout)
except Empty:
if time.time() > end:
return False
else:
return matcher.matches(request) | 0.001759 |
def _tokenize(sentence):
'''Tokenizer and Stemmer'''
_tokens = nltk.word_tokenize(sentence)
tokens = [stemmer.stem(tk) for tk in _tokens]
return tokens | 0.005952 |
def addAllowedType(self, assoc_type, session_type=None):
"""Add an association type and session type to the allowed
types list. The association/session pairs are tried in the
order that they are added."""
if self.allowed_types is None:
self.allowed_types = []
if session_type is None:
available = getSessionTypes(assoc_type)
if not available:
raise ValueError('No session available for association type %r'
% (assoc_type,))
for session_type in getSessionTypes(assoc_type):
self.addAllowedType(assoc_type, session_type)
else:
checkSessionType(assoc_type, session_type)
self.allowed_types.append((assoc_type, session_type)) | 0.002475 |
def find(self, _id, instance = None):
""" Find
Args:
_id (str): instance id or binding Id
Keyword Arguments:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.
"""
if instance is None:
# We are looking for an instance
return self.service_instance.find(_id)
else:
# We are looking for a binding
return self.service_binding.find(_id, instance) | 0.015873 |
def create_host_template(resource_root, name, cluster_name):
"""
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
"""
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, data=[apitemplate], api_version=3)[0] | 0.011858 |
def convert_nsarg(
nsarg: str,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
) -> str:
"""[De]Canonicalize NSArg
Args:
nsarg (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Results:
str: converted NSArg
"""
if not api_url:
api_url = config["bel_api"]["servers"]["api_url"]
if not api_url:
log.error("Missing api url - cannot convert namespace")
return None
params = None
if namespace_targets:
namespace_targets_str = json.dumps(namespace_targets)
params = {"namespace_targets": namespace_targets_str}
if not namespace_targets:
if canonicalize:
api_url = api_url + "/terms/{}/canonicalized"
elif decanonicalize:
api_url = api_url + "/terms/{}/decanonicalized"
else:
log.warning("Missing (de)canonical flag - cannot convert namespaces")
return nsarg
else:
api_url = (
api_url + "/terms/{}/canonicalized"
) # overriding with namespace_targets
request_url = api_url.format(url_path_param_quoting(nsarg))
r = get_url(request_url, params=params, timeout=10)
if r and r.status_code == 200:
nsarg = r.json().get("term_id", nsarg)
elif not r or r.status_code == 404:
log.error(f"[de]Canonicalization endpoint missing: {request_url}")
return nsarg | 0.002174 |
def parse(self):
"""
Try to extract domain (full, naked, sub-domain), IP and port.
"""
if self.target.endswith("/"):
self.target = self.target[:-1]
if self._is_proto(self.target):
try:
self.protocol, self.target = self.target.split("://")
self.logger.info("{} Protocol detected: {}".format(COLORED_COMBOS.NOTIFY, self.protocol))
if self.protocol.lower() == "https" and self.port == 80:
self.port = 443
except ValueError:
raise HostHandlerException("Could not make domain and protocol from host")
if ":" in self.target:
self._extract_port(self.target)
if self.validate_ip(self.target):
self.logger.info("{} Detected {} as an IP address.".format(COLORED_COMBOS.NOTIFY, self.target))
self.is_ip = True
else:
domains = []
if self.target.startswith("www."):
# Obviously an FQDN
domains.extend((self.target, self.target.split("www.")[1]))
self.fqdn = self.target
self.naked = ".".join(self.fqdn.split('.')[1:])
else:
domains.append(self.target)
domain_levels = self.target.split(".")
if len(domain_levels) == 2 or (len(domain_levels) == 3 and domain_levels[1] == "co"):
self.logger.info("{} Found {} to be a naked domain".format(COLORED_COMBOS.NOTIFY, self.target))
self.naked = self.target
try:
self.dns_results = DNSHandler.query_dns(domains, self.dns_records)
except Timeout:
raise HostHandlerException("DNS Query timed out. Maybe target has DNS protection ?")
if self.dns_results.get("CNAME"):
# Naked domains shouldn't hold CNAME records according to RFC regulations
self.logger.info("{} Found {} to be an FQDN by CNAME presence in DNS records".format(
COLORED_COMBOS.NOTIFY, self.target))
self.fqdn = self.target
self.naked = ".".join(self.fqdn.split('.')[1:])
self.create_host_dir_and_set_file_logger()
self.write_up() | 0.004781 |
def get_cache_key(request, meta, orgaMode, currentOrga):
"""Return the cache key to use"""
# Caching
cacheKey = None
if 'cache_time' in meta:
if meta['cache_time'] > 0:
# by default, no cache by user
useUser = False
# If a logged-in user is needed, cache the result by user
if ('only_logged_user' in meta and meta['only_logged_user']) or \
('only_member_user' in meta and meta['only_member_user']) or \
('only_admin_user' in meta and meta['only_admin_user']) or \
('only_orga_member_user' in meta and meta['only_orga_member_user']) or \
('only_orga_admin_user' in meta and meta['only_orga_admin_user']):
useUser = True
# If a value is present in meta, use it
if 'cache_by_user' in meta:
useUser = meta['cache_by_user']
cacheKey = '-'
# Add user info if needed
if useUser:
cacheKey += str(request.user.pk) + 'usr-'
# Add orga
if orgaMode:
cacheKey += str(currentOrga.pk) + 'org-'
# Add current query
cacheKey += request.get_full_path()
# Add current template (if the template changed, cache must be invalidated)
cacheKey += meta['template_tag']
return cacheKey | 0.004234 |
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id'])) | 0.005525 |
def fixedvar(self):
"""Returns the name of a member in this type that is non-custom
so that it would terminate the auto-class variable context chain.
"""
possible = [m for m in self.members.values() if not m.is_custom]
#If any of the possible variables is not allocatable or pointer, it will always
#have a value and we can just use that.
sufficient = [m for m in possible if "allocatable" not in m.modifiers
and "pointer" not in m.modifiers]
if len(sufficient) > 0:
return [sufficient[0].name]
else:
return [m.name for m in possible] | 0.007669 |
def upload_pdf(self, pdf):
"""
Upload the consumption receipt PDF for an electronic invoice.
For details, see
https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2
:param pdf: The PDF file to upload, as a file object
:return: A 64-bit integer, used to associate the PDF with the invoice card when inserting the invoice card into the user's card package; valid for 3 days.
"""
return self._post(
'platform/setpdf',
files={
'pdf': pdf,
},
result_processor=lambda x: x['s_media_id'],
) | 0.004515 |
def push_blob(self,
filename=None,
progress=None,
data=None, digest=None,
check_exists=True):
# pylint: disable=too-many-arguments
"""
Upload a file to the registry and return its (SHA-256) hash.
The registry is content-addressable so the file's content (aka blob)
can be retrieved later by passing the hash to :meth:`pull_blob`.
:param filename: File to upload.
:type filename: str
:param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.
:type data: Generator or iterator
:param digest: Hash of the data to be uploaded in ``data``, if specified.
:type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)
:param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.
:type progress: function(dgst, chunk, size)
:param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.
:type check_exists: bool
:rtype: str
:returns: Hash of file's content.
"""
if filename is None:
dgst = digest
else:
dgst = hash_file(filename)
if check_exists:
try:
self._request('head', 'blobs/' + dgst)
return dgst
except requests.exceptions.HTTPError as ex:
# pylint: disable=no-member
if ex.response.status_code != requests.codes.not_found:
raise
r = self._request('post', 'blobs/uploads/')
upload_url = r.headers['Location']
url_parts = list(urlparse.urlparse(upload_url))
query = urlparse.parse_qs(url_parts[4])
query.update({'digest': dgst})
url_parts[4] = urlencode(query, True)
url_parts[0] = 'http' if self._insecure else 'https'
upload_url = urlparse.urlunparse(url_parts)
if filename is None:
data = _ReportingChunks(dgst, data, progress) if progress else data
self._base_request('put', upload_url, data=data)
else:
with open(filename, 'rb') as f:
data = _ReportingFile(dgst, f, progress) if progress else f
self._base_request('put', upload_url, data=data)
return dgst | 0.004167 |
def write_listing_to_textfile(textfile, tracklisting):
"""Write tracklisting to a text file."""
with codecs.open(textfile, 'wb', 'utf-8') as text:
text.write(tracklisting) | 0.005348 |
def get_authed_registries():
"""Reads the local Docker client config for the current user
and returns all registries to which the user may be logged in.
This is intended to be run client-side, not by the daemon."""
result = set()
if not os.path.exists(constants.DOCKER_CONFIG_PATH):
return result
config = json.load(open(constants.DOCKER_CONFIG_PATH, 'r'))
for registry in config.get('auths', {}).iterkeys():
try:
parsed = urlparse(registry)
except Exception:
log_to_client('Error parsing registry {} from Docker config, will skip this registry'.format(registry))
# This logic assumes the auth is either of the form
# gamechanger.io (no scheme, no path after host) or
# of the form https://index.docker.io/v1/ (scheme,
# netloc parses correctly, additional path does not matter).
# These are the formats I saw in my personal config file,
# not sure what other formats it might accept.
result.add(parsed.netloc) if parsed.netloc else result.add(parsed.path)
return result | 0.001808 |
def QA_fetch_future_min_adv(
code,
start, end=None,
frequence='1min',
if_drop_index=True,
collections=DATABASE.future_min):
'''
Fetch minute-bar data.
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
'''
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
    # __data = []  # unused
end = start if end is None else end
if len(start) == 10:
start = '{} 00:00:00'.format(start)
if len(end) == 10:
end = '{} 15:00:00'.format(end)
    # TODO: report an error if the start time is after the end time
    # if start == end:
    # TODO: if start equals end, derive the time span from frequence; QA_fetch_index_min_adv does not support start == end
    #print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))
    # return None
res = QA_fetch_future_min(
code, start, end, format='pd', frequence=frequence)
if res is None:
print("QA Error QA_fetch_future_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_future_min return None" % (
code, start, end, frequence))
else:
res_reset_index = res.set_index(
['datetime', 'code'], drop=if_drop_index)
# if res_reset_index is None:
# print("QA Error QA_fetch_index_min_adv set index 'date, code' return None")
return QA_DataStruct_Future_min(res_reset_index) | 0.003452 |
def cancelPendingResultsFor( self, params ):
"""Cancel any results pending for experiments at the given point
in the parameter space.
:param params: the experimental parameters"""
# grab the result job ids
jobs = self.pendingResultsFor(params)
if len(jobs) > 0:
# abort in the cluster
self._abortJobs(jobs)
# cancel in the notebook
self.notebook().cancelPendingResultsFor(params) | 0.015238 |
def modify_ssh_template(auth, url, ssh_template, template_name= None, template_id = None):
"""
    Function takes input of a dictionary containing the required key/value pairs for the modification
    of an SSH template.
:param auth:
:param url:
    :param ssh_template: dict containing the key/value pairs which define the SSH template
    :param template_name: Human readable label which is the name of the specific SSH template
    :param template_id: Internal IMC number which designates the specific SSH template
    :return: int value of HTTP response code 201 for proper modification or 404 for failed modification
    :rtype: int
Sample of proper KV pairs. Please see documentation for valid values for different fields.
ssh_template = {
"type": "0",
"name": "ssh_admin_template",
"authType": "3",
"authTypeStr": "Password + Super Password",
"userName": "newadmin",
"password": "password",
"superPassword": "password",
"port": "22",
"timeout": "10",
"retries": "3",
"keyFileName": "",
"keyPhrase": ""
}
"""
if template_name is None:
template_name = ssh_template['name']
if template_id is None:
ssh_templates = get_ssh_template(auth, url)
template_id = None
for template in ssh_templates:
if template['name'] == template_name:
template_id = template['id']
    f_url = url + "/imcrs/plat/res/ssh/"+str(template_id)+"/update"
    try:
        response = requests.put(f_url, data=json.dumps(ssh_template), auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " modify_ssh_template: An error has occurred" | 0.008449
def getChild(self, name, ns=None, default=None):
"""
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
"""
if ns is None:
prefix, name = splitPrefix(name)
if prefix is not None:
ns = self.resolvePrefix(prefix)
for c in self.children:
if c.match(name, ns):
return c
return default | 0.002506 |
def set_buffer_limits(self, high=None, low=None):
"""Set the low and high watermarks for the read buffer."""
if high is None:
high = self.default_buffer_size
if low is None:
low = high // 2
self._buffer_high = high
self._buffer_low = low | 0.006645 |
def AddInformationalOptions(self, argument_group):
"""Adds the informational options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='Enable debug output.')
argument_group.add_argument(
'-q', '--quiet', dest='quiet', action='store_true', default=False,
help='Disable informational output.') | 0.001988 |
def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
"""Return the partition as a Pandas dataframe
:param predicate: If defined, a callable that is called for each row, and if it returns true, the
row is included in the output.
:param filtered_columns: If defined, the value is a dict of column names and
        associated values. Only rows where all of the named columns have the given values will be returned.
Setting the argument will overwrite any value set for the predicate
:param columns: A list or tuple of column names to return
:return: Pandas dataframe
"""
from operator import itemgetter
from ambry.pands import AmbryDataFrame
df_class = df_class or AmbryDataFrame
if columns:
ig = itemgetter(*columns)
else:
ig = None
columns = self.table.header
if filtered_columns:
def maybe_quote(v):
from six import string_types
if isinstance(v, string_types):
return '"{}"'.format(v)
else:
return v
code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
for k, v in filtered_columns.items())
predicate = eval('lambda row: {}'.format(code))
if predicate:
def yielder():
for row in self.reader:
if predicate(row):
if ig:
yield ig(row)
else:
yield row.dict
df = df_class(yielder(), columns=columns, partition=self.measuredim)
return df
else:
def yielder():
for row in self.reader:
yield row.values()
# Put column names in header order
columns = [c for c in self.table.header if c in columns]
return df_class(yielder(), columns=columns, partition=self.measuredim) | 0.003808 |
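The filtered_columns-to-predicate code generation above can be illustrated standalone; this simplified sketch mirrors the same string-building and eval step on a hypothetical row type (`build_predicate` and `Row` are not part of the original class).
from collections import namedtuple

def build_predicate(filtered_columns):
    def maybe_quote(v):
        return '"{}"'.format(v) if isinstance(v, str) else v
    code = ' and '.join('row.{} == {}'.format(k, maybe_quote(v))
                        for k, v in filtered_columns.items())
    return eval('lambda row: {}'.format(code))

Row = namedtuple('Row', ['state', 'year'])
pred = build_predicate({'state': 'CA', 'year': 2020})
print(pred(Row('CA', 2020)), pred(Row('NY', 2020)))  # True False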
def get_abbreviation_of(self, name):
"""Get abbreviation of a language."""
for language in self.user_data.languages:
if language['language_string'] == name:
return language['language']
return None | 0.008065 |
def get_route(self, name):
'''Get a child :class:`Router` by its :attr:`name`.
This method search child routes recursively.
'''
for route in self.routes:
if route.name == name:
return route
for child in self.routes:
route = child.get_route(name)
if route:
return route | 0.005305 |
def window_cover(self, window_shape, pad=True):
""" Iterate over a grid of windows of a specified shape covering an image.
The image is divided into a grid of tiles of size window_shape. Each iteration returns
the next window.
Args:
window_shape (tuple): The desired shape of each image as (height,
width) in pixels.
pad: (bool): Whether or not to pad edge cells. If False, cells that do not
have the desired shape will not be returned. Defaults to True.
Yields:
image: image object of same type.
"""
size_y, size_x = window_shape[0], window_shape[1]
_ndepth, _nheight, _nwidth = self.shape
nheight, _m = divmod(_nheight, size_y)
nwidth, _n = divmod(_nwidth, size_x)
img = self
if pad is True:
new_height, new_width = _nheight, _nwidth
if _m != 0:
new_height = (nheight + 1) * size_y
if _n != 0:
new_width = (nwidth + 1) * size_x
if (new_height, new_width) != (_nheight, _nwidth):
bounds = box(0, 0, new_width, new_height)
geom = ops.transform(self.__geo_transform__.fwd, bounds)
img = self[geom]
row_lims = range(0, img.shape[1], size_y)
col_lims = range(0, img.shape[2], size_x)
for maxy, maxx in product(row_lims, col_lims):
reg = img[:, maxy:(maxy + size_y), maxx:(maxx + size_x)]
if pad is False:
if reg.shape[1:] == window_shape:
yield reg
else:
yield reg | 0.002992 |
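The padding arithmetic above (divmod per axis, then rounding the canvas up to a whole number of tiles) can be previewed without an image; `planned_grid` is a hypothetical helper that mirrors the same logic, not part of the class.
def planned_grid(height, width, tile=(256, 256), pad=True):
    # Mirror the divmod logic of window_cover to predict the tile layout.
    ty, tx = tile
    nrows, rem_y = divmod(height, ty)
    ncols, rem_x = divmod(width, tx)
    if pad:
        nrows += 1 if rem_y else 0
        ncols += 1 if rem_x else 0
    canvas_h = nrows * ty if pad else height
    canvas_w = ncols * tx if pad else width
    return canvas_h, canvas_w, nrows * ncols

print(planned_grid(1000, 900))             # (1024, 1024, 16) with edge padding
print(planned_grid(1000, 900, pad=False))  # (1000, 900, 9) full tiles only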
def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
""" Iterate over an analysis dictionary with selected attributes.
Args:
analysis_objects: Analysis objects dictionary.
selections: Keyword arguments used to select attributes from the analysis dictionary.
Yields:
object: Matching analysis object.
"""
for key_index, obj in analysis_objects.items():
# If selections is empty, we return every object. If it's not empty, then we only want to return
# objects which are selected in through the selections.
selected_obj = not selections or all([getattr(key_index, selector) == selected_value for selector, selected_value in selections.items()])
if selected_obj:
yield key_index, obj | 0.005875 |
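A minimal usage sketch, assuming the function above is importable and that the dictionary keys are namedtuples whose fields can be selected on:
from collections import namedtuple

Key = namedtuple('Key', ['collision_system', 'event_activity'])
analyses = {
    Key('PbPb', 'central'): 'analysis_a',
    Key('PbPb', 'peripheral'): 'analysis_b',
    Key('pp', 'min_bias'): 'analysis_c',
}
# Selecting on collision_system yields only the two PbPb analyses.
for key, obj in iterate_with_selected_objects(analyses, collision_system='PbPb'):
    print(key.event_activity, obj)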
def defer(target, args=None, kwargs=None, callback=None):
"""Perform operation in thread with callback
Instances are cached until finished, at which point
they are garbage collected. If we didn't do this,
Python would step in and garbage collect the thread
before having had time to finish, resulting in an
exception.
    Arguments:
        target (callable): Method or function to call
        args (tuple, optional): Positional arguments passed to `target`
        kwargs (dict, optional): Keyword arguments passed to `target`
        callback (callable, optional): Method or function to call
            once `target` has finished.
    Returns:
        _defer: The deferred thread object, kept referenced until finished
"""
obj = _defer(target, args, kwargs, callback)
obj.finished.connect(lambda: _defer_cleanup(obj))
obj.start()
_defer_threads.append(obj)
return obj | 0.001389 |
def split(self, split_on):
"""
Splits the AST if its operation is `split_on` (i.e., return all the arguments). Otherwise, return a list with
just the AST.
"""
        if self.op in split_on:
            return list(self.args)
        else:
            return [self] | 0.025455
def get_data_info(self):
"""
imports er tables and places data into Data_info data structure
        outlined below:
Data_info - {er_samples: {er_samples.txt info}
er_sites: {er_sites.txt info}
er_locations: {er_locations.txt info}
er_ages: {er_ages.txt info}}
"""
Data_info = {}
data_er_samples = {}
data_er_sites = {}
data_er_locations = {}
data_er_ages = {}
if self.data_model == 3.0:
print(("data model: %1.1f" % (self.data_model)))
Data_info["er_samples"] = []
Data_info["er_sites"] = []
Data_info["er_locations"] = []
Data_info["er_ages"] = []
# self.magic_file may have a full path, but this breaks cb.Contribution
# determine if magic_file exists in WD, and if it doesn't, copy it in
magic_file_real = os.path.realpath(self.magic_file)
magic_file_short = os.path.split(self.magic_file)[1]
WD_file_real = os.path.realpath(
os.path.join(self.WD, magic_file_short))
if magic_file_real == WD_file_real:
fnames = {'measurements': magic_file_short}
else:
# copy measurements file to WD, keeping original name
shutil.copy(magic_file_real, WD_file_real)
fnames = {'measurements': magic_file_short}
self.con = cb.Contribution(self.WD, custom_filenames=fnames, read_tables=[
'measurements', 'specimens', 'samples', 'sites', 'locations', 'criteria', 'ages'])
if 'specimens' in self.con.tables:
spec_container = self.con.tables['specimens']
self.spec_data = spec_container.df
else:
self.con.add_empty_magic_table('specimens')
self.spec_data = self.con.tables['specimens'].df
if 'samples' in self.con.tables:
samp_container = self.con.tables['samples']
samp_container.front_and_backfill(['azimuth', 'dip'])
self.samp_data = samp_container.df
samp_data2 = self.samp_data.rename(
columns=map_magic.samp_magic3_2_magic2_map)
data_er_samples = samp_data2.T.to_dict()
else:
self.con.add_empty_magic_table('samples')
self.samp_data = self.con.tables['samples'].df
if 'sites' in self.con.tables:
site_container = self.con.tables['sites']
self.site_data = site_container.df
if 'age' in self.site_data.columns:
self.site_data = self.site_data[self.site_data['age'].notnull(
)]
age_ids = [col for col in self.site_data.columns if col.startswith(
"age") or col == "site"]
age_data = self.site_data[age_ids].rename(
columns=map_magic.site_magic3_2_magic2_map)
# save this in 2.5 format
er_ages = age_data.to_dict('records')
data_er_ages = {}
for s in er_ages:
s = self.convert_ages_to_calendar_year(s)
data_er_ages[s['er_site_name']] = s
sites = self.site_data.rename(
columns=map_magic.site_magic3_2_magic2_map)
# pick out what is needed by thellier_gui and put in 2.5 format
er_sites = sites.to_dict('records')
data_er_sites = {}
for s in er_sites:
data_er_sites[s['er_site_name']] = s
else:
self.con.add_empty_magic_table('sites')
self.site_data = self.con.tables['sites'].df
if 'locations' in self.con.tables:
location_container = self.con.tables["locations"]
self.loc_data = location_container.df # only need this for saving tables
if self.loc_data['location'].isnull().any():
self.loc_data.replace(
{'location': {None: 'unknown'}}, inplace=True)
self.loc_data.set_index('location', inplace=True)
self.loc_data['location'] = self.loc_data.index
loc2_data = self.loc_data.rename(
columns=map_magic.loc_magic3_2_magic2_map)
data_er_locations = loc2_data.to_dict('index')
else:
self.con.add_empty_magic_table('locations')
self.loc_data = self.con.tables['locations'].df
else: # try 2.5 data model
print(("data model: %1.1f" % (self.data_model)))
self.read_magic_file(os.path.join(
self.WD, "er_samples.txt"), 'er_sample_name')
try:
data_er_samples = self.read_magic_file(
os.path.join(self.WD, "er_samples.txt"), 'er_sample_name')
except:
print("-W- Can't find er_sample.txt in project directory")
try:
data_er_sites = self.read_magic_file(
os.path.join(self.WD, "er_sites.txt"), 'er_site_name')
except:
print("-W- Can't find er_sites.txt in project directory")
try:
data_er_locations = self.read_magic_file(os.path.join(
self.WD, "er_locations.txt"), 'er_location_name')
except:
print("-W- Can't find er_locations.txt in project directory")
try:
data_er_ages = self.read_magic_file(
os.path.join(self.WD, "er_ages.txt"), 'er_sample_name')
except:
try:
data_er_ages = self.read_magic_file(
os.path.join(self.WD, "er_ages.txt"), 'er_site_name')
except:
print("-W- Can't find er_ages in project directory")
Data_info["er_samples"] = data_er_samples
Data_info["er_sites"] = data_er_sites
Data_info["er_locations"] = data_er_locations
Data_info["er_ages"] = data_er_ages
return(Data_info) | 0.002209 |
def set_version(new_version_number=None, old_version_number=''):
"""
Set package version as listed in `__version__` in `__init__.py`.
"""
    if new_version_number is None:
        raise ValueError('new_version_number must be given')
import fileinput
import sys
file = join('timezonefinder', '__init__.py')
for line in fileinput.input(file, inplace=1):
if old_version_number in line:
line = line.replace(old_version_number, new_version_number)
sys.stdout.write(line) | 0.002041 |
def find_end(self, text, start_token, end_token, ignore_end_token=None):
        '''Find the end of a token.
        Returns the offset in the string immediately after the matching end_token'''
if not text.startswith(start_token):
raise MAVParseError("invalid token start")
offset = len(start_token)
nesting = 1
while nesting > 0:
idx1 = text[offset:].find(start_token)
idx2 = text[offset:].find(end_token)
# Check for false positives due to another similar token
# For example, make sure idx2 points to the second '}' in ${{field: ${name}}}
if ignore_end_token:
combined_token = ignore_end_token + end_token
if text[offset+idx2:offset+idx2+len(combined_token)] == combined_token:
idx2 += len(ignore_end_token)
if idx1 == -1 and idx2 == -1:
raise MAVParseError("token nesting error")
if idx1 == -1 or idx1 > idx2:
offset += idx2 + len(end_token)
nesting -= 1
else:
offset += idx1 + len(start_token)
nesting += 1
return offset | 0.004163 |
def parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None | 0.002237 |
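One plausible entry in the _date_handlers chain, sketched on the assumption that each handler returns a GMT 9-tuple (the shape of time.gmtime()) or raises ValueError for formats it does not understand; `_parse_iso8601_z` is hypothetical.
import time

def _parse_iso8601_z(date_string):
    # time.struct_time behaves as a 9-tuple, which parse_date accepts.
    return time.strptime(date_string, '%Y-%m-%dT%H:%M:%SZ')

print(_parse_iso8601_z('2024-03-01T12:30:00Z'))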
def from_whypo(cls, xml, encoding='utf-8'):
"""Constructor from xml element *WHYPO*
:param xml.etree.ElementTree xml: the xml *WHYPO* element
:param string encoding: encoding of the xml
"""
word = unicode(xml.get('WORD'), encoding)
confidence = float(xml.get('CM'))
return cls(word, confidence) | 0.005682 |
def _read_response(self, response):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
"""
self.name = response['name']
self.includesPattern = response['includesPattern']
self.excludesPattern = response['excludesPattern']
self._repositories = response.get('repositories', [])
if 'principals' in response:
if 'users' in response['principals']:
self._users = response['principals']['users']
if 'groups' in response['principals']:
self._groups = response['principals']['groups'] | 0.00468 |
def process_log_record(self, log_record):
"""Add customer record keys and rename threadName key."""
log_record["version"] = __version__
log_record["program"] = PROGRAM_NAME
log_record["service_name"] = log_record.pop('threadName', None)
# return jsonlogger.JsonFormatter.process_log_record(self, log_record)
return log_record | 0.005348 |
def list(self, name, iterator=False, **kwargs):
"""
Returns a list of the files under the specified path
name must be in the form of `s3://bucket/prefix`
Parameters
----------
keys: optional
if True then this will return the actual boto keys for files
that are encountered
objects: optional
if True then this will return the actual boto objects for
files or prefixes that are encountered
        delimiter: optional
            if set, keys are grouped by common prefix up to this delimiter (as in the S3 list API)
iterator: optional
if True return iterator rather than converting to list object
"""
assert self._is_s3(name), "name must be in form s3://bucket/key"
it = self._list(bucket=self._bucket_name(name), prefix=self._key_name(name), **kwargs)
return iter(it) if iterator else list(it) | 0.003444 |
def age(self):
"""
:returns: The age of the user associated with this profile.
"""
if self.is_logged_in_user:
# Retrieve the logged-in user's profile age
return int(self._user_age_xpb.get_text_(self.profile_tree).strip())
else:
# Retrieve a non logged-in user's profile age
return int(self._age_xpb.get_text_(self.profile_tree)) | 0.007194 |
def exit_ok(self, message, exit_code=None):
"""Log a message and exit
:param exit_code: if not None, exit with the provided value as exit code
:type exit_code: int
:param message: message for the exit reason
:type message: str
:return: None
"""
logger.info("Exiting...")
if message:
logger.info("-----")
logger.error("Exit message: %s", message)
logger.info("-----")
self.request_stop()
if exit_code is not None:
exit(exit_code) | 0.005291 |
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censorships.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
onto the training dataset.
References
----------
https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
"""
# pylint: disable=access-member-before-definition
if not hasattr(self, "_concordance_score_"):
if self.strata:
# https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
num_correct, num_tied, num_pairs = 0, 0, 0
for _, _df in self._predicted_partial_hazards_.groupby(self.strata):
if _df.shape[0] == 1:
continue
_num_correct, _num_tied, _num_pairs = _concordance_summary_statistics(
_df["T"].values, -_df["P"].values, _df["E"].values
)
num_correct += _num_correct
num_tied += _num_tied
num_pairs += _num_pairs
else:
df = self._predicted_partial_hazards_
num_correct, num_tied, num_pairs = _concordance_summary_statistics(
df["T"].values, -df["P"].values, df["E"].values
)
self._concordance_score_ = _concordance_ratio(num_correct, num_tied, num_pairs)
return self._concordance_score_
return self._concordance_score_ | 0.004667 |
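The helper _concordance_ratio is not shown in this snippet; the conventional c-index definition it typically implements counts tied pairs at half weight, as in this sketch:
def concordance_ratio(num_correct, num_tied, num_pairs):
    # Standard c-index: concordant pairs plus half credit for ties.
    return (num_correct + 0.5 * num_tied) / num_pairs

print(concordance_ratio(70, 10, 100))  # 0.75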
def _get_cursor(self):
'''
Yield a SQLCipher cursor
'''
_options = self._get_options()
conn = sqlcipher.connect(_options.get('database'),
timeout=float(_options.get('timeout')))
conn.execute('pragma key="{0}"'.format(_options.get('pass')))
cursor = conn.cursor()
try:
yield cursor
except sqlcipher.Error as err:
log.exception('Error in ext_pillar SQLCipher: %s', err.args)
finally:
conn.close() | 0.003683 |
def rings_full_data(self):
""" Returns a generator for iterating over each ring
Yields
------
For each ring, tuple composed by ring ID, list of edges, list of nodes
Notes
-----
Circuit breakers must be closed to find rings, this is done automatically.
"""
#close circuit breakers
for circ_breaker in self.circuit_breakers():
if not circ_breaker.status == 'closed':
circ_breaker.close()
logger.info('Circuit breakers were closed in order to find MV '
'rings')
#find True rings (cycles from station through breaker and back to station)
for ring_nodes in nx.cycle_basis(self._graph, root=self._station):
edges_ring = []
for node in ring_nodes:
for edge in self.graph_branches_from_node(node):
nodes_in_the_branch = self.graph_nodes_from_branch(edge[1]['branch'])
if (nodes_in_the_branch[0] in ring_nodes and
nodes_in_the_branch[1] in ring_nodes
):
if not edge[1]['branch'] in edges_ring:
edges_ring.append(edge[1]['branch'])
yield (edges_ring[0].ring,edges_ring,ring_nodes) | 0.008215 |
def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2):
"""Get fully formed statements from a list of hashes.
Parameters
----------
hash_list : list[int or str]
A list of statement hashes.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 100.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can
also help gracefully handle an unreliable connection, if you're
willing to wait. Default is 2.
"""
if not isinstance(hash_list, list):
raise ValueError("The `hash_list` input is a list, not %s."
% type(hash_list))
if not hash_list:
return []
if isinstance(hash_list[0], str):
hash_list = [int(h) for h in hash_list]
if not all([isinstance(h, int) for h in hash_list]):
raise ValueError("Hashes must be ints or strings that can be "
"converted into ints.")
resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
data={'hashes': hash_list},
best_first=best_first, tries=tries)
return stmts_from_json(resp.json()['statements'].values()) | 0.000566 |
def __as_list(value: List[JsonObjTypes]) -> List[JsonTypes]:
""" Return a json array as a list
:param value: array
:return: array with JsonObj instances removed
"""
return [e._as_dict if isinstance(e, JsonObj) else e for e in value] | 0.007326 |
def json(value,
schema = None,
allow_empty = False,
json_serializer = None,
**kwargs):
"""Validate that ``value`` conforms to the supplied JSON Schema.
.. note::
``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema indicates the
meta-schema using a ``$schema`` property, the schema will be assumed to conform to
Draft 7.
.. hint::
If either ``value`` or ``schema`` is a string, this validator will assume it is a
JSON object and try to convert it into a :class:`dict <python:dict>`.
You can override the JSON serializer used by passing it to the
``json_serializer`` property. By default, will utilize the Python
:class:`json <json>` encoder/decoder.
:param value: The value to validate.
:param schema: An optional JSON Schema against which ``value`` will be validated.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param json_serializer: The JSON encoder/decoder to use to deserialize a
string passed in ``value``. If not supplied, will default to the Python
:class:`json <python:json>` encoder/decoder.
:type json_serializer: callable
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`dict <python:dict>` / :class:`list <python:list>` of
:class:`dict <python:dict>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`dict <python:dict>`
:raises NotJSONError: if ``value`` cannot be deserialized from JSON
:raises NotJSONSchemaError: if ``schema`` is not a valid JSON Schema object
:raises JSONValidationError: if ``value`` does not validate against the JSON Schema
"""
original_value = value
original_schema = schema
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if not json_serializer:
json_serializer = json_
if isinstance(value, str):
try:
value = json_serializer.loads(value)
except Exception:
raise errors.CannotCoerceError(
'value (%s) cannot be deserialized from JSON' % original_value
)
if isinstance(schema, str):
try:
schema = dict(schema,
allow_empty = allow_empty,
json_serializer = json_serializer,
**kwargs)
except Exception:
raise errors.CannotCoerceError(
'schema (%s) cannot be coerced to a dict' % original_schema
)
if not isinstance(value, (list, dict_)):
raise errors.NotJSONError('value (%s) is not a JSON object' % original_value)
if original_schema and not isinstance(schema, dict_):
raise errors.NotJSONError('schema (%s) is not a JSON object' % original_schema)
if not schema:
return value
try:
jsonschema.validate(value, schema)
except jsonschema.exceptions.ValidationError as error:
raise errors.JSONValidationError(error.message)
except jsonschema.exceptions.SchemaError as error:
raise errors.NotJSONSchemaError(error.message)
return value | 0.00529 |
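What the schema branch above delegates to can be seen directly with jsonschema; this standalone sketch validates a small document against a schema of the same style.
import json as json_
import jsonschema

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
    "required": ["name"],
}
value = json_.loads('{"name": "Ada", "age": 36}')
jsonschema.validate(value, schema)  # raises jsonschema.ValidationError on failure
print('valid')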
def get_sitk_image_from_ndarray(data3d):
"""
Prepare SimpleItk Image object and rescale data to unsigned types.
Simple ITK with version higher than 1.0.0 can not write signed int16. This function check
the SimpleITK version and use work around with Rescale Intercept and Rescale Slope
:param data3d:
:return:
"""
import SimpleITK as sitk
rescale_intercept = None
if sitk.Version.MajorVersion() > 0:
if data3d.dtype == np.int8:
rescale_intercept = -2**7
data3d = (data3d - rescale_intercept).astype(np.uint8)
elif data3d.dtype == np.int16:
# simpleitk is not able to store this. It uses only 11 bites
# rescale_intercept = -2**15
rescale_intercept = -2**10
data3d = (data3d - rescale_intercept).astype(np.uint16)
elif data3d.dtype == np.int32:
rescale_intercept = -2**31
data3d = (data3d - rescale_intercept).astype(np.uint16)
dim = sitk.GetImageFromArray(data3d)
if sitk.Version.MajorVersion() > 0:
if rescale_intercept is not None:
# rescale slope (0028|1053), rescale intercept (0028|1052)
dim.SetMetaData("0028|1052", str(rescale_intercept))
dim.SetMetaData("0028|1053", "1")
return dim | 0.002292 |
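A hypothetical call for a signed CT-style volume: the int16 input is shifted into uint16 and the offset recorded as the Rescale Intercept tag. The SimpleITK call itself is left commented so the sketch runs with numpy alone.
import numpy as np

data3d = np.zeros((4, 64, 64), dtype=np.int16)
data3d[:, 16:48, 16:48] = -1000  # e.g. air in Hounsfield units
# image = get_sitk_image_from_ndarray(data3d)
# print(image.GetMetaData("0028|1052"))  # expected intercept for int16 input: -1024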
def _raw_read(self):
"""
Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only.
"""
data = self._raw_bytes
try:
data += self._socket.recv(8192)
except (socket_.error):
pass
output = data
written = libssl.BIO_write(self._rbio, data, len(data))
self._raw_bytes = data[written:]
return output | 0.003096 |
def _get_default_value_to_cache(self, xblock):
"""
Perform special logic to provide a field's default value for caching.
"""
try:
# pylint: disable=protected-access
return self.from_json(xblock._field_data.default(xblock, self.name))
except KeyError:
if self._default is UNIQUE_ID:
return self._check_or_enforce_type(self._calculate_unique_id(xblock))
else:
return self.default | 0.008048 |
def generate_molecule_object_dict(source, format, values):
"""Generate a dictionary that represents a Squonk MoleculeObject when
written as JSON
:param source: Molecules in molfile or smiles format
:param format: The format of the molecule. Either 'mol' or 'smiles'
:param values: Optional dict of values (properties) for the MoleculeObject
"""
m = {"uuid": str(uuid.uuid4()), "source": source, "format": format}
if values:
m["values"] = values
return m | 0.002008 |
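Assuming the function above is importable, wrapping a SMILES string looks like this; the uuid field differs on every call.
mol = generate_molecule_object_dict('CCO', 'smiles', {'name': 'ethanol'})
print(mol['format'], mol['values']['name'])  # smiles ethanol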
def absent(
name,
region,
user=None,
opts=False):
'''
Remove the named SQS queue if it exists.
name
Name of the SQS queue.
region
Region to remove the queue from
user
Name of the user performing the SQS operations
opts
Include additional arguments and options to the aws command line
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
does_exist = __salt__['aws_sqs.queue_exists'](name, region, opts, user)
if does_exist:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'AWS SQS queue {0} is set to be removed'.format(
name)
return ret
removed = __salt__['aws_sqs.delete_queue'](name, region, opts, user)
if removed['retcode'] == 0:
ret['changes']['removed'] = removed['stdout']
else:
ret['result'] = False
ret['comment'] = removed['stderr']
else:
ret['comment'] = '{0} does not exist in {1}'.format(name, region)
return ret | 0.000904 |
def log_file_list_handler(self, **kwargs): # noqa: E501
"""log_file_list_handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.log_file_list_handler(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.log_file_list_handler_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.log_file_list_handler_with_http_info(**kwargs) # noqa: E501
return data | 0.0025 |
def get(self):
        '''taobao.time.get Get the shop categories displayed on the storefront
        Get the current time of the Taobao system'''
request = TOPRequest('taobao.time.get')
self.create(self.execute(request))
return self.time | 0.015 |
def strip_filter(value):
'''
Strips HTML tags from strings according to SANITIZER_ALLOWED_TAGS,
SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in
settings.
Example usage:
{% load sanitizer %}
{{ post.content|strip_html }}
'''
if isinstance(value, basestring):
value = bleach.clean(value, tags=ALLOWED_TAGS,
attributes=ALLOWED_ATTRIBUTES,
styles=ALLOWED_STYLES, strip=True)
return value | 0.003876 |
def _import_class(self, class_path):
"""Try and import the specified namespaced class.
:param str class_path: The full path to the class (foo.bar.Baz)
:rtype: class
"""
LOGGER.debug('Importing %s', class_path)
try:
return utils.import_namespaced_class(class_path)
except ImportError as error:
LOGGER.critical('Could not import %s: %s', class_path, error)
return None | 0.004348 |
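utils.import_namespaced_class is not shown in this snippet; a helper with that name usually amounts to the following sketch (an assumption, not the project's actual code).
import importlib

def import_namespaced_class(class_path):
    # Split 'pkg.module.ClassName' into module path and attribute name.
    module_name, class_name = class_path.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), class_name)

print(import_namespaced_class('collections.OrderedDict'))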
def publisher(self_url=None, hub_url=None):
"""This decorator makes it easier to implement a websub publisher. You use
it on an endpoint, and Link headers will automatically be added. To also
include these links in your template html/atom/rss (and you should!) you
can use the following to get the raw links:
- {{ websub_self_url }}
- {{ websub_hub_url }}
And the following to get them wrapped in <link tags>:
- {{ websub_self_link }}
- {{ websub_hub_link }}
If hub_url is not given, the hub needs to be a flask_websub one and the
hub and publisher need to share their application for the url to be
auto-discovered. If that is not the case, you need to set
config['HUB_URL'].
If self_url is not given, the url of the current request will be used. Note
that this includes url query arguments. If this is not what you want,
override it.
"""
def decorator(topic_view):
@functools.wraps(topic_view)
def wrapper(*args, **kwargs):
nonlocal hub_url, self_url
if not self_url:
self_url = request.url
if not hub_url:
try:
hub_url = url_for('websub_hub.endpoint', _external=True)
except BuildError:
hub_url = current_app.config['HUB_URL']
stack.top.websub_self_url = self_url
stack.top.websub_hub_url = hub_url
stack.top.websub_self_link = Markup(SELF_LINK % self_url)
stack.top.websub_hub_link = Markup(HUB_LINK % hub_url)
resp = make_response(topic_view(*args, **kwargs))
resp.headers.add('Link', HEADER_VALUE % (self_url, hub_url))
return resp
return wrapper
return decorator | 0.00056 |
def bytes_to_number(b, endian='big'):
"""
Convert a string to an integer.
:param b:
String or bytearray to convert.
:param endian:
Byte order to convert into ('big' or 'little' endian-ness, default
'big')
Assumes bytes are 8 bits.
This is a special-case version of string_to_number with a full base-256
ASCII alphabet. It is the reverse of ``number_to_bytes(n)``.
Examples::
>>> bytes_to_number(b'*')
42
>>> bytes_to_number(b'\\xff')
255
>>> bytes_to_number(b'\\x01\\x00')
256
>>> bytes_to_number(b'\\x00\\x01', endian='little')
256
"""
if endian == 'big':
b = reversed(b)
n = 0
for i, ch in enumerate(bytearray(b)):
n ^= ch << i * 8
return n | 0.001241 |
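A possible inverse consistent with the byte-order convention above (a sketch, not necessarily the library's own number_to_bytes); the final print assumes bytes_to_number from the snippet is in scope.
def number_to_bytes(n, endian='big'):
    out = bytearray()
    while n:
        out.append(n & 0xFF)
        n >>= 8
    if endian == 'big':
        out.reverse()
    return bytes(out) or b'\x00'

print(number_to_bytes(256))                  # b'\x01\x00'
print(bytes_to_number(number_to_bytes(42)))  # 42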
def get_available_mfds():
    '''
    Returns a dictionary with the available MFD classes
    keyed by class name (sorted by name)
    '''
mfds = {}
for fname in os.listdir(os.path.dirname(__file__)):
if fname.endswith('.py'):
modname, _ext = os.path.splitext(fname)
mod = importlib.import_module(
'openquake.hmtk.faults.mfd.' + modname)
for cls in mod.__dict__.values():
if inspect.isclass(cls) and issubclass(cls, BaseMFDfromSlip):
mfds[cls.__name__] = cls
return dict((k, mfds[k]) for k in sorted(mfds)) | 0.001647 |