def _RemoveUsers(self, remove_users):
"""Deprovision Linux user accounts that do not appear in account metadata.
Args:
remove_users: list, the username strings of the Linux accounts to remove.
"""
for username in remove_users:
self.utils.RemoveUser(username)
self.user_ssh_keys.pop(username, None)
self.invalid_users -= set(remove_users)
def get_balance(self, address, api_token):
"""
returns the balance in wei
with some inspiration from PyWallet
"""
broadcast_url = self.base_url + '?module=account&action=balance'
broadcast_url += '&address=%s' % address
broadcast_url += '&tag=latest'
if api_token:
broadcast_url += '&apikey=%s' % api_token
response = requests.get(broadcast_url)
if int(response.status_code) == 200:
balance = int(response.json().get('result', None))
logging.info('Balance check succeeded: %s', response.json())
return balance
raise BroadcastError(response.text)
def write_documentation(self, output_file):
"Issue documentation report on output_file file like object"
if not self.is_opened():
raise helpers.HIDError("Device has to be opened to get documentation")
#format
class CompoundVarDict(object):
"""Compound variables dictionary.
Keys are strings mapping variables.
If any string has a '.' on it, it means that is an
object with an attribute. The attribute name will be
used then as the returned item value.
"""
def __init__(self, parent):
self.parent = parent
def __getitem__(self, key):
if '.' not in key:
return self.parent[key]
else:
all_keys = key.split('.')
curr_var = self.parent[all_keys[0]]
for item in all_keys[1:]:
new_var = getattr(curr_var, item)
curr_var = new_var
return new_var
dev_vars = vars(self)
dev_vars['main_usage_str'] = repr(
usage_pages.HidUsage(self.hid_caps.usage_page,
self.hid_caps.usage) )
output_file.write( """\n\
HID device documentation report
===============================
Top Level Details
-----------------
Manufacturer String: %(vendor_name)s
Product String: %(product_name)s
Serial Number: %(serial_number)s
Vendor ID: 0x%(vendor_id)04x
Product ID: 0x%(product_id)04x
Version number: 0x%(version_number)04x
Device Path: %(device_path)s
Device Instance Id: %(instance_id)s
Parent Instance Id: %(parent_instance_id)s
Top level usage: Page=0x%(hid_caps.usage_page)04x, Usage=0x%(hid_caps.usage)02x
Usage identification: %(main_usage_str)s
Link collections: %(hid_caps.number_link_collection_nodes)d collection(s)
Reports
-------
Input Report
~~~~~~~~~~~~
Length: %(hid_caps.input_report_byte_length)d byte(s)
Buttons: %(hid_caps.number_input_button_caps)d button(s)
Values: %(hid_caps.number_input_value_caps)d value(s)
Output Report
~~~~~~~~~~~~~
Length: %(hid_caps.output_report_byte_length)d byte(s)
Buttons: %(hid_caps.number_output_button_caps)d button(s)
Values: %(hid_caps.number_output_value_caps)d value(s)
Feature Report
~~~~~~~~~~~~~~
Length: %(hid_caps.feature_report_byte_length)d byte(s)
Buttons: %(hid_caps.number_feature_button_caps)d button(s)
Values: %(hid_caps.number_feature_value_caps)d value(s)
""" % CompundVarDict(dev_vars)) #better than vars()!
#return
# inspect caps
for report_kind in [winapi.HidP_Input,
winapi.HidP_Output, winapi.HidP_Feature]:
all_usages = self.usages_storage.get(report_kind, [])
if all_usages:
output_file.write('*** %s Caps ***\n\n' % {
winapi.HidP_Input : "Input",
winapi.HidP_Output : "Output",
winapi.HidP_Feature : "Feature"
}[report_kind])
# normalize usages to allow sorting by usage or min range value
for item in all_usages:
if getattr(item, 'usage', None) is not None:
item.flat_id = item.usage
elif getattr(item, 'usage_min', None) is not None:
item.flat_id = item.usage_min
else:
item.flat_id = None
all_usages = sorted(all_usages, key=attrgetter('usage_page', 'flat_id'))
for usage_item in all_usages:
# remove helper attribute
del usage_item.flat_id
all_items = usage_item.inspect()
# sort first by 'usage_page'...
usage_page = all_items["usage_page"]
del all_items["usage_page"]
if "usage" in all_items:
usage = all_items["usage"]
output_file.write(" Usage {0} ({0:#x}), "\
"Page {1:#x}\n".format(usage, usage_page))
output_file.write(" ({0})\n".format(
repr(usage_pages.HidUsage(usage_page, usage))) )
del all_items["usage"]
elif 'usage_min' in all_items:
usage = (all_items["usage_min"], all_items["usage_max"])
output_file.write(" Usage Range {0}~{1} ({0:#x}~{1:#x}),"
" Page {2:#x} ({3})\n".format(
usage[0], usage[1], usage_page,
str(usage_pages.UsagePage(usage_page))) )
del all_items["usage_min"]
del all_items["usage_max"]
else:
raise AttributeError("Expecting any usage id")
attribs = list( all_items.keys() )
attribs.sort()
for key in attribs:
if 'usage' in key:
output_file.write("{0}{1}: {2} ({2:#x})\n".format(' '*8,
key, all_items[key]))
else:
output_file.write("{0}{1}: {2}\n".format(' '*8,
key, all_items[key]))
output_file.write('\n')
def thermal_conductivity(self, temperature, volume):
"""
Eq(17) in 10.1103/PhysRevB.90.174107
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: thermal conductivity in W/K/m
"""
gamma = self.gruneisen_parameter(temperature, volume)
theta_d = self.debye_temperature(volume) # K
theta_a = theta_d * self.natoms**(-1./3.) # K
prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3)
# kg/K^3/s^3
prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass
kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228)
# kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10
# = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10
kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10
return kappa
def index_service(self, service_id):
"""
Index a service in search engine.
"""
from hypermap.aggregator.models import Service
service = Service.objects.get(id=service_id)
if not service.is_valid:
LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id)
return
LOGGER.debug('Indexing service %s' % service.id)
layer_to_process = service.layer_set.all()
for layer in layer_to_process:
if not settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id, use_cache=True)
else:
index_layer(layer.id)
def save(self, path, compressed=True, exist_ok=False):
"""
Save the GADDAG to file.
Args:
path: path to save the GADDAG to.
compressed: compress the saved GADDAG using gzip.
exist_ok: overwrite existing file at `path`.
"""
path = os.path.expandvars(os.path.expanduser(path))
if os.path.isfile(path) and not exist_ok:
raise OSError(17, os.strerror(17), path)
if os.path.isdir(path):
path = os.path.join(path, "out.gdg")
if compressed:
bytes_written = cgaddag.gdg_save_compressed(self.gdg, path.encode("ascii"))
else:
bytes_written = cgaddag.gdg_save(self.gdg, path.encode("ascii"))
if bytes_written == -1:
errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value
raise OSError(errno, os.strerror(errno), path)
return bytes_written
def get_required_setting(setting, value_re, invalid_msg):
"""
Return a constant from ``django.conf.settings``. The `setting`
argument is the constant name, the `value_re` argument is a regular
expression used to validate the setting value and the `invalid_msg`
argument is used as exception message if the value is not valid.
"""
try:
value = getattr(settings, setting)
except AttributeError:
raise AnalyticalException("%s setting: not found" % setting)
if not value:
raise AnalyticalException("%s setting is not set" % setting)
value = str(value)
if not value_re.search(value):
raise AnalyticalException("%s setting: %s: '%s'"
% (setting, invalid_msg, value))
return value
def fetch_url(url):
"""
Fetch the given url, strip formfeeds and decode
it into the defined encoding
"""
with closing(urllib.urlopen(url)) as f:
if f.code == 200:
response = f.read()
return strip_formfeeds(response).decode(ENCODING)
def forward(self, pred, target):
"""Compute the loss model.
:param pred: predicted Variable
:param target: Target Variable
:return: Loss
"""
loss = th.FloatTensor([0])
for i in range(1, self.moments):
mk_pred = th.mean(th.pow(pred, i), 0)
mk_tar = th.mean(th.pow(target, i), 0)
loss.add_(th.mean((mk_pred - mk_tar) ** 2)) # L2
return loss
def round_sig_error2(x, ex1, ex2, n):
'''Find min(ex1,ex2) rounded to n sig-figs and make the floating point x
and max(ex1,ex2) match the number of decimals.'''
minerr = min(ex1,ex2)
minstex = round_sig(minerr,n)
if minstex.find('.') < 0:
extra_zeros = len(minstex) - n
sigfigs = len(str(int(x))) - extra_zeros
stx = round_sig(x,sigfigs)
maxstex = round_sig(max(ex1,ex2),sigfigs)
else:
num_after_dec = len(minstex.split('.')[1])
stx = ("%%.%df" % num_after_dec) % (x)
maxstex = ("%%.%df" % num_after_dec) % (max(ex1,ex2))
if ex1 < ex2:
return stx,minstex,maxstex
else:
return stx,maxstex,minstex
def skip(reason):
"""The skip decorator allows for you to always bypass a test.
:param reason: Expects a string
"""
def decorator(test_func):
if not isinstance(test_func, (type, ClassObjType)):
func_data = None
if test_func.__name__ == 'DECORATOR_ONCALL':
# Call down and save the results
func_data = test_func()
@functools.wraps(test_func)
def skip_wrapper(*args, **kwargs):
other_data = {
'real_func': func_data[0] if func_data else test_func,
'metadata': func_data[1] if func_data else None
}
raise TestSkippedException(test_func, reason, other_data)
test_func = skip_wrapper
return test_func
return decorator
def _iter_config_props(cls):
"""Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) """
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop
def get_all_role_config_groups(resource_root, service_name,
cluster_name="default"):
"""
Get all role config groups in the specified service.
@param resource_root: The root Resource object.
@param service_name: Service name.
@param cluster_name: Cluster name.
@return: A list of ApiRoleConfigGroup objects.
@since: API v3
"""
return call(resource_root.get,
_get_role_config_groups_path(cluster_name, service_name),
              ApiRoleConfigGroup, True, api_version=3)
def get_checksum32(oqparam, hazard=False):
"""
Build an unsigned 32 bit integer from the input files of a calculation.
:param oqparam: an OqParam instance
:param hazard: if True, consider only the hazard files
:returns: the checksum
"""
# NB: using adler32 & 0xffffffff is the documented way to get a checksum
# which is the same between Python 2 and Python 3
checksum = 0
for fname in get_input_files(oqparam, hazard):
checksum = _checksum(fname, checksum)
if hazard:
hazard_params = []
for key, val in vars(oqparam).items():
if key in ('rupture_mesh_spacing', 'complex_fault_mesh_spacing',
'width_of_mfd_bin', 'area_source_discretization',
'random_seed', 'ses_seed', 'truncation_level',
'maximum_distance', 'investigation_time',
'number_of_logic_tree_samples', 'imtls',
'ses_per_logic_tree_path', 'minimum_magnitude',
'prefilter_sources', 'sites',
'pointsource_distance', 'filter_distance'):
hazard_params.append('%s = %s' % (key, val))
data = '\n'.join(hazard_params).encode('utf8')
checksum = zlib.adler32(data, checksum) & 0xffffffff
return checksum
def validate_kwargs(func, kwargs):
"""Validate arguments to be supplied to func."""
func_name = func.__name__
argspec = inspect.getargspec(func)
all_args = argspec.args[:]
defaults = list(argspec.defaults or [])
# ignore implicit 'self' argument
if inspect.ismethod(func) and all_args[:1] == ['self']:
all_args[:1] = []
# don't require arguments that have defaults
if defaults:
required = all_args[:-len(defaults)]
else:
required = all_args[:]
# translate 'foo_' to avoid reserved names like 'id'
trans = {
arg: arg.endswith('_') and arg[:-1] or arg
for arg
in all_args
}
for key in list(kwargs):
key_adj = '%s_' % key
if key_adj in all_args:
kwargs[key_adj] = kwargs.pop(key)
# figure out what we're missing
supplied = sorted(kwargs)
missing = [
trans.get(arg, arg) for arg in required
if arg not in supplied
]
if missing:
raise MeteorError(
400,
func.err,
'Missing required arguments to %s: %s' % (
func_name,
' '.join(missing),
),
)
# figure out what is extra
extra = [
arg for arg in supplied
if arg not in all_args
]
if extra:
raise MeteorError(
400,
func.err,
'Unknown arguments to %s: %s' % (func_name, ' '.join(extra)),
    )
def uncomment_lines(lines):
"""Uncomment the given list of lines and return them. The first hash mark
following any amount of whitespace will be removed on each line."""
ret = []
for line in lines:
ws_prefix, rest, ignore = RE_LINE_SPLITTER_UNCOMMENT.match(line).groups()
ret.append(ws_prefix + rest)
return ''.join(ret)
def calculate_affinity(user1, user2, round=False): # pragma: no cover
"""
Quick one-off affinity calculations.
Creates an instance of the ``MALAffinity`` class with ``user1``,
then calculates affinity with ``user2``.
:param str user1: First user
:param str user2: Second user
:param round: Decimal places to round affinity values to.
Specify ``False`` for no rounding
:type round: int or False
:return: (float affinity, int shared)
:rtype: tuple
"""
return MALAffinity(base_user=user1, round=round).calculate_affinity(user2)
def eth_call(self, from_, to=None, gas=None,
gas_price=None, value=None, data=None,
block=BLOCK_TAG_LATEST):
"""https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call
:param from_: From account address
:type from_: str
:param to: To account address (optional)
:type to: str
:param gas: Gas amount for current transaction (optional)
:type gas: int
:param gas_price: Gas price for current transaction (optional)
:type gas_price: int
:param value: Amount of ether to send (optional)
:type value: int
:param data: Additional data for transaction (optional)
:type data: hex
:param block: Block tag or number (optional)
:type block: int or BLOCK_TAGS
:rtype: str
"""
block = validate_block(block)
obj = {}
obj['from'] = from_
if to is not None:
obj['to'] = to
if gas is not None:
obj['gas'] = hex(gas)
if gas_price is not None:
obj['gasPrice'] = hex(gas_price)
if value is not None:
obj['value'] = hex(ether_to_wei(value))
if data is not None:
obj['data'] = data
return (yield from self.rpc_call('eth_call', [obj, block]))
def sendPinnedLocation(
self, location, message=None, thread_id=None, thread_type=None
):
"""
Sends a given location to a thread as a pinned location
:param location: Location to send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type location: models.LocationAttachment
:type message: models.Message
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent message
:raises: FBchatException if request failed
"""
self._sendLocation(
location=location,
current=False,
message=message,
thread_id=thread_id,
thread_type=thread_type,
        )
def get_canonical_absolute_expanded_path(path):
"""Get the canonical form of the absolute path from a possibly relative path
(which may have symlinks, etc.)"""
return os.path.normcase(
os.path.normpath(
os.path.realpath( # remove any symbolic links
os.path.abspath( # may not be needed with realpath, to be safe
                    os.path.expanduser(path)))))
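A quick usage sketch: the tilde is expanded and redundant path components are collapsed, so the exact output depends on the current user and platform.
print(get_canonical_absolute_expanded_path('~/projects/../projects/./app'))
# e.g. '/home/alice/projects/app' (user- and platform-dependent)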
def delta_cdf(self, d_min, d_max, n=None):
r'''Computes the difference in cumulative distribution function between
two particle size diameters.
.. math::
\Delta Q_n = Q_n(d_{max}) - Q_n(d_{min})
Parameters
----------
d_min : float
Lower particle size diameter, [m]
d_max : float
Upper particle size diameter, [m]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
delta_cdf : float
The difference in the cumulative distribution function for the two
diameters specified, [-]
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.delta_cdf(1e-6, 1e-5)
0.9165280099853876
'''
return self.cdf(d_max, n=n) - self.cdf(d_min, n=n)
def writeGif(
filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
a part of the image changes. One can also give a list of x-y
coordinates if you want to do the cropping yourself. The default
is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0, 0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(xy) doesn't match amount of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 128
return hparams
def GetDate(text=None, selected=None, **kwargs):
"""Prompt the user for a date.
This will raise a Zenity Calendar Dialog for the user to pick a date.
It will return a datetime.date object with the date or None if the
user hit cancel.
text - Text to be displayed in the calendar dialog.
selected - A datetime.date object that will be the pre-selected date.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--date-format=%d/%m/%Y']
if text:
args.append('--text=%s' % text)
if selected:
args.append('--day=%d' % selected.day)
args.append('--month=%d' % selected.month)
args.append('--year=%d' % selected.year)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--calendar', *args)
if p.wait() == 0:
retval = p.stdout.read().strip()
day, month, year = [int(x) for x in retval.split('/')]
return date(year, month, day)
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \
-> Tuple[bool, str, dict]:
'''Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info`
'''
if not self._url_filter:
return True, 'nofilters', None
test_info = self._url_filter.test_info(url_info, url_record)
verdict = test_info['verdict']
if verdict:
reason = 'filters'
elif is_redirect and self.is_only_span_hosts_failed(test_info):
verdict = True
reason = 'redirect'
else:
reason = 'filters'
return verdict, reason, test_info
def eni_absent(
name,
release_eip=False,
region=None,
key=None,
keyid=None,
profile=None):
'''
Ensure the EC2 ENI is absent.
.. versionadded:: 2016.3.0
name
Name tag associated with the ENI.
release_eip
True/False - release any EIP associated with the ENI
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
r = __salt__['boto_ec2.get_network_interface'](
name=name, region=region, key=key, keyid=keyid, profile=profile
)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
r['error']['message']
)
return ret
if not r['result']:
if __opts__['test']:
ret['comment'] = 'ENI is set to be deleted.'
ret['result'] = None
return ret
else:
if __opts__['test']:
ret['comment'] = 'ENI is set to be deleted.'
if release_eip and 'allocationId' in r['result']:
ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released'])
ret['result'] = None
return ret
if 'id' in r['result']['attachment']:
result_detach = __salt__['boto_ec2.detach_network_interface'](
name=name, force=True, region=region, key=key,
keyid=keyid, profile=profile
)
if 'error' in result_detach:
ret['result'] = False
ret['comment'] = 'Failed to detach ENI: {0}'.format(
result_detach['error']['message']
)
return ret
# TODO: Ensure the detach occurs before continuing
result_delete = __salt__['boto_ec2.delete_network_interface'](
name=name, region=region, key=key,
keyid=keyid, profile=profile
)
if 'error' in result_delete:
ret['result'] = False
ret['comment'] = 'Failed to delete ENI: {0}'.format(
result_delete['error']['message']
)
return ret
ret['comment'] = 'Deleted ENI {0}'.format(name)
ret['changes']['id'] = None
if release_eip and 'allocationId' in r['result']:
_ret = __salt__['boto_ec2.release_eip_address'](public_ip=None,
allocation_id=r['result']['allocationId'],
region=region,
key=key,
keyid=keyid,
profile=profile)
if not _ret:
ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.'])
ret['result'] = False
return ret
else:
ret['comment'] = ' '.join([ret['comment'], 'EIP released.'])
ret['changes']['eip released'] = True
return ret
def _add(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.add_column(ovsrec_row, column, value)
ctx.invalidate_cache()
def get_authorization_query_session(self):
"""Gets the ``OsidSession`` associated with the authorization query service.
return: (osid.authorization.AuthorizationQuerySession) - an
``AuthorizationQuerySession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_authorization_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_authorization_query()`` is ``true``.*
"""
if not self.supports_authorization_query():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AuthorizationQuerySession(runtime=self._runtime)
def get_item_metadata(self, handle):
"""Return dictionary containing all metadata associated with handle.
In other words all the metadata added using the ``add_item_metadata``
method.
:param handle: handle for accessing an item before the dataset is
frozen
:returns: dictionary containing item metadata
"""
if not os.path.isdir(self._metadata_fragments_abspath):
return {}
prefix = self._handle_to_fragment_absprefixpath(handle)
def list_abspaths(dirname):
for f in os.listdir(dirname):
yield os.path.join(dirname, f)
files = [f for f in list_abspaths(self._metadata_fragments_abspath)
if f.startswith(prefix)]
metadata = {}
for f in files:
key = f.split('.')[-2] # filename: identifier.key.json
with open(f) as fh:
value = json.load(fh)
metadata[key] = value
return metadata
def check_spam(self, ip=None, email=None, name=None, login=None, realname=None,
subject=None, body=None, subject_type='plain', body_type='plain'):
""" http://api.yandex.ru/cleanweb/doc/dg/concepts/check-spam.xml
subject_type = plain|html|bbcode
body_type = plain|html|bbcode
"""
data = {'ip': ip, 'email': email, 'name': name, 'login': login, 'realname': realname,
'body-%s' % body_type: body, 'subject-%s' % subject_type: subject}
r = self.request('post', 'http://cleanweb-api.yandex.ru/1.0/check-spam', data=data)
root = ET.fromstring(r.content)
return {
'id': root.findtext('id'),
'spam_flag': yesnobool(root.find('text').attrib['spam-flag']),
'links': [(link.attrib['href'], yesnobool(link.attrib['spam-flag'])) for link in root.findall('./links/link')]
        }
def create_from_tuples(self, tuples, **args):
"""
Creates from a list of (subj,subj_name,obj) tuples
"""
amap = {}
subject_label_map = {}
for a in tuples:
subj = a[0]
subject_label_map[subj] = a[1]
if subj not in amap:
amap[subj] = []
amap[subj].append(a[2])
aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args)
return aset
def launch_ipython_legacy_shell(args): # pylint: disable=unused-argument
"""Open the SolveBio shell (IPython wrapper) for older IPython versions"""
try:
from IPython.config.loader import Config
except ImportError:
_print("The SolveBio Python shell requires IPython.\n"
"To install, type: 'pip install ipython'")
return False
try:
# see if we're already inside IPython
get_ipython # pylint: disable=undefined-variable
except NameError:
cfg = Config()
prompt_config = cfg.PromptManager
prompt_config.in_template = '[SolveBio] In <\\#>: '
prompt_config.in2_template = ' .\\D.: '
prompt_config.out_template = 'Out<\\#>: '
banner1 = '\nSolveBio Python shell started.'
exit_msg = 'Quitting SolveBio shell.'
else:
_print("Running nested copies of IPython.")
cfg = Config()
banner1 = exit_msg = ''
# First import the embeddable shell class
try:
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
# pylint: disable=import-error,no-name-in-module
from IPython.frontend.terminal.embed import InteractiveShellEmbed
path = os.path.dirname(os.path.abspath(__file__))
init_file = '{}/ipython_init.py'.format(path)
exec(compile(open(init_file).read(), init_file, 'exec'),
globals(), locals())
InteractiveShellEmbed(config=cfg, banner1=banner1, exit_msg=exit_msg)()
def point_in_multipolygon(point, multipoly):
"""
Validate whether the point is located in a multipolygon (donut polygons are not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def AddKeywordsForName(self, name, keywords):
"""Associates keywords with name.
Records that keywords are associated with name.
Args:
name: A name which should be associated with some keywords.
keywords: A collection of keywords to associate with name.
"""
data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)
def get_tags(self):
"""Returns a list of set of tags."""
return sorted([frozenset(meta_graph.meta_info_def.tags)
                   for meta_graph in self.meta_graphs])
def reads(paths, filename='data.h5', options=None, **keywords):
""" Reads data from an HDF5 file (high level).
High level function to read one or more pieces of data from an HDF5
file located at the paths specified in `paths` into Python
types. Each path is specified as a POSIX style path where the data
to read is located.
There are various options that can be used to influence how the data
is read. They can be passed as an already constructed ``Options``
into `options` or as additional keywords that will be used to make
one by ``options = Options(**keywords)``.
Paths are POSIX style and can either be given directly as ``str`` or
``bytes``, or the separated path can be given as an iterable of
``str`` and ``bytes``. Each part of a separated path is escaped
using ``utilities.escape_path``. Otherwise, the path is assumed to
be already escaped. Escaping is done so that targets with a part
that starts with one or more periods, contain slashes, and/or
contain nulls can be used without causing the wrong Group to be
looked in or the wrong target to be looked at. It essentially allows
one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving
around in the Dataset hierarchy.
Parameters
----------
paths : iterable of paths
An iterable of paths to read data from. Each must be a POSIX
style path where the directory name is the Group to put it in
and the basename is the name to write it to. The format of
paths is described in the paragraph above.
filename : str, optional
The name of the HDF5 file to read data from.
options : Options, optional
The options to use when reading. Is mutually exclusive with any
additional keyword arguments given (set to ``None`` or don't
provide to use them).
**keywords :
If `options` was not provided or was ``None``, these are used as
arguments to make a ``Options``.
Returns
-------
datas : iterable
An iterable holding the piece of data for each path in `paths`
in the same order.
Raises
------
exceptions.CantReadError
If reading the data can't be done.
See Also
--------
utilities.process_path
utilities.escape_path
read : Reads just a single piece of data
writes
write
Options
utilities.read_data : Low level version.
"""
# Pack the different options into an Options class if an Options was
# not given. By default, the matlab_compatible option is set to
# False. So, if it wasn't passed in the keywords, this needs to be
# added to override the default value (True) for a new Options.
if not isinstance(options, Options):
kw = copy.deepcopy(keywords)
if 'matlab_compatible' not in kw:
kw['matlab_compatible'] = False
options = Options(**kw)
# Process the paths and stuff the group names and target names as
# tuples into toread.
toread = []
for p in paths:
groupname, targetname = utilities.process_path(p)
# Pack them into toread
toread.append((groupname, targetname))
# Open the hdf5 file and start reading the data. This is all wrapped
# in a try block, so that the file can be closed if any errors
# happen (the error is re-raised).
try:
f = None
f = h5py.File(filename, mode='r')
# Read the data item by item
datas = []
for groupname, targetname in toread:
# Check that the containing group is in f and is indeed a
# group. If it isn't an error needs to be thrown.
if groupname not in f \
or not isinstance(f[groupname], h5py.Group):
raise exceptions.CantReadError( \
'Could not find containing Group ' \
+ groupname + '.')
# Hand off everything to the low level reader.
datas.append(utilities.read_data(f, f[groupname],
targetname, options))
except:
raise
finally:
if f is not None:
f.close()
return datas
def expandtree(self, model=None):
"""
Goes through the expand options associated with this context and
returns a trie of data.
:param model: subclass of <orb.Model> || None
:return: <dict>
"""
if model and not self.columns:
schema = model.schema()
defaults = schema.columns(flags=orb.Column.Flags.AutoExpand).keys()
defaults += schema.collectors(flags=orb.Collector.Flags.AutoExpand).keys()
else:
defaults = []
expand = self.expand or defaults
if not expand:
return {}
def build_tree(parts, tree):
tree.setdefault(parts[0], {})
if len(parts) > 1:
build_tree(parts[1:], tree[parts[0]])
tree = {}
for branch in expand:
build_tree(branch.split('.'), tree)
return tree
def find_class_in_list(klass, lst):
"""
Returns the first occurrence of an instance of type `klass` in
the given list, or None if no such instance is present.
"""
filtered = list(filter(lambda x: x.__class__ == klass, lst))
if filtered:
return filtered[0]
return None
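A minimal usage sketch; note the comparison is on the exact class, so instances of subclasses are not matched.
assert find_class_in_list(int, ['a', 3, 4.0]) == 3
assert find_class_in_list(dict, ['a', 3]) is None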
def chunk(iterator, max_size):
"""Chunk a list/set/etc.
:param iter iterator: The iterable object to chunk.
:param int max_size: Max size of each chunk. Remainder chunk may be smaller.
:return: Yield list of items.
:rtype: iter
"""
gen = iter(iterator)
while True:
chunked = list()
for i, item in enumerate(gen):
chunked.append(item)
if i >= max_size - 1:
break
if not chunked:
return
yield chunked
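A short usage sketch: splitting an iterable into pieces of at most four items, with a smaller remainder chunk at the end.
for piece in chunk(range(10), 4):
    print(piece)
# [0, 1, 2, 3]
# [4, 5, 6, 7]
# [8, 9]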
def slfo(wnd, res=50, neighbors=2, max_miss=.7, start_delta=1e-4):
"""
Side Lobe Fall Off (dB/oct).
Finds the side lobe peak fall off numerically in dB/octave by using the
``scipy.optimize.fmin`` function.
Hint
----
Originally, Harris rounded the results he found to a multiple of -6, you can
use the AudioLazy ``rint`` function for that: ``rint(falloff, 6)``.
Parameters
----------
res :
Zero-padding factor. 1 for no zero-padding, 2 for twice the length, etc..
neighbors :
Number of neighbors needed by ``get_peaks`` to define a peak.
max_miss :
Maximum percent of peaks that might be missed when approximating them
by a line.
start_delta :
Minimum acceptable value for an orthogonal deviation from the
approximation line to include a peak.
"""
# Finds all side lobe peaks, to find the "best" line for it afterwards
spectrum = dB20(rfft(wnd, res * len(wnd)))
peak_indices = list(get_peaks(spectrum, neighbors=neighbors))
log2_peak_indices = np.log2(peak_indices) # Base 2 ensures result in dB/oct
peaks = spectrum[peak_indices]
npeaks = len(peak_indices)
# This length (actually, twice the length) is the "weight" of each peak
lengths = np.array([0] + (1 - z **-2)(log2_peak_indices).skip(2).take(inf) +
[0]) # Extreme values weights to zero
max_length = sum(lengths)
# First guess for the polynomial "a*x + b" is at the center
idx = np.searchsorted(log2_peak_indices,
.5 * (log2_peak_indices[-1] + log2_peak_indices[0]))
a = ((peaks[idx+1] - peaks[idx]) /
(log2_peak_indices[idx+1] - log2_peak_indices[idx]))
b = peaks[idx] - a * log2_peak_indices[idx]
# Scoring for the optimization function
def score(vect, show=False):
a, b = vect
h = start_delta * (1 + a ** 2) ** .5 # Vertical deviation
while True:
pdelta = peaks - (a * log2_peak_indices + b)
peaks_idx_included = np.nonzero((pdelta < h) & (pdelta > -h))
missing = npeaks - len(peaks_idx_included[0])
if missing < npeaks * max_miss:
break
h *= 2
pdelta_included = pdelta[peaks_idx_included]
real_delta = max(pdelta_included) - min(pdelta_included)
total_length = sum(lengths[peaks_idx_included])
if show: # For debug
print(real_delta, len(peaks_idx_included[0]))
return -total_length / max_length + 4 * real_delta ** .5
a, b = so.fmin(score, [a, b], xtol=1e-12, ftol=1e-12, disp=False)
# # For Debug only
# score([a, b], show=True)
# plt.figure()
# plt.plot(log2_peak_indices, peaks, "x-")
# plt.plot(log2_peak_indices, a * log2_peak_indices + b)
# plt.show()
return a
def nhmmer(self, output_path, unpack, threads, evalue):
'''
nhmmer - Search input path using nhmmer
Parameters
----------
output_path : str
A string containing the path to the input sequences
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
threads : str
Number of threads to run. For compiling command line.
evalue : str
evalue to use. For compiling command line.
Returns
-------
output_table_list : array
Includes the name of the output domtblout table given by hmmer
'''
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: Expected 1 or more HMMs")
input_pipe = unpack.command_line()
searcher = NhmmerSearcher(threads, extra_args='--incE %s -E %s' % (evalue, evalue))
searcher.hmmsearch(input_pipe, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_nhmmer_table(x) for x in output_table_list]
return hmmtables, output_table_list
def call_good_cb(self):
"""
If good_cb returns True then keep it
:return:
"""
with LiveExecution.lock:
if self.good_cb and not self.good_cb():
self.good_cb = None
def all_simple_bb_paths(self, start_address, end_address):
"""Return a list of path between start and end address.
"""
bb_start = self._find_basic_block(start_address)
bb_end = self._find_basic_block(end_address)
paths = networkx.all_simple_paths(self._graph, source=bb_start.address, target=bb_end.address)
return ([self._bb_by_addr[addr] for addr in path] for path in paths)
def clear(self):
"""
Clears the dict.
"""
self.__values.clear()
self.__access_keys = []
self.__modified_times.clear()
def nEx(mt, x, n):
""" nEx : Returns the EPV of a pure endowment (deferred capital).
Pure endowment benefits are conditional on the survival of the policyholder. (v^n * npx) """
return mt.Dx[x + n] / mt.Dx[x]
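An illustrative check with hypothetical commutation values: since D_x = v**x * l_x, the ratio below is exactly v^n * npx as the docstring states.
class MockTable:  # hypothetical commutation column, for illustration only
    Dx = {60: 1000.0, 65: 700.0}
print(nEx(MockTable(), 60, 5))  # 0.7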
def create(hdf5, name, dtype, shape=(None,), compression=None,
fillvalue=0, attrs=None):
"""
:param hdf5: a h5py.File object
:param name: an hdf5 key string
:param dtype: dtype of the dataset (usually composite)
:param shape: shape of the dataset (can be extendable)
:param compression: None or 'gzip' are recommended
:param attrs: dictionary of attributes of the dataset
:returns: a HDF5 dataset
"""
if shape[0] is None: # extendable dataset
dset = hdf5.create_dataset(
name, (0,) + shape[1:], dtype, chunks=True, maxshape=shape,
compression=compression)
else: # fixed-shape dataset
dset = hdf5.create_dataset(name, shape, dtype, fillvalue=fillvalue,
compression=compression)
if attrs:
for k, v in attrs.items():
dset.attrs[k] = maybe_encode(v)
return dset
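A minimal usage sketch (assuming h5py and numpy are available): creating an extendable one-dimensional dataset that can later be resized as records are appended.
import h5py
import numpy
with h5py.File('/tmp/example.hdf5', 'w') as f:
    dset = create(f, 'values', numpy.float64, shape=(None,))
    print(dset.shape, dset.maxshape)  # (0,) (None,)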
def add_stream_logger(level=logging.DEBUG, name=None):
"""
Add a stream logger. This can be used for printing all SDK calls to stdout
while working in an interactive session. Note this is a logger for the
entire module, which will apply to all environments started in the same
session. If you need a specific logger pass a ``logfile`` to
:func:`~sdk.init`
Args:
level(int): :mod:`logging` log level
name(str): logger name, will default to the root logger.
Returns:
None
"""
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(get_default_log_formatter())
handler.setLevel(level)
logger.addHandler(handler)
def read_cache(stream):
"""Read a cache file from the given stream
:return: tuple(version, entries_dict, extension_data, content_sha)
* version is the integer version number
* entries dict is a dictionary which maps IndexEntry instances to a path at a stage
* extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
* content_sha is a 20 byte sha on all cache file contents"""
version, num_entries = read_header(stream)
count = 0
entries = {}
read = stream.read
tell = stream.tell
while count < num_entries:
beginoffset = tell()
ctime = unpack(">8s", read(8))[0]
mtime = unpack(">8s", read(8))[0]
(dev, ino, mode, uid, gid, size, sha, flags) = \
unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
path_size = flags & CE_NAMEMASK
path = read(path_size).decode(defenc)
real_size = ((tell() - beginoffset + 8) & ~7)
read((beginoffset + real_size) - tell())
entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
# entry_key would be the method to use, but we save the effort
entries[(path, entry.stage)] = entry
count += 1
# END for each entry
# the footer contains extension data and a sha on the content so far
# Keep the extension footer,and verify we have a sha in the end
# Extension data format is:
# 4 bytes ID
# 4 bytes length of chunk
# repeated 0 - N times
extension_data = stream.read(~0)
assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size"\
% len(extension_data)
content_sha = extension_data[-20:]
# truncate the sha in the end as we will dynamically create it anyway
extension_data = extension_data[:-20]
return (version, entries, extension_data, content_sha)
def __signalReceived(self, *args):
"""Received signal. Cancel previous timer and store args to be forwarded later."""
if self.__disconnecting:
return
with self.__lock:
self.__args = args
if self.__rateLimit == 0:
self.__timer.stop()
self.__timer.start((self.__delay * 1000) + 1)
else:
now = time.time()
if self.__lastFlushTime is None:
leakTime = 0
else:
lastFlush = self.__lastFlushTime
leakTime = max(0, (lastFlush + (1.0 / self.__rateLimit)) - now)
self.__timer.stop()
# Note: original was min() below.
timeout = (max(leakTime, self.__delay) * 1000) + 1
self.__timer.start(timeout)
def general_symbolic(target, eqn=None, arg_map=None):
r'''
A general function to interpret a sympy equation and evaluate the linear
components of the source term.
Parameters
----------
target : OpenPNM object
The OpenPNM object where the result will be applied.
eqn : sympy symbolic expression for the source terms
e.g. y = a*x**b + c
arg_map : Dict mapping the symbols in the expression to OpenPNM data
on the target. Must contain 'x' which is the independent variable.
e.g. arg_map={'a':'pore.a', 'b':'pore.b', 'c':'pore.c', 'x':'pore.x'}
Example
----------
>>> import openpnm as op
>>> from openpnm.models.physics import generic_source_term as gst
>>> import scipy as sp
>>> import sympy as _syp
>>> pn = op.network.Cubic(shape=[5, 5, 5], spacing=0.0001)
>>> water = op.phases.Water(network=pn)
>>> water['pore.a'] = 1
>>> water['pore.b'] = 2
>>> water['pore.c'] = 3
>>> water['pore.x'] = sp.random.random(water.Np)
>>> a, b, c, x = _syp.symbols('a,b,c,x')
>>> y = a*x**b + c
>>> arg_map = {'a':'pore.a', 'b':'pore.b', 'c':'pore.c', 'x':'pore.x'}
>>> water.add_model(propname='pore.general',
... model=gst.general_symbolic,
... eqn=y, arg_map=arg_map,
... regen_mode='normal')
>>> assert 'pore.general.rate' in water.props()
>>> assert 'pore.general.S1' in water.props()
>>> assert 'pore.general.S2' in water.props()
'''
# First make sure all the symbols have been allocated dict items
for arg in _syp.postorder_traversal(eqn):
if _syp.srepr(arg)[:6] == 'Symbol':
key = _syp.srepr(arg)[7:].strip('(').strip(')').strip("'")
if key not in arg_map.keys():
raise Exception('argument mapping incomplete, missing '+key)
if 'x' not in arg_map.keys():
raise Exception('argument mapping must contain "x" for the ' +
'independent variable')
# Get the data
data = {}
args = {}
for key in arg_map.keys():
data[key] = target[arg_map[key]]
# Callable functions
args[key] = _syp.symbols(key)
r, s1, s2 = _build_func(eqn, **args)
r_val = r(*data.values())
s1_val = s1(*data.values())
s2_val = s2(*data.values())
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
def _is_pid_running_on_windows(pid):
"""Check if a process is running on windows systems based on the pid."""
pid = str(pid)
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(r'tasklist /fi "PID eq {0}"'.format(pid),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
stdoutdata, stderrdata = process.communicate()
stdoutdata = to_text_string(stdoutdata)
process.kill()
check = pid in stdoutdata
return check
def karma(nick, rest):
"Return or change the karma value for some(one|thing)"
karmee = rest.strip('++').strip('--').strip('~~')
if '++' in rest:
Karma.store.change(karmee, 1)
elif '--' in rest:
Karma.store.change(karmee, -1)
elif '~~' in rest:
change = random.choice([-1, 0, 1])
Karma.store.change(karmee, change)
if change == 1:
return "%s karma++" % karmee
elif change == 0:
return "%s karma shall remain the same" % karmee
elif change == -1:
return "%s karma--" % karmee
elif '==' in rest:
t1, t2 = rest.split('==')
try:
Karma.store.link(t1, t2)
except SameName:
Karma.store.change(nick, -1)
return "Don't try to link a name to itself!"
except AlreadyLinked:
return "Those names were previously linked."
score = Karma.store.lookup(t1)
return "%s and %s are now linked and have a score of %s" % (t1, t2, score)
else:
karmee = rest or nick
score = Karma.store.lookup(karmee)
return "%s has %s karmas" % (karmee, score) | 0.031665 |
def _author_line(self):
"""
Helper method to concatenate author and institution values, if necessary
:return: string
"""
if self.author and self.institution:
return self.author + ";" + self.institution
elif self.author:
return self.author
else:
return self.institution
def get_access_flags_string(value):
"""
Transform an access flag field to the corresponding string
:param value: the value of the access flags
:type value: int
:rtype: string
"""
flags = []
for k, v in ACCESS_FLAGS.items():
if (k & value) == k:
flags.append(v)
return " ".join(flags) | 0.002924 |
def create_group(self, title = None, parent = None, image = 1,
y = 2999, mon = 12, d = 28, h = 23, min_ = 59,
s = 59):
"""This method creates a new group.
A group title is needed or no group will be created.
If a parent is given, the group will be created as a sub-group.
title must be a string, image an unsigned int >0 and parent a v1Group.
With y, mon, d, h, min_ and s you can set an expiration date like on
entries.
"""
if title is None:
raise KPError("Need a group title to create a group.")
elif type(title) is not str or image < 1 or(parent is not None and \
type(parent) is not v1Group) or type(image) is not int:
raise KPError("Wrong type or value for title or image or parent")
id_ = 1
for i in self.groups:
if i.id_ >= id_:
id_ = i.id_ + 1
group = v1Group(id_, title, image, self)
group.creation = datetime.now().replace(microsecond=0)
group.last_mod = datetime.now().replace(microsecond=0)
group.last_access = datetime.now().replace(microsecond=0)
if group.set_expire(y, mon, d, h, min_, s) is False:
group.set_expire()
# If no parent is given, just append the new group at the end
if parent is None:
group.parent = self.root_group
self.root_group.children.append(group)
group.level = 0
self.groups.append(group)
# Else insert the group behind the parent
else:
if parent in self.groups:
parent.children.append(group)
group.parent = parent
group.level = parent.level+1
self.groups.insert(self.groups.index(parent)+1, group)
else:
raise KPError("Given parent doesn't exist")
self._num_groups += 1
return True
def get(self, sid):
"""
Constructs a CertificateContext
:param sid: A string that uniquely identifies the Certificate.
:returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateContext
:rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateContext
"""
return CertificateContext(self._version, fleet_sid=self._solution['fleet_sid'], sid=sid, )
def get_file_type(variant_source):
"""Check what kind of file variant source is
Args:
variant_source (str): Path to variant source
Returns:
file_type (str): 'vcf', 'gemini' or 'unknown'
"""
file_type = 'unknown'
valid_vcf_suffixes = ('.vcf', '.vcf.gz')
if variant_source:
logger.debug("Check file type with file: {0}".format(variant_source))
if variant_source.endswith('.db'):
file_type = 'gemini'
logger.debug("File {0} is a gemini database".format(variant_source))
elif variant_source.endswith(valid_vcf_suffixes):
file_type = 'vcf'
logger.debug("File {0} is a vcf".format(variant_source))
else:
logger.debug("File is in a unknown format")
return file_type
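A brief usage sketch classifying a few paths by suffix (assuming the module's logger is configured).
print(get_file_type('samples.vcf.gz'))  # 'vcf'
print(get_file_type('variants.db'))     # 'gemini'
print(get_file_type('notes.txt'))       # 'unknown'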
def get(self, req, driver):
"""Get info of a network
Get info of a specific netowrk with id on special cloud
with:
:Param req
:Type object Request
"""
response = driver.get_network(req.params, id)
data = {
'action': "get",
'controller': "network",
'id': id,
'cloud': req.environ['calplus.cloud'],
'response': response
}
return data
def create_quiz_submission_start_quiz_taking_session(self, quiz_id, course_id, access_code=None, preview=None):
"""
Create the quiz submission (start a quiz-taking session).
Start taking a Quiz by creating a QuizSubmission which you can use to answer
questions and submit your answers.
<b>Responses</b>
* <b>200 OK</b> if the request was successful
* <b>400 Bad Request</b> if the quiz is locked
* <b>403 Forbidden</b> if an invalid access code is specified
* <b>403 Forbidden</b> if the Quiz's IP filter restriction does not pass
* <b>409 Conflict</b> if a QuizSubmission already exists for this user and quiz
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""ID"""
path["quiz_id"] = quiz_id
# OPTIONAL - access_code
"""Access code for the Quiz, if any."""
if access_code is not None:
data["access_code"] = access_code
# OPTIONAL - preview
"""Whether this should be a preview QuizSubmission and not count towards
the user's course record. Teachers only."""
if preview is not None:
data["preview"] = preview
self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions".format(**path), data=data, params=params, no_data=True)
def log_append(self, oldbinsha, message, newbinsha=None):
"""Append a logentry to the logfile of this ref
:param oldbinsha: binary sha this ref used to point to
:param message: A message describing the change
:param newbinsha: The sha the ref points to now. If None, our current commit sha
will be used
:return: added RefLogEntry instance"""
# NOTE: we use the committer of the currently active commit - this should be
# correct to allow overriding the committer on a per-commit level.
# See https://github.com/gitpython-developers/GitPython/pull/146
try:
committer_or_reader = self.commit.committer
except ValueError:
committer_or_reader = self.repo.config_reader()
# end handle newly cloned repositories
return RefLog.append_entry(committer_or_reader, RefLog.path(self), oldbinsha,
(newbinsha is None and self.commit.binsha) or newbinsha,
                                   message)
def _get_token_create_url(config):
'''
Create Vault url for token creation
'''
role_name = config.get('role_name', None)
auth_path = '/v1/auth/token/create'
base_url = config['url']
return '/'.join(x.strip('/') for x in (base_url, auth_path, role_name) if x)
def _sample_field(self, sample):
"""Returns string representation of sample-format values.
Raises:
KeyError: if requested sample is not defined.
"""
tag_values = self.sample_tag_values[sample].values()
if tag_values:
return ":".join(tag_values)
else:
return "." | 0.00578 |
def process_all(self, texts:Collection[str]) -> List[List[str]]:
"Process a list of `texts`."
if self.n_cpus <= 1: return self._process_all_1(texts)
with ProcessPoolExecutor(self.n_cpus) as e:
            return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
def get_many(self, type: Type[T], query: Mapping[str, Any], streaming: bool = False) -> Iterable[T]:
"""Gets a query from the data pipeline, which contains a request for multiple objects.
1) Extracts the query the sequence of data sources.
2) Inserts the results into the data sinks (if appropriate).
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True.
"""
LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if handlers is None:
raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
for handler in handlers:
try:
return handler.get_many(query, context, streaming)
except NotFoundError:
pass
raise NotFoundError("No source returned a query result!")
def correct_db_restart(self):
"""Ensure DB is consistent after unexpected restarts. """
LOG.info("Checking consistency of DB")
# Any Segments allocated that's not in Network or FW DB, release it
seg_netid_dict = self.service_segs.get_seg_netid_src(fw_const.FW_CONST)
vlan_netid_dict = self.service_vlans.get_seg_netid_src(
fw_const.FW_CONST)
for netid in seg_netid_dict:
net = self.get_network(netid)
fw_net = self.get_fw_by_netid(netid)
if not net or not fw_net:
if netid in vlan_netid_dict:
vlan_net = vlan_netid_dict[netid]
else:
vlan_net = None
self.delete_os_nwk_db(netid, seg_netid_dict[netid], vlan_net)
LOG.info("Allocated segment for net %s not in DB "
"returning", net)
return
# Any VLANs allocated that's not in Network or FW DB, release it
# For Virtual case, this list will be empty
for netid in vlan_netid_dict:
net = self.get_network(netid)
fw_net = self.get_fw_by_netid(netid)
if not net or not fw_net:
if netid in seg_netid_dict:
vlan_net = seg_netid_dict[netid]
else:
vlan_net = None
self.delete_os_nwk_db(netid, vlan_net, vlan_netid_dict[netid])
LOG.info("Allocated vlan for net %s not in DB returning",
net)
return
# Release all IP's from DB that has no NetID or SubnetID
self.service_in_ip.release_subnet_no_netid()
self.service_out_ip.release_subnet_no_netid()
# It leaves out following possibilities not covered by above.
# 1. Crash can happen just after creating FWID in DB (for init state)
# 2. Crash can happen after 1 + IP address allocation
# 3. Crash can happen after 2 + create OS network
# IP address allocated will be freed as above.
# Only OS network will remain for case 3.
# Also, create that FW DB entry only if that FWID didn't exist.
# Delete all dummy networks created for dummy router from OS if it's
# ID is not in NetDB
# Delete all dummy routers and its associated networks/subnetfrom OS
# if it's ID is not in FWDB
fw_dict = self.get_all_fw_db()
for fw_id in fw_dict:
rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (
fw_id[len(fw_id) - 4:])
net_list = self.os_helper.get_network_by_name(rtr_nwk)
# TODO(padkrish) Come back to finish this. Not sure of this.
# The router interface should be deleted first and then the network
# Try using show_router
for net in net_list:
# Check for if it's there in NetDB
net_db_item = self.get_network(net.get('id'))
if not net_db_item:
self.os_helper.delete_network_all_subnets(net.get('id'))
LOG.info("Router Network %s not in DB, returning",
net.get('id'))
return
rtr_name = fw_id[0:4] + fw_const.DUMMY_SERVICE_RTR + (
fw_id[len(fw_id) - 4:])
rtr_list = self.os_helper.get_rtr_by_name(rtr_name)
for rtr in rtr_list:
fw_db_item = self.get_fw_by_rtrid(rtr.get('id'))
if not fw_db_item:
# There should be only one
if not net_list:
LOG.error("net_list len is 0, router net not "
"found")
return
fw_type = fw_dict[fw_id].get('fw_type')
if fw_type == fw_const.FW_TENANT_EDGE:
rtr_net = net_list[0]
rtr_subnet_lt = (
self.os_helper.get_subnets_for_net(rtr_net))
if rtr_subnet_lt is None:
LOG.error("router subnet not found for "
"net %s", rtr_net)
return
rtr_subnet_id = rtr_subnet_lt[0].get('id')
LOG.info("Deleted dummy router network %s",
rtr.get('id'))
ret = self.delete_os_dummy_rtr_nwk(rtr.get('id'),
rtr_net.get('id'),
rtr_subnet_id)
return ret
LOG.info("Done Checking consistency of DB, no issues") | 0.000418 |
def michalewicz(theta):
"""Michalewicz function"""
x, y = theta
obj = - np.sin(x) * np.sin(x ** 2 / np.pi) ** 20 - \
np.sin(y) * np.sin(2 * y ** 2 / np.pi) ** 20
grad = np.array([
- np.cos(x) * np.sin(x ** 2 / np.pi) ** 20 - (40 / np.pi) * x *
np.sin(x) * np.sin(x ** 2 / np.pi) ** 19 * np.cos(x ** 2 / np.pi),
- np.cos(y) * np.sin(2 * y ** 2 / np.pi) ** 20 - (80 / np.pi) * y * np.sin(y) *
np.sin(2 * y ** 2 / np.pi) ** 19 * np.cos(2 * y ** 2 / np.pi),
])
return obj, grad | 0.003711 |
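A quick way to sanity-check the analytic gradient above is to compare it against central finite differences. The sketch below is illustrative: the helper name `check_grad_fd`, the step size, and the test point are assumptions, not part of the source.

import numpy as np

def check_grad_fd(theta, eps=1e-6):
    # Compare the analytic gradient of michalewicz() with central differences.
    _, grad = michalewicz(theta)
    fd = np.zeros_like(theta, dtype=float)
    for i in range(len(theta)):
        step = np.zeros_like(theta, dtype=float)
        step[i] = eps
        fd[i] = (michalewicz(theta + step)[0] - michalewicz(theta - step)[0]) / (2 * eps)
    return np.max(np.abs(grad - fd))

# Expected to be small (on the order of 1e-6 or less) at a generic point.
print(check_grad_fd(np.array([2.2, 1.57])))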
def main(argv=sys.argv, stream=sys.stderr):
"""Entry point for ``tappy`` command."""
args = parse_args(argv)
suite = build_suite(args)
runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream)
result = runner.run(suite)
return get_status(result) | 0.003521 |
def save(self, inplace=True):
"""
Saves modification to the api server.
"""
data = self._modified_data()
data = data['permissions']
if bool(data):
url = six.text_type(self.href) + self._URL['permissions']
extra = {'resource': self.__class__.__name__, 'query': data}
logger.info('Modifying permissions', extra=extra)
self._api.patch(url=url, data=data, append_base=False)
else:
raise ResourceNotModified() | 0.003846 |
def start_agent(agent, recp, desc, allocation_id=None, *args, **kwargs):
'''
Tells remote host agent to start agent identified by desc.
The result value of the fiber is IRecipient.
'''
f = fiber.Fiber()
f.add_callback(agent.initiate_protocol, IRecipient(recp), desc,
allocation_id, *args, **kwargs)
f.add_callback(StartAgentRequester.notify_finish)
f.succeed(StartAgentRequester)
return f | 0.002257 |
def send(self, smtp=None, **kw):
"""
Sends message.
:param smtp: When set, parameters from this dictionary overwrite
options from config. See `emails.Message.send` for more information.
:param kwargs: Parameters for `emails.Message.send`
:return: Response objects from emails backend.
        For the default `emails.backend.smtp.SMTPBackend`, returns an `emails.backend.smtp.SMTPResponse` object.
"""
smtp_options = {}
smtp_options.update(self.config.smtp_options)
if smtp:
smtp_options.update(smtp)
return super(Message, self).send(smtp=smtp_options, **kw) | 0.0059 |
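A hedged usage sketch of the override behaviour described in the docstring; the constructor arguments, recipient, and SMTP host/port values below are invented for illustration and are not taken from the source.

# Config-supplied smtp_options are used by default; an explicit dict overrides them.
msg = Message(subject="Hello", html="<p>Hi</p>", mail_from="[email protected]")
response = msg.send(
    to="[email protected]",
    smtp={"host": "smtp.example.com", "port": 587, "tls": True},  # merged over config.smtp_options
)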
def _format_object_mask(objectmask):
"""Format the new style object mask.
This wraps the user mask with mask[USER_MASK] if it does not already
have one. This makes it slightly easier for users.
:param objectmask: a string-based object mask
"""
objectmask = objectmask.strip()
if (not objectmask.startswith('mask') and
not objectmask.startswith('[')):
objectmask = "mask[%s]" % objectmask
return objectmask | 0.002169 |
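Illustrative inputs and outputs, which follow directly from the code above (the mask strings themselves are made up for the example):

# Bare property lists get wrapped...
_format_object_mask("id, hostname")        # -> "mask[id, hostname]"
# ...while masks that already start with "mask" or "[" pass through unchanged.
_format_object_mask("mask[id, hostname]")  # -> "mask[id, hostname]"
_format_object_mask("[id]")                # -> "[id]"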
def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
"""Removes a package from the device.
Args:
package_name: Package name of target package.
keep_data: whether to keep the data and cache directories
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm uninstall output.
"""
cmd = ['pm uninstall']
if keep_data:
cmd.append('-k')
cmd.append('"%s"' % package_name)
return self.Shell(' '.join(cmd), timeout_ms=timeout_ms) | 0.003484 |
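As a usage sketch, the package name and the `device` handle below are assumptions; only the command construction shown in the method is being illustrated.

# Hypothetical device handle exposing Uninstall()/Shell().
output = device.Uninstall("com.example.app", keep_data=True, timeout_ms=30000)
# Internally this shells out to:  pm uninstall -k "com.example.app"
# Without keep_data it would run:  pm uninstall "com.example.app"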
def is_jid(jid):
'''
Returns True if the passed in value is a job id
'''
if not isinstance(jid, six.string_types):
return False
if len(jid) != 20 and (len(jid) <= 21 or jid[20] != '_'):
return False
try:
int(jid[:20])
return True
except ValueError:
return False | 0.00304 |
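A few illustrative checks, consistent with the length and underscore rules in the code (the jid strings are examples, not from the source):

is_jid("20190101123030123456")         # 20 digits                    -> True
is_jid("20190101123030123456_master")  # 20 digits + '_' + suffix     -> True
is_jid("20190101123030123456x")        # 21 chars, 21st is not '_'    -> False
is_jid(20190101123030123456)           # not a string                 -> False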
def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
header_list = []
for json_record in self._buffer:
for key in json_record:
if key not in header_list:
header_list.append(key)
self._loader.inc_table_count()
yield TableData(
self._make_table_name(),
header_list,
self._buffer,
dp_extractor=self._loader.dp_extractor,
type_hints=self._extract_type_hints(header_list),
) | 0.00315 |
def filter_arc(w, h, aspect):
"""Aspect ratio convertor. you must specify output size and source aspect ratio (as float)"""
taspect = float(w)/h
if abs(taspect - aspect) < 0.01:
return "scale=%s:%s"%(w,h)
if taspect > aspect: # pillarbox
pt = 0
ph = h
pw = int (h*aspect)
pl = int((w - pw)/2.0)
else: # letterbox
pl = 0
pw = w
ph = int(w * (1/aspect))
pt = int((h - ph)/2.0)
return "scale=%s:%s[out];[out]pad=%s:%s:%s:%s:black" % (pw,ph,w,h,pl,pt) | 0.022099 |
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration) | 0.003162 |
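A hedged usage sketch: the project, dataset, and table names are invented, and this assumes the method is called on an already-constructed hook object (as with Airflow's BigQuery hook).

hook.run_copy(
    source_project_dataset_tables=[
        "my-project.staging.events_2019",
        "my-project.staging.events_2020",
    ],
    destination_project_dataset_table="my-project.warehouse.events",
    write_disposition="WRITE_TRUNCATE",      # overwrite the destination if it already exists
    create_disposition="CREATE_IF_NEEDED",
    labels={"team": "analytics"},
)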
def detach(self) -> iostream.IOStream:
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all
further HTTP processing. Intended for implementing protocols
like websockets that tunnel over an HTTP handshake.
This method is only supported when HTTP/1.1 is used.
.. versionadded:: 5.1
"""
self._finished = True
# TODO: add detach to HTTPConnection?
return self.request.connection.detach() | 0.003914 |
def decrypt(self, message, **kwargs):
"""Decrypt the contents of a string or file-like object ``message``.
:type message: file or str or :class:`io.BytesIO`
:param message: A string or file-like object to decrypt.
:param bool always_trust: Instruct GnuPG to ignore trust checks.
:param str passphrase: The passphrase for the secret key used for decryption.
:param str output: A filename to write the decrypted output to.
"""
stream = _make_binary_stream(message, self._encoding)
result = self.decrypt_file(stream, **kwargs)
stream.close()
return result | 0.00468 |
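A hedged usage sketch, assuming the surrounding class is instantiated the way python-gnupg-style wrappers usually are; the file name, passphrase, and the `gpg` instance are placeholders.

# gpg is an already-constructed wrapper instance exposing decrypt().
encrypted = open("message.asc").read()   # ASCII-armored ciphertext (placeholder file)
result = gpg.decrypt(encrypted, passphrase="correct horse battery staple")
print(result.data)   # decrypted payload on python-gnupg-style result objects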
def get_instances(self):
"""Mostly used for debugging"""
return ["<%s prefix:%s (uid:%s)>" % (self.__class__.__name__,
i.prefix, self.uid)
for i in self.instances] | 0.008333 |
def path_yield(path):
"""Yield on all path parts."""
for part in (x for x in path.strip(SEP).split(SEP) if x not in (None, '')):
yield part | 0.006452 |
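For example, assuming `SEP` is the usual '/' separator:

assert list(path_yield("/usr/local/bin")) == ["usr", "local", "bin"]
assert list(path_yield("a//b/")) == ["a", "b"]   # empty parts are skipped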
def replace(self, key, value, time=0, compress_level=-1):
"""
        Replace a key/value on the server only if the key already exists.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
        :return: True if the key was replaced, False if the key does not exist
:rtype: bool
"""
server = self._get_server(key)
return server.replace(key, value, time, compress_level) | 0.002597 |
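A hedged usage sketch against a hypothetical client instance; the companion `set` call is assumed to exist on the same client and is only there to make the semantics visible.

client.set("greeting", "hello")              # key now exists
client.replace("greeting", "hi", time=60)    # -> True, value updated, expires in 60 s
client.replace("missing-key", "value")       # -> False, key was never set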
def JG(cpu, target):
"""
Jumps short if greater.
:param cpu: current CPU.
:param target: destination operand.
"""
cpu.PC = Operators.ITEBV(cpu.address_bit_size, Operators.AND(cpu.ZF == False, cpu.SF == cpu.OF), target.read(), cpu.PC) | 0.014184 |
def temporary_dir(delete=True, dir=None,
prefix='elasticluster.', suffix='.d'):
"""
Make a temporary directory and make it current for the code in this context.
Delete temporary directory upon exit from the context, unless
``delete=False`` is passed in the arguments.
Arguments *suffix*, *prefix* and *dir* are exactly as in
:func:`tempfile.mkdtemp()` (but have different defaults).
"""
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp(suffix, prefix, dir)
os.chdir(tmpdir)
yield
os.chdir(cwd)
if delete:
shutil.rmtree(tmpdir, ignore_errors=True) | 0.003221 |
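Assuming the generator above is wrapped with `contextlib.contextmanager` (the decorator is not visible in this excerpt), usage would look like:

import os

with temporary_dir(prefix="elasticluster.", suffix=".d"):
    # cwd is now the freshly created temporary directory
    with open("scratch.txt", "w") as fh:
        fh.write("work happens here")
    print(os.getcwd())
# on exit the previous cwd is restored and the directory is deleted

with temporary_dir(delete=False):
    kept_dir = os.getcwd()   # this directory survives after the block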
def draw_figs(FIGS):
"""
    Can only be used if the matplotlib backend is set to TkAgg.
    Does not play well with wxPython.
Parameters
_________
FIGS : dictionary of figure names as keys and numbers as values
"""
is_win = True if sys.platform in ['win32', 'win64'] else False
if not is_win:
plt.ion()
for fig in list(FIGS.keys()):
plt.draw()
plt.show()
plt.ioff()
if is_win:
# this style basically works for Windows
plt.draw()
print("You must manually close all plots to continue")
plt.show() | 0.001661 |
def _clean_listofcomponents_tuples(listofcomponents_tuples):
"""force 3 items in the tuple"""
def to3tuple(item):
"""return a 3 item tuple"""
if len(item) == 3:
return item
else:
return (item[0], item[1], None)
return [to3tuple(item) for item in listofcomponents_tuples] | 0.00303 |
def _resample_obspy(samples, sr, newsr, window='hanning', lowpass=True):
# type: (np.ndarray, int, int, str, bool) -> np.ndarray
"""
    Resample using the Fourier method. The same as resample_scipy but with
    low-pass filtering applied when downsampling, to avoid aliasing
"""
from scipy.signal import resample
from math import ceil
factor = sr/float(newsr)
if newsr < sr and lowpass:
# be sure filter still behaves good
if factor > 16:
logger.info("Automatic filter design is unstable for resampling "
"factors (current sampling rate/new sampling rate) "
"above 16. Manual resampling is necessary.")
freq = min(sr, newsr) * 0.5 / float(factor)
logger.debug(f"resample_obspy: lowpass {freq}")
samples = lowpass_cheby2(samples, freq=freq, sr=sr, maxorder=12)
num = int(ceil(len(samples) / factor))
return _applyMultichan(samples,
lambda S: resample(S, num, window=window)) | 0.002979 |
def _all_get_table_col(self, key, column, fullname):
""" Creates a pytables column instance.
The type of column depends on the type of `column[0]`.
Note that data in `column` must be homogeneous!
"""
val = column[0]
try:
            # # We do not want to lose int_
if type(val) is int:
return pt.IntCol()
if isinstance(val, (str, bytes)):
itemsize = int(self._prm_get_longest_stringsize(column))
return pt.StringCol(itemsize)
if isinstance(val, np.ndarray):
if (np.issubdtype(val.dtype, str) or
np.issubdtype(val.dtype, bytes)):
itemsize = int(self._prm_get_longest_stringsize(column))
return pt.StringCol(itemsize, shape=val.shape)
else:
return pt.Col.from_dtype(np.dtype((val.dtype, val.shape)))
else:
return pt.Col.from_dtype(np.dtype(type(val)))
except Exception:
self._logger.error('Failure in storing `%s` of Parameter/Result `%s`.'
' Its type was `%s`.' % (key, fullname, repr(type(val))))
raise | 0.003205 |
def readFile(self, pathToFile):
"""
Returns data from a file.
@type pathToFile: str
@param pathToFile: Path to the file.
@rtype: str
@return: The data from file.
"""
fd = open(pathToFile, "rb")
data = fd.read()
fd.close()
return data | 0.011765 |
def GetVectorAsNumpy(numpy_type, buf, count, offset):
""" GetVecAsNumpy decodes values starting at buf[head] as
`numpy_type`, where `numpy_type` is a numpy dtype. """
if np is not None:
# TODO: could set .flags.writeable = False to make users jump through
# hoops before modifying...
return np.frombuffer(buf, dtype=numpy_type, count=count, offset=offset)
else:
raise NumpyRequiredForThisFeature('Numpy was not found.') | 0.002114 |
def encode(self, variables, attributes):
"""
Encode the variables and attributes in this store
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
Returns
-------
variables : dict-like
attributes : dict-like
"""
variables = OrderedDict([(k, self.encode_variable(v))
for k, v in variables.items()])
attributes = OrderedDict([(k, self.encode_attribute(v))
for k, v in attributes.items()])
return variables, attributes | 0.002649 |
def add_result(self, values):
"""
Add a tuple or increment the value of an existing one
in the rule results dictionary.
"""
idx = [values['host']]
for gid in self.key_gids[1:]:
idx.append(values[gid])
idx = tuple(idx)
try:
self.results[idx] += 1
except KeyError:
self.results[idx] = 1
self._last_idx = idx | 0.004587 |
def add_section_break(self):
"""Return `w:sectPr` element for new section added at end of document.
The last `w:sectPr` becomes the second-to-last, with the new `w:sectPr` being an
exact clone of the previous one, except that all header and footer references
are removed (and are therefore now "inherited" from the prior section).
A copy of the previously-last `w:sectPr` will now appear in a new `w:p` at the
end of the document. The returned `w:sectPr` is the sentinel `w:sectPr` for the
document (and as implemented, *is* the prior sentinel `w:sectPr` with headers
and footers removed).
"""
# ---get the sectPr at file-end, which controls last section (sections[-1])---
sentinel_sectPr = self.get_or_add_sectPr()
# ---add exact copy to new `w:p` element; that is now second-to last section---
self.add_p().set_sectPr(sentinel_sectPr.clone())
# ---remove any header or footer references from "new" last section---
for hdrftr_ref in sentinel_sectPr.xpath("w:headerReference|w:footerReference"):
sentinel_sectPr.remove(hdrftr_ref)
# ---the sentinel `w:sectPr` now controls the new last section---
return sentinel_sectPr | 0.00788 |
def _call(self, mthd, uri, admin, data, headers, std_headers):
"""
Handles all the common functionality required for API calls. Returns
the resulting response object.
"""
if not uri.startswith("http"):
uri = "/".join((self.auth_endpoint.rstrip("/"), uri))
if admin:
# Admin calls use a different port
uri = re.sub(r":\d+/", ":35357/", uri)
if std_headers:
hdrs = self._standard_headers()
else:
hdrs = {}
if headers:
hdrs.update(headers)
kwargs = {"headers": hdrs}
if data:
kwargs["body"] = data
if "tokens" in uri:
# We'll handle the exception here
kwargs["raise_exception"] = False
return pyrax.http.request(mthd, uri, verify=self.verify_ssl, **kwargs) | 0.002312 |
def get_access_list( self, email, uid ):
'''
        Takes an email and a matching uid, builds a user_ref and fetches an access structure that looks like:
{
"count": 1,
"code": 0,
"ts": 1428522205,
"limit": 100,
"offset": 0,
"total": 1,
"data": [
{
"granted": true,
"resource": {
"aid": "J8MY0Bu8Xs",
"rid": "RESOURCE_MONTHLY",
"image_url": "/images2/default/file-document.png",
"name": "Month",
"description": ""
},
"user": {
"first_name": "WackoJacko",
"last_name": "AndDot",
"email": "[email protected]",
"api_token": "CDEADBEEFcafebabe0x1337KxYzCiuIHaX0Ri3GH"
},
"access_id": "1VShIZzLeIod"
}
]
}
'''
path = '/api/v3/access/list'
# package the user
userRef = {
'uid': uid,
'email': email,
'timestamp': int(time.time())
}
serialized = json.dumps(userRef)#, indent=2)
user_ref = utinypass.crypto.aesencrypt(self.private_key, serialized)
# prepare the request
data = {
'aid': self.app_id,
'user_ref': user_ref,
}
# doit
r = requests.get( self.base_url + path, data=data )
if r.status_code != 200:
raise ValueError( path + ":" + r.reason )
access_struct = json.loads( r.content )
return access_struct | 0.017338 |
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn):
"""Shared functionality for grouping by batches for variant calling and joint calling.
"""
singles = []
batch_groups = collections.defaultdict(list)
for args in xs:
data = utils.to_single_data(args)
caller, batch = caller_batch_fn(data)
region = _list_to_tuple(data["region"]) if "region" in data else ()
if batch is not None:
batches = batch if isinstance(batch, (list, tuple)) else [batch]
for b in batches:
batch_groups[(b, region, caller)].append(utils.deepish_copy(data))
else:
data = prep_data_fn(data, [data])
singles.append(data)
batches = []
for batch, items in batch_groups.items():
batch_data = utils.deepish_copy(_pick_lead_item(items))
# For nested primary batches, split permanently by batch
if tz.get_in(["metadata", "batch"], batch_data):
batch_name = batch[0]
batch_data["metadata"]["batch"] = batch_name
batch_data = prep_data_fn(batch_data, items)
batch_data["group_orig"] = _collapse_subitems(batch_data, items)
batch_data["group"] = batch
batches.append(batch_data)
return singles + batches | 0.002327 |
def cookie_name_check(cookie_name):
""" Check cookie name for validity. Return True if name is valid
:param cookie_name: name to check
:return: bool
"""
cookie_match = WHTTPCookie.cookie_name_non_compliance_re.match(cookie_name.encode('us-ascii'))
return len(cookie_name) > 0 and cookie_match is None | 0.028754 |
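Illustrative checks: the exact rejections depend on `cookie_name_non_compliance_re`, which is not shown here, so the last outcome assumes the regex flags RFC 6265 separator characters such as ';'.

cookie_name_check("session_id")   # -> True (plain token characters)
cookie_name_check("")             # -> False (empty names fail the length check)
cookie_name_check("bad;name")     # -> False, assuming ';' is matched as non-compliant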
def transform_flask_bare_import(node):
    '''Translates a flask.ext bare import into a non-magical import.
Translates:
import flask.ext.admin as admin
Into:
import flask_admin as admin
'''
new_names = []
for (name, as_name) in node.names:
match = re.match(r'flask\.ext\.(.*)', name)
from_name = match.group(1)
actual_module_name = 'flask_{}'.format(from_name)
new_names.append((actual_module_name, as_name))
new_node = nodes.Import()
copy_node_info(node, new_node)
new_node.names = new_names
mark_transformed(new_node)
return new_node | 0.001587 |
def __set_unkown_effect(self, hgvs_string):
"""Sets a flag for unkown effect according to HGVS syntax. The
COSMIC database also uses unconventional questionmarks to denote
missing information.
Args:
hgvs_string (str): hgvs syntax with "p." removed
"""
# Standard use by HGVS of indicating unknown effect.
unknown_effect_list = ['?', '(=)', '='] # unknown effect symbols
if hgvs_string in unknown_effect_list:
self.unknown_effect = True
elif "(" in hgvs_string:
            # parentheses in HGVS indicate expected outcomes
self.unknown_effect = True
else:
self.unknown_effect = False
        # detect if there is missing information. Commonly COSMIC will
        # have insertions with p.?_?ins? or deletions with ?del indicating
        # missing information.
if "?" in hgvs_string:
self.is_missing_info = True
else:
self.is_missing_info = False | 0.001967 |
def available_input_formats():
"""
Return all available input formats.
Returns
-------
formats : list
all available input formats
"""
input_formats = []
for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
logger.debug("driver found: %s", v)
driver_ = v.load()
if hasattr(driver_, "METADATA") and (driver_.METADATA["mode"] in ["r", "rw"]):
input_formats.append(driver_.METADATA["driver_name"])
return input_formats | 0.00396 |
def init_neutron_consumer(self, mq):
"""
Init openstack neutron mq
1. Check if enable listening neutron notification
2. Create consumer
:param mq: class ternya.mq.MQ
"""
if not self.enable_component_notification(Openstack.Neutron):
log.debug("disable listening neutron notification")
return
for i in range(self.config.neutron_mq_consumer_count):
mq.create_consumer(self.config.neutron_mq_exchange,
self.config.neutron_mq_queue,
ProcessFactory.process(Openstack.Neutron))
log.debug("enable listening openstack neutron notification.") | 0.002833 |