text (string, 78 to 104k characters) | score (float64, 0 to 0.18) |
---|---|
def _take_values(self, item: Node) -> DictBasicType:
"""Takes snapshot of the object and replaces _parent property value on None to avoid
infitinite recursion in GPflow tree traversing.
:param item: GPflow node object.
:return: dictionary snapshot of the node object."""
values = super()._take_values(item)
values['_parent'] = None
return values | 0.007444 |
def alpha_div(alphas, Ks, dim, num_q, rhos, nus):
r'''
Estimate the alpha divergence between distributions:
\int p^\alpha q^{1-\alpha}
based on kNN distances.
Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences.
Enforces that estimates are >= 0.
Returns divergence estimates with shape (num_alphas, num_Ks).
'''
return _get_alpha_div(alphas, Ks, dim)(num_q, rhos, nus) | 0.002381 |
def fetchMyCgi(self):
"""Fetches statistics from my_cgi.cgi"""
try:
response = urlopen(Request('http://{}/my_cgi.cgi'.format(self.ip), b'request=create_chklst'))
except (HTTPError, URLError):
_LOGGER.warning("Failed to open url to {}".format(self.ip))
self._error_report = True
return None
lines = response.readlines()
return {line.decode().split(':')[0].strip(): line.decode().split(':')[1].strip() for line in lines} | 0.009843 |
def _cache_from_source(path: str) -> str:
"""Return the path to the cached file for the given path. The original path
does not have to exist."""
cache_path, cache_file = os.path.split(importlib.util.cache_from_source(path))
filename, _ = os.path.splitext(cache_file)
return os.path.join(cache_path, filename + ".lpyc") | 0.005917 |
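A hedged usage sketch of the helper above; the exact cache tag (e.g. cpython-311) depends on the running interpreter, so only the suffix and directory shape are checked.
import os
# Hypothetical source path; assumes the default __pycache__ layout
# (no sys.pycache_prefix override).
cached = _cache_from_source(os.path.join("pkg", "mod.py"))
assert cached.endswith(".lpyc")
assert os.path.basename(os.path.dirname(cached)) == "__pycache__"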
def load(self, path, name):
"""Imports the specified ``fgic`` file from the hard disk.
:param path: directory from which the ``fgic`` file is read.
:param name: filename, without file extension
"""
filename = name + '.fgic'
filepath = aux.joinpath(path, filename)
with zipfile.ZipFile(filepath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
infoString = io.TextIOWrapper(containerZip.open('info'),
encoding='utf-8'
).read()
self.container = json.loads(jsonString, object_hook=Fgi.jsonHook)
self.info.update(json.loads(infoString))
self._matrixTemplate = self.info['_matrixTemplate']
del self.info['_matrixTemplate'] | 0.003666 |
def validate_line(self, line):
"""Validate Unicode IPA string relative to panphon.
line -- String of IPA characters. Can contain whitespace and limited
punctuation.
"""
line0 = line
pos = 0
while line:
seg_m = self.ft.seg_regex.match(line)
wsp_m = self.ws_punc_regex.match(line)
if seg_m:
length = len(seg_m.group(0))
line = line[length:]
pos += length
elif wsp_m:
length = len(wsp_m.group(0))
line = line[length:]
pos += length
else:
msg = 'IPA not valid at position {} in "{}".'.format(pos, line0.strip())
# msg = msg.decode('utf-8')
print(msg, file=sys.stderr)
line = line[1:]
pos += 1 | 0.003405 |
def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None):
"""Creates the shell"""
manage_dict = manage_dict or MANAGE_DICT
_vars = globals()
_vars.update(locals())
auto_imported = import_objects(manage_dict)
if extra_vars:
auto_imported.update(extra_vars)
_vars.update(auto_imported)
msgs = []
if manage_dict['shell']['banner']['enabled']:
msgs.append(
manage_dict['shell']['banner']['message'].format(**manage_dict)
)
if auto_imported and manage_dict['shell']['auto_import']['display']:
auto_imported_names = [
key for key in auto_imported.keys()
if key not in ['__builtins__', 'builtins']
]
msgs.append('\tAuto imported: {0}\n'.format(auto_imported_names))
banner_msg = u'\n'.join(msgs)
exec_init(manage_dict, _vars)
exec_init_script(manage_dict, _vars)
atexit_functions = [
import_string(func_name) for func_name in
manage_dict['shell'].get('exit_hooks', [])
]
atexit_functions += exit_hooks or []
for atexit_function in atexit_functions:
atexit.register(atexit_function)
if console == 'ptpython':
try:
from ptpython.repl import embed
embed({}, _vars)
except ImportError:
click.echo("ptpython is not installed!")
return
if console == 'bpython':
try:
from bpython import embed
embed(locals_=_vars, banner=banner_msg)
except ImportError:
click.echo("bpython is not installed!")
return
try:
if console == 'ipython':
from IPython import start_ipython
from traitlets.config import Config
c = Config()
c.TerminalInteractiveShell.banner2 = banner_msg
c.InteractiveShellApp.extensions = [
extension for extension in
manage_dict['shell'].get('ipython_extensions', [])
]
c.InteractiveShellApp.exec_lines = [
exec_line for exec_line in
manage_dict['shell'].get('ipython_exec_lines', [])
]
if manage_dict['shell'].get('ipython_auto_reload', True) is True:
c.InteractiveShellApp.extensions.append('autoreload')
c.InteractiveShellApp.exec_lines.append('%autoreload 2')
start_ipython(argv=[], user_ns=_vars, config=c)
else:
raise ImportError
except ImportError:
if manage_dict['shell']['readline_enabled']:
import readline
import rlcompleter
readline.set_completer(rlcompleter.Completer(_vars).complete)
readline.parse_and_bind('tab: complete')
shell = code.InteractiveConsole(_vars)
shell.interact(banner=banner_msg) | 0.00035 |
def _main_ctxmgr(func):
'''
A decorator wrapper for :class:`ServerMainContextManager`
Usage example:
.. code:: python
@aiotools.main
def mymain():
server_args = do_init()
stop_sig = yield server_args
if stop_sig == signal.SIGINT:
do_graceful_shutdown()
else:
do_forced_shutdown()
aiotools.start_server(..., main_ctxmgr=mymain, ...)
'''
@functools.wraps(func)
def helper(*args, **kwargs):
return ServerMainContextManager(func, args, kwargs)
return helper | 0.001689 |
def update_role(self, service_name, deployment_name, role_name,
os_virtual_hard_disk=None, network_config=None,
availability_set_name=None, data_virtual_hard_disks=None,
role_size=None, role_type='PersistentVMRole',
resource_extension_references=None,
provision_guest_agent=None):
'''
Updates the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
os_virtual_hard_disk:
Contains the parameters Windows Azure uses to create the operating
system disk for the virtual machine.
network_config:
Encapsulates the metadata required to create the virtual network
configuration for a virtual machine. If you do not include a
network configuration set you will not be able to access the VM
through VIPs over the internet. If your virtual machine belongs to
a virtual network you cannot specify which subnet address space
it resides under.
availability_set_name:
Specifies the name of an availability set to which to add the
virtual machine. This value controls the virtual machine allocation
in the Windows Azure environment. Virtual machines specified in the
same availability set are allocated to different nodes to maximize
availability.
data_virtual_hard_disks:
Contains the parameters Windows Azure uses to create a data disk
for a virtual machine.
role_size:
The size of the virtual machine to allocate. The default value is
Small. Possible values are: ExtraSmall, Small, Medium, Large,
ExtraLarge. The specified value must be compatible with the disk
selected in the OSVirtualHardDisk values.
role_type:
The type of the role for the virtual machine. The only supported
value is PersistentVMRole.
resource_extension_references:
Optional. Contains a collection of resource extensions that are to
be installed on the Virtual Machine. This element is used if
provision_guest_agent is set to True.
provision_guest_agent:
Optional. Indicates whether the VM Agent is installed on the
Virtual Machine. To run a resource extension in a Virtual Machine,
this service must be installed.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
return self._perform_put(
self._get_role_path(service_name, deployment_name, role_name),
_XmlSerializer.update_role_to_xml(
role_name,
os_virtual_hard_disk,
role_type,
network_config,
availability_set_name,
data_virtual_hard_disks,
role_size,
resource_extension_references,
provision_guest_agent),
as_async=True) | 0.002112 |
def commands(self):
"""
Returns a list of commands supported by the motor
controller.
"""
self._commands, value = self.get_attr_set(self._commands, 'commands')
return value | 0.009091 |
def insertPhenotypeAssociationSet(self, phenotypeAssociationSet):
"""
Inserts the specified phenotype annotation set into this repository.
"""
datasetId = phenotypeAssociationSet.getParentContainer().getId()
attributes = json.dumps(phenotypeAssociationSet.getAttributes())
try:
models.Phenotypeassociationset.create(
id=phenotypeAssociationSet.getId(),
name=phenotypeAssociationSet.getLocalId(),
datasetid=datasetId,
dataurl=phenotypeAssociationSet._dataUrl,
attributes=attributes)
except Exception:
raise exceptions.DuplicateNameException(
phenotypeAssociationSet.getParentContainer().getId()) | 0.002597 |
def set(self, value):
"""
Sets the value of the DNS name
:param value:
A unicode string
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
if value.startswith('.'):
encoded_value = b'.' + value[1:].encode(self._encoding)
else:
encoded_value = value.encode(self._encoding)
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b'' | 0.002732 |
def read(self, fileobj):
"""Return if all data could be read and the atom payload"""
fileobj.seek(self._dataoffset, 0)
data = fileobj.read(self.datalength)
return len(data) == self.datalength, data | 0.008696 |
def check_params(num_rows, num_cols, padding):
"""Validation and typcasting"""
num_rows = check_int(num_rows, 'num_rows', min_value=1)
num_cols = check_int(num_cols, 'num_cols', min_value=1)
padding = check_int(padding, 'padding', min_value=0)
return num_rows, num_cols, padding | 0.003333 |
def _on_call_service_msg(self, msg):
"""
Stub service handler. Start a thread in which to import the
mitogen.service implementation and deliver the message to the newly
constructed pool. This must be done in a thread, as a CALL_SERVICE for
e.g. PushFileService may race with a CALL_FUNCTION that is blocking the
main thread while waiting for a result from that service.
"""
if not msg.is_dead:
th = threading.Thread(target=self._service_stub_main, args=(msg,))
th.start() | 0.003795 |
def pHYs(self):
"""
pHYs chunk in PNG image, or |None| if not present
"""
match = lambda chunk: chunk.type_name == PNG_CHUNK_TYPE.pHYs # noqa
return self._find_first(match) | 0.014085 |
def create_similar(self, content, width, height):
"""Create a new surface that is as compatible as possible
for uploading to and the use in conjunction with this surface.
For example the new surface will have the same fallback resolution
and :class:`FontOptions`.
Generally, the new surface will also use the same backend as other,
unless that is not possible for some reason.
Initially the surface contents are all 0
(transparent if contents have transparency, black otherwise.)
Use :meth:`create_similar_image` if you need an image surface
which can be painted quickly to the target surface.
:param content: the :ref:`CONTENT` string for the new surface.
:param width: width of the new surface (in device-space units)
:param height: height of the new surface (in device-space units)
:type content: str
:type width: int
:type height: int
:returns: A new instance of :class:`Surface` or one of its subclasses.
"""
return Surface._from_pointer(
cairo.cairo_surface_create_similar(
self._pointer, content, width, height),
incref=False) | 0.001627 |
def RGB_to_CMY(cobj, *args, **kwargs):
"""
RGB to CMY conversion.
NOTE: CMYK and CMY values range from 0.0 to 1.0
"""
cmy_c = 1.0 - cobj.rgb_r
cmy_m = 1.0 - cobj.rgb_g
cmy_y = 1.0 - cobj.rgb_b
return CMYColor(cmy_c, cmy_m, cmy_y) | 0.003802 |
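A quick worked check of the complement formula above, using a hypothetical stand-in object carrying the rgb_r/rgb_g/rgb_b attributes the function expects.
from collections import namedtuple
# Hypothetical RGB holder; pure red (1, 0, 0) should convert to CMY (0, 1, 1).
_FakeRGB = namedtuple("_FakeRGB", "rgb_r rgb_g rgb_b")
cmy = RGB_to_CMY(_FakeRGB(1.0, 0.0, 0.0))
# cmy holds cmy_c == 0.0, cmy_m == 1.0, cmy_y == 1.0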
def auto_convert_numeric_string_cell(flagable, cell_str, position, worksheet, flags, units):
'''
Handles the string containing numeric case of cell and attempts
auto-conversion for auto_convert_cell.
'''
def numerify_str(cell_str, flag_level='minor', flag_text=""):
'''
Differentiates between int and float strings. Expects a numeric string.
'''
if re.search(allregex.integer_regex, cell_str):
flagable.flag_change(flags, flag_level, position, worksheet)
return int(cell_str)
else:
flagable.flag_change(flags, flag_level, position, worksheet)
return float(cell_str)
def numerify_percentage_str(cell_str, flag_level='minor', flag_text=""):
flagable.flag_change(flags, flag_level, position, worksheet)
return float(cell_str) / 100
def convert_to_int_or_float(cell_str, flag_level='minor', flag_text=""):
if not cell_str:
conversion = 0
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['empty-to-zero-string'])
if re.search(allregex.numerical_regex, cell_str):
conversion = numerify_str(cell_str, flag_level, flag_text)
# Comma separated?
elif re.search(allregex.comma_sep_numerical_regex, cell_str):
smashed_cell = ''.join(cell_str.split(','))
conversion = numerify_str(smashed_cell, flag_level, flag_text)
# Ends in percentage sign
elif re.search(allregex.percent_numerical_regex, cell_str):
cell_str = allregex.percent_numerical_regex.search(cell_str).group(1)
conversion = numerify_percentage_str(cell_str, flag_level, flag_text)
# Ends in + or - sign (estimate)?
elif re.search(allregex.estimate_numerical_regex, cell_str):
cell_str = cell_str[:-1].replace(",","")
conversion = numerify_str(cell_str, flag_level, flag_text)
# Begins with money symbol?
elif re.search(allregex.begins_with_monetary_symbol_regex, cell_str):
symbol = cell_str[0]
cell_str = cell_str[1:]
try:
conversion = convert_to_int_or_float(cell_str, 'interpreted',
flagable.FLAGS['monetary-removal'])
if re.search(allregex.contains_dollar_symbol_regex, symbol):
units[position] = UNITS_DOLLAR
elif re.search(allregex.contains_pound_symbol_regex, symbol):
units[position] = UNITS_POUND
elif re.search(allregex.contains_euro_symbol_regex, symbol):
units[position] = UNITS_EURO
except ValueError:
conversion = cell_str
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['failed-monetary-convert'])
# Number ending in 'k'?
elif re.search(allregex.ends_with_thousands_scaling_regex, cell_str):
cell_str = cell_str.rstrip()[:-1]
try:
conversion = 1000*convert_to_int_or_float(cell_str, 'interpreted',
flagable.FLAGS['thousands-convert'])
except ValueError:
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['failed-thousands-convert'])
# Number ending in 'M' or 'MM'?
elif re.search(allregex.ends_with_millions_scaling_regex, cell_str):
if cell_str[-2] == "M":
cell_str = cell_str[:-2]
else:
cell_str = cell_str[:-1]
try:
conversion = 1000000*convert_to_int_or_float(cell_str, 'interpreted',
flagable.FLAGS['millions-convert'])
except ValueError:
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['failed-millions-convert'])
else:
raise ValueError("Cannot convert cell")
return conversion
# Try converting
try:
return convert_to_int_or_float(cell_str)
# Couldn't convert?
except ValueError:
flagable.flag_change(flags, 'minor', position, worksheet,
flagable.FLAGS['failed-convert-numeric-string'])
return cell_str | 0.003154 |
def grab_hidden_properties(self):
# type: () -> dict
"""
A one-shot access to hidden properties (the field is then destroyed)
:return: A copy of the hidden properties dictionary on the first call
:raise AttributeError: On any call after the first one
"""
# Copy properties
result = self.__hidden_properties.copy()
# Destroy the field
self.__hidden_properties.clear()
del self.__hidden_properties
return result | 0.005917 |
def jsonp(*args, **kw):
"""
Returns a JSON response with a callback wrapper, if asked for.
Consider using CORS instead, as JSONP makes the client app insecure.
See the :func:`~coaster.views.decorators.cors` decorator.
"""
data = json.dumps(dict(*args, **kw), indent=2)
callback = request.args.get('callback', request.args.get('jsonp'))
if callback and __jsoncallback_re.search(callback) is not None:
data = callback + u'(' + data + u');'
mimetype = 'application/javascript'
else:
mimetype = 'application/json'
return Response(data, mimetype=mimetype) | 0.001629 |
def to_reminders(self, ical, label=None, priority=None, tags=None,
tail=None, sep=" ", postdate=None, posttime=None):
"""Return Remind commands for all events of a iCalendar"""
if not hasattr(ical, 'vevent_list'):
return ''
reminders = [self.to_remind(vevent, label, priority, tags, tail, sep,
postdate, posttime)
for vevent in ical.vevent_list]
return ''.join(reminders) | 0.006073 |
def _resample_samplerate(samples, sr, newsr):
# type: (np.ndarray, int, int) -> np.ndarray
"""
Uses https://github.com/tuxu/python-samplerate
"""
try:
from samplerate import resample
except ImportError:
return None
ratio = newsr/sr
return _applyMultichan(samples,
lambda S: resample(S, ratio, 'sinc_best')) | 0.002584 |
def get_ref(self):
"""
Return the ID of the resource to which this note is attached
"""
if self.ref_key == 'NETWORK':
return self.network
elif self.ref_key == 'NODE':
return self.node
elif self.ref_key == 'LINK':
return self.link
elif self.ref_key == 'GROUP':
return self.group
elif self.ref_key == 'SCENARIO':
return self.scenario
elif self.ref_key == 'PROJECT':
return self.project | 0.003774 |
def get_web_server(self, listen_addr, debug=False, **ssl_args):
"""Setup WebSocketServer on listen_addr (host, port)."""
return geventwebsocket.WebSocketServer(
listen_addr,
self.resource,
debug=debug,
**{key: val for key, val in ssl_args.items() if val is not None}
) | 0.005882 |
def Parse(self):
"""Iterator returning dict for each entry in history."""
for timestamp, url, title in self.Query(self.VISITS_QUERY):
if not isinstance(timestamp, (long, int)):
timestamp = 0
yield [timestamp, "FIREFOX3_VISIT", url, title] | 0.011236 |
def asset_class(self) -> str:
""" Returns the full asset class path for this stock """
result = self.parent.name if self.parent else ""
# Iterate to the top asset class and add names.
cursor = self.parent.parent if self.parent else None
while cursor:
result = cursor.name + ":" + result
cursor = cursor.parent
return result | 0.00551 |
def remove_control_from_group(self, process_id, wit_ref_name, group_id, control_id):
"""RemoveControlFromGroup.
[Preview API] Removes a control from the work item form.
:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:param str group_id: The ID of the group.
:param str control_id: The ID of the control to remove.
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if control_id is not None:
route_values['controlId'] = self._serialize.url('control_id', control_id, 'str')
self._send(http_method='DELETE',
location_id='1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58',
version='5.0-preview.1',
route_values=route_values) | 0.005942 |
def missingDataValue(self):
""" Returns the value to indicate missing data.
"""
value = getMissingDataValue(self._array)
fieldNames = self._array.dtype.names
# If the missing value attribute is a list with the same length as the number of fields,
# return the missing value for the field that equals self.nodeName.
if hasattr(value, '__len__') and len(value) == len(fieldNames):
idx = fieldNames.index(self.nodeName)
return value[idx]
else:
return value | 0.00722 |
def hex_to_name(hex_value, spec=u'css3'):
"""
Convert a hexadecimal color value to its corresponding normalized
color name, if any such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
When no color name for the value is found in the given
specification, ``ValueError`` is raised.
"""
if spec not in SUPPORTED_SPECIFICATIONS:
raise ValueError(SPECIFICATION_ERROR_TEMPLATE.format(spec=spec))
normalized = normalize_hex(hex_value)
name = {u'css2': CSS2_HEX_TO_NAMES,
u'css21': CSS21_HEX_TO_NAMES,
u'css3': CSS3_HEX_TO_NAMES,
u'html4': HTML4_HEX_TO_NAMES}[spec].get(normalized)
if name is None:
raise ValueError(
u"'{}' has no defined color name in {}".format(hex_value, spec)
)
return name | 0.001017 |
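A brief usage sketch of the lookup above; the hex value is normalized first, the name comes from the CSS3 named-color table, and an unnamed value raises ValueError as documented.
# '#daa520' carries the CSS3 name 'goldenrod'.
assert hex_to_name(u'#daa520') == u'goldenrod'
try:
    hex_to_name(u'#123456')  # no CSS3 name is defined for this value
except ValueError:
    pass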
def get_config():
"""Retrieve the config as a dictionary of key-value pairs."""
self = H2OConfigReader._get_instance()
if not self._config_loaded:
self._read_config()
return self._config | 0.008696 |
def inline_graphics(soup):
"""
inline-graphic tags
"""
inline_graphics = []
inline_graphic_tags = raw_parser.inline_graphic(soup)
position = 1
for tag in inline_graphic_tags:
item = {}
copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
# Get the tag type
nodenames = ["sub-article"]
details = tag_details(tag, nodenames)
copy_attribute(details, 'type', item)
# Increment the position
item['position'] = position
# Ordinal should be the same as position in this case but set it anyway
item['ordinal'] = tag_ordinal(tag)
inline_graphics.append(item)
return inline_graphics | 0.001414 |
def xception(c, k=8, n_middle=8):
"Preview version of Xception network. Not tested yet - use at own risk. No pretrained model yet."
layers = [
conv(3, k*4, 3, 2),
conv(k*4, k*8, 3),
ConvSkip(k*8, k*16, act=False),
ConvSkip(k*16, k*32),
ConvSkip(k*32, k*91),
]
for i in range(n_middle): layers.append(middle_flow(k*91))
layers += [
ConvSkip(k*91,k*128),
sep_conv(k*128,k*192,act=False),
sep_conv(k*192,k*256),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
Flatten(),
nn.Linear(k*256,c)
]
return nn.Sequential(*layers) | 0.012698 |
def _on_prop_changed(self, instance, meth_name, res, args, kwargs):
"""Called by the observation code, we are interested in
__setitem__"""
if not self._itsme and meth_name == "__setitem__": self.update_widget(args[0])
return | 0.019455 |
def formatmonthname(self, theyear, themonth, withyear=True):
"""
Change colspan to "5", add "today" button, and return a month
name as a table row.
"""
display_month = month_name[themonth]
if isinstance(display_month, six.binary_type) and self.encoding:
display_month = display_month.decode(self.encoding)
if withyear:
s = u'%s %s' % (display_month, theyear)
else:
s = u'%s' % display_month
return ('<tr><th colspan="5" class="month">'
'<button id="cal-today-btn" class="btn btn-small">'
'Today</button> %s</th></tr>' % s) | 0.003012 |
def pci_lookup_name1(
access: (IN, ctypes.POINTER(pci_access)),
buf: (IN, ctypes.c_char_p),
size: (IN, ctypes.c_int),
flags: (IN, ctypes.c_int),
arg1: (IN, ctypes.c_int),
) -> ctypes.c_char_p:
"""
Conversion of PCI IDs to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with one argument.
It is required because ctypes doesn't support variadic functions.
"""
pass | 0.001773 |
def _flush(self, buffer):
"""
Flush the write buffers of the stream if applicable.
Args:
buffer (memoryview): Buffer content.
"""
with _handle_client_error():
self._client.put_object(
Body=buffer.tobytes(), **self._client_kwargs) | 0.006452 |
def log10norm(x, mu, sigma=1.0):
""" Scale scipy lognorm from natural log to base 10
x : input parameter
mu : mean of the underlying log10 gaussian
sigma : variance of underlying log10 gaussian
"""
return stats.lognorm(sigma * np.log(10), scale=mu).pdf(x) | 0.003497 |
def blueprint_name_to_url(name):
""" remove the last . in the string it it ends with a .
for the url structure must follow the flask routing format
it should be /model/method instead of /model/method/
"""
if name[-1:] == ".":
name = name[:-1]
name = str(name).replace(".", "/")
return name | 0.005479 |
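A small illustration of the transformation described above: the trailing dot is stripped and the remaining dots become path separators.
assert blueprint_name_to_url("user.login.") == "user/login"
assert blueprint_name_to_url("model.method") == "model/method"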
def buy(self, price, volume, symbol, order_type=ft.OrderType.NORMAL, adjust_limit=0, acc_id=0):
"""买入"""
ret, data = self._trade_ctx.place_order(price=price, qty=volume, code=symbol, trd_side=ft.TrdSide.BUY,
order_type=order_type, adjust_limit=adjust_limit,
trd_env=self._env_type, acc_id=acc_id)
if ret != ft.RET_OK:
return ret, data
order_id = 0
for ix, row in data.iterrows():
order_id = str(row['order_id'])
return ret, order_id | 0.009967 |
def columns(self):
"""
:return: the list of columns in this table
"""
c = self._connection.cursor()
c.execute("describe `%s`.`%s`" % (self._db, self._name))
self._cols = []
for col in c.fetchall():
self._cols.append(Column.build(col, table=self, con=self._connection))
return self._cols | 0.00831 |
def get_resource_type_from_included_serializer(self):
"""
Check to see if this resource has a different resource_name when
included and return that name, or None
"""
field_name = self.field_name or self.parent.field_name
parent = self.get_parent_serializer()
if parent is not None:
# accept both singular and plural versions of field_name
field_names = [
inflection.singularize(field_name),
inflection.pluralize(field_name)
]
includes = get_included_serializers(parent)
for field in field_names:
if field in includes.keys():
return get_resource_type_from_serializer(includes[field])
return None | 0.002538 |
def _range2cols(areas):
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols = []
for rng in areas.split(","):
if ":" in rng:
rng = rng.split(":")
cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1))
else:
cols.append(_excel2num(rng))
return cols | 0.001449 |
def logs_update(self):
"""
Function updates logs.
"""
Gdk.threads_enter()
if not self.debugging:
self.debugging = True
self.debug_btn.set_label('Info logs')
else:
self.debugging = False
self.debug_btn.set_label('Debug logs')
for record in self.debug_logs['logs']:
if self.debugging:
# Create a new root tree element
if getattr(record, 'event_type', '') != "cmd_retcode":
self.store.append([format_entry(record, show_level=True, colorize=True)])
else:
if int(record.levelno) > 10:
self.store.append([format_entry(record, colorize=True)])
Gdk.threads_leave() | 0.003846 |
def _compute_filename(self, request: BaseRequest):
'''Get the appropriate filename from the request.'''
path = self._path_namer.get_filename(request.url_info)
if os.path.isdir(path):
path += '.f'
else:
dir_name, name = os.path.split(path)
path = os.path.join(anti_clobber_dir_path(dir_name), name)
return path | 0.005168 |
def get(self,id):
'''Return the semantic tag related to the given tag id
:returns: a semantic tag or None
:rtype: ckan.model.semantictag.SemanticTag object
'''
query = meta.Session.query(TagSemanticTag).filter(TagSemanticTag.id==id)
return query.first() | 0.035842 |
def _init_decoder(self):
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding) | 0.004796 |
def find(self, dtype):
"""
Parameters
----------
dtype : PandasExtensionDtype or string
Returns
-------
return the first matching dtype, otherwise return None
"""
if not isinstance(dtype, str):
dtype_type = dtype
if not isinstance(dtype, type):
dtype_type = type(dtype)
if issubclass(dtype_type, ExtensionDtype):
return dtype
return None
for dtype_type in self.dtypes:
try:
return dtype_type.construct_from_string(dtype)
except TypeError:
pass
return None | 0.002928 |
def line_statuses(self, filename):
"""
Return a list of tuples `(lineno, status)` of all the lines found in
the Cobertura report for the given file `filename` where `lineno` is
the line number and `status` is coverage status of the line which can
be either `True` (line hit) or `False` (line miss).
"""
line_elements = self._get_lines_by_filename(filename)
lines_w_status = []
for line in line_elements:
lineno = int(line.attrib['number'])
status = line.attrib['hits'] != '0'
lines_w_status.append((lineno, status))
return lines_w_status | 0.003053 |
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True | 0.00304 |
def validate_maintenance_window(window):
"""Validate PreferredMaintenanceWindow for DBInstance"""
days = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
day_re = r'[A-Z]{1}[a-z]{2}'
hour = r'[01]?[0-9]|2[0-3]'
minute = r'[0-5][0-9]'
r = ("(?P<start_day>%s):(?P<start_hour>%s):(?P<start_minute>%s)-"
"(?P<end_day>%s):(?P<end_hour>%s):(?P<end_minute>%s)") % (day_re,
hour,
minute,
day_re,
hour,
minute)
range_regex = re.compile(r)
m = range_regex.match(window)
if not m:
raise ValueError("DBInstance PreferredMaintenanceWindow must be in "
"the format: ddd:hh24:mi-ddd:hh24:mi")
if m.group('start_day') not in days or m.group('end_day') not in days:
raise ValueError("DBInstance PreferredMaintenanceWindow day part of "
"ranges must be one of: %s" % ", ".join(days))
start_ts = (days.index(m.group('start_day')) * 24 * 60) + \
(int(m.group('start_hour')) * 60) + int(m.group('start_minute'))
end_ts = (days.index(m.group('end_day')) * 24 * 60) + \
(int(m.group('end_hour')) * 60) + int(m.group('end_minute'))
if abs(end_ts - start_ts) < 30:
raise ValueError("DBInstance PreferredMaintenanceWindow must be at "
"least 30 minutes long.")
return window | 0.000601 |
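A hedged usage sketch of the validator above: a well-formed one-hour window is returned unchanged, while a window shorter than 30 minutes raises ValueError.
# Valid: returned as-is.
assert validate_maintenance_window("Mon:03:00-Mon:04:00") == "Mon:03:00-Mon:04:00"
# Invalid: only ten minutes long.
try:
    validate_maintenance_window("Tue:05:00-Tue:05:10")
except ValueError:
    pass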
def set_user_methods(self, user_methods, forced=False):
r'''Method used to select certain property methods as having a higher
priority than those set by default. If `forced` is true, then methods
which were not specified are excluded from consideration.
As a side effect, `method` is removed to ensure than the new methods
will be used in calculations afterwards.
An exception is raised if any of the methods specified aren't available
for the chemical. An exception is raised if no methods are provided.
Parameters
----------
user_methods : str or list
Methods by name to be considered or preferred
forced : bool, optional
If True, only the user specified methods will ever be considered;
if False other methods will be considered if no user methods
succeed
'''
# Accept either a string or a list of methods, and whether
# or not to only consider the forced (user-specified) methods
if isinstance(user_methods, str):
user_methods = [user_methods]
# The user's order matters and is retained for use by select_valid_methods
self.user_methods = user_methods
self.forced = forced
# Validate that the user's specified methods are actual methods
if set(self.user_methods).difference(self.all_methods):
raise Exception("One of the given methods is not available for this chemical")
if not self.user_methods and self.forced:
raise Exception('Only user specified methods are considered when forced is True, but no methods were provided')
# Remove previously selected methods
self.method = None
self.sorted_valid_methods = []
self.T_cached = None | 0.002769 |
def phonenumber_validation(data):
""" Validates phonenumber
Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the
country prefix is absent.
"""
from phonenumber_field.phonenumber import to_python
phone_number = to_python(data)
if not phone_number:
return data
elif not phone_number.country_code:
raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555)."))
elif not phone_number.is_valid():
raise serializers.ValidationError(_('The phone number entered is not valid.'))
return data | 0.006116 |
def check_timeseries_id(self, dataset):
'''
Checks that if a variable exists for the time series id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required variable for time series identifier')
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the timeSeries variable')
# A variable with cf_role="timeseries_id" MUST exist for this to be a valid timeseries incomplete
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
required_ctx.assert_true(timeseries_ids, 'a unique variable must define attribute cf_role="timeseries_id"')
results.append(required_ctx.to_result())
if not timeseries_ids:
return results
timevar = util.get_time_variable(dataset)
nc_timevar = dataset.variables[timevar]
time_dimensions = nc_timevar.dimensions
timeseries_variable = timeseries_ids[0]
dims = timeseries_variable.dimensions
required_ctx.assert_true(
time_dimensions and time_dimensions[0] == dims[0],
'{} must have a dimension and that dimension must be shared by the time variable'.format(timeseries_variable.name)
)
recommended_ctx.assert_true(
getattr(timeseries_variable, 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(recommended_ctx.to_result())
return results | 0.005664 |
def unbind_handler(self, svc_ref):
"""
Called if a command service is gone.
Unregisters its commands.
:param svc_ref: A reference to the unbound service
:return: True if the commands have been unregistered
"""
if svc_ref not in self._bound_references:
# Unknown reference
return False
# Unregister its commands
namespace, commands = self._reference_commands[svc_ref]
for command in commands:
self.unregister(namespace, command)
# Release the service
self._context.unget_service(svc_ref)
del self._bound_references[svc_ref]
del self._reference_commands[svc_ref]
return True | 0.002736 |
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
After a task has run (either successfully or with a failure) clear the
lock if "unlock_before_run" is False.
"""
# Only clear the lock after the task's execution if the
# "unlock_before_run" option is False
if not self.unlock_before_run():
key = self.get_key(args, kwargs)
self.once_backend.clear_lock(key) | 0.004376 |
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
"""Only returns resources if resources allocated."""
prior_status = trial.status
self._stop_trial(
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
if prior_status == Trial.RUNNING:
logger.debug("Returning resources for Trial %s.", str(trial))
self._return_resources(trial.resources)
out = self._find_item(self._running, trial)
for result_id in out:
self._running.pop(result_id) | 0.003454 |
def delete_object_in_seconds(self, cont, obj, seconds, extra_info=None):
"""
Sets the object in the specified container to be deleted after the
specified number of seconds.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.delete_object_in_seconds(cont, obj, seconds) | 0.004065 |
def ec2_route_table_main_route_table_id(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the VPC whose main route table we are looking up
default: the optional value to return if lookup failed; returns None if not set
Returns:
the ID of the main route table of the named VPC, or default if no match/multiple matches found
"""
vpc_id = self.ec2_vpc_vpc_id(lookup)
if vpc_id is None:
return default
route_table = EFAwsResolver.__CLIENTS["ec2"].describe_route_tables(Filters=[
{'Name': 'vpc-id', 'Values': [vpc_id]},
{'Name': 'association.main', 'Values': ['true']}
])
if len(route_table["RouteTables"]) is not 1:
return default
return route_table["RouteTables"][0]["RouteTableId"] | 0.008986 |
def get_conn(self):
"""Return a AzureDLFileSystem object."""
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
self.account_name = service_options.get('account_name')
adlCreds = lib.auth(tenant_id=service_options.get('tenant'),
client_secret=conn.password,
client_id=conn.login)
adlsFileSystemClient = core.AzureDLFileSystem(adlCreds,
store_name=self.account_name)
adlsFileSystemClient.connect()
return adlsFileSystemClient | 0.0048 |
def get_text(node, strategy):
"""
Get the most confident text results, either those with @index = 1 or the first text results or empty string.
"""
textEquivs = node.get_TextEquiv()
if not textEquivs:
log.debug("No text results on %s %s", node, node.id)
return ''
# elif strategy == 'index1':
else:
if len(textEquivs) > 1:
index1 = [x for x in textEquivs if x.index == 1]
if index1:
return index1[0].get_Unicode().strip()
return textEquivs[0].get_Unicode().strip() | 0.003546 |
def get_token_credentials(cls, username, request):
""" Get api token for user with username of :username:
Used by Token-based auth as `credentials_callback` kwarg.
"""
try:
user = cls.get_item(username=username)
except Exception as ex:
log.error(str(ex))
forget(request)
else:
if user:
return user.api_key.token | 0.004717 |
def clean_up_inverse(self, current):
"""
Clean up current.
Python doesn't have variable lookbehinds, so we have to do negative lookaheads.
!(...) when converted to regular expression is atomic, so once it matches, that's it.
So we use the pattern `(?:(?!(?:stuff|to|exclude)<x>))[^/]*?)` where <x> is everything
that comes after the negative group. `!(this|that)other` --> `(?:(?!(?:this|that)other))[^/]*?)`.
We have to update the list before | in nested cases: *(!(...)|stuff). Before we close a parent
`extmatch`: `*(!(...))`. And of course on path separators (when path mode is on): `!(...)/stuff`.
Lastly we make sure all is accounted for when finishing the pattern at the end. If there is nothing
to store, we store `$`: `(?:(?!(?:this|that)$))[^/]*?)`.
"""
if not self.inv_ext:
return
index = len(current) - 1
while index >= 0:
if isinstance(current[index], InvPlaceholder):
content = current[index + 1:]
content.append(_EOP if not self.pathname else self.path_eop)
current[index] = (''.join(content)) + (_EXCLA_GROUP_CLOSE % str(current[index]))
index -= 1
self.inv_ext = 0 | 0.007752 |
def rebuild(self, recreate=True, force=False, **kwargs):
"Recreate (if needed) the wx_obj and apply new properties"
# detect if this involves a spec that needs to recreate the wx_obj:
needs_rebuild = any([isinstance(spec, (StyleSpec, InitSpec))
for spec_name, spec in self._meta.specs.items()
if spec_name in kwargs])
# validate if this gui object needs and support recreation
if needs_rebuild and recreate or force:
if DEBUG: print "rebuilding window!"
# recreate the wx_obj! warning: it will call Destroy()
self.__init__(**kwargs)
else:
if DEBUG: print "just setting attr!"
for name, value in kwargs.items():
setattr(self, name, value) | 0.007134 |
def attributesToBinary(cls, attributes):
"""
:rtype: (str|None,int)
:return: the binary data and the number of chunks it was composed from
"""
chunks = [(int(k), v) for k, v in iteritems(attributes) if cls._isValidChunkName(k)]
chunks.sort()
numChunks = int(attributes[u'numChunks'])
if numChunks:
serializedJob = b''.join(v for k, v in chunks)
compressed = base64.b64decode(serializedJob)
if compressed[0] == b'C'[0]:
binary = bz2.decompress(compressed[1:])
elif compressed[0] == b'U'[0]:
binary = compressed[1:]
else:
raise RuntimeError('Unexpected prefix {}'.format(compressed[0]))
else:
binary = None
return binary, numChunks | 0.004825 |
def multi_bulk(self, args):
'''Multi bulk encoding for list/tuple ``args``
'''
return null_array if args is None else b''.join(self._pack(args)) | 0.011905 |
def initialize(self, init_value, context=None, force=False):
"""
Initialize the configuration manager
:param force: force initialization even if it's already initialized
:return:
"""
if not force and self._instance is not None:
raise ConfigurationAlreadyInitializedError(
'Configuration manager object is already initialized.'
)
self.__class__._instance = Root(init_value, context=context) | 0.004098 |
def map_abi_data(normalizers, types, data):
"""
This function will apply normalizers to your data, in the
context of the relevant types. Each normalizer is in the format:
def normalizer(datatype, data):
# Conditionally modify data
return (datatype, data)
Where datatype is a valid ABI type string, like "uint".
In case of an array, like "bool[2]", normalizer will receive `data`
as an iterable of typed data, like `[("bool", True), ("bool", False)]`.
Internals
---
This is accomplished by:
1. Decorating the data tree with types
2. Recursively mapping each of the normalizers to the data
3. Stripping the types back out of the tree
"""
pipeline = itertools.chain(
[abi_data_tree(types)],
map(data_tree_map, normalizers),
[partial(recursive_map, strip_abi_type)],
)
return pipe(data, *pipeline) | 0.001101 |
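A minimal sketch of a normalizer in the format the docstring above describes; the function name and behavior are illustrative, not part of the library.
def lowercase_addresses(datatype, data):
    # Conditionally modify data: lower-case string values typed as "address".
    if datatype == "address" and isinstance(data, str):
        return datatype, data.lower()
    return datatype, data
# Hypothetical call following the documented signature:
# map_abi_data([lowercase_addresses], ["address", "uint256"], ["0xABCD", 1])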
def align(self, other):
"""
Align two time series so that len(self) == len(other) and self.timstamps == other.timestamps.
:return: :tuple:(`TimeSeries` object(the aligned self), `TimeSeries` object(the aligned other))
"""
if isinstance(other, TimeSeries):
aligned, other_aligned = {}, {}
i, other_i = self.iteritems_silent(), other.iteritems_silent()
item, other_item = next(i), next(other_i)
while item and other_item:
# Unpack timestamps and values.
timestamp, value = item
other_timestamp, other_value = other_item
if timestamp == other_timestamp:
aligned[timestamp] = value
other_aligned[other_timestamp] = other_value
item = next(i)
other_item = next(other_i)
elif timestamp < other_timestamp:
aligned[timestamp] = value
other_aligned[timestamp] = other_value
item = next(i)
else:
aligned[other_timestamp] = value
other_aligned[other_timestamp] = other_value
other_item = next(other_i)
# Align remaining items.
while item:
timestamp, value = item
aligned[timestamp] = value
other_aligned[timestamp] = other.values[-1]
item = next(i)
while other_item:
other_timestamp, other_value = other_item
aligned[other_timestamp] = self.values[-1]
other_aligned[other_timestamp] = other_value
other_item = next(other_i)
return TimeSeries(aligned), TimeSeries(other_aligned) | 0.002188 |
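A hedged usage sketch of the alignment above, assuming (as the return statements suggest) that TimeSeries can be built from a {timestamp: value} mapping.
# Two series with partially overlapping timestamps.
a = TimeSeries({1: 10.0, 3: 30.0})
b = TimeSeries({1: 1.0, 2: 2.0, 3: 3.0})
a_aligned, b_aligned = a.align(b)
# Both aligned series now share the same set of timestamps (1, 2 and 3).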
def module(self):
"""The module in which the Class is defined.
Python equivalent of the CLIPS defclass-module command.
"""
modname = ffi.string(lib.EnvDefclassModule(self._env, self._cls))
defmodule = lib.EnvFindDefmodule(self._env, modname)
return Module(self._env, defmodule) | 0.006079 |
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
logger.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
unique_id += 1
return features | 0.00177 |
def MI_enumInstanceNames(self,
env,
objPath):
# pylint: disable=invalid-name
"""Return instance names of a given CIM class
Implements the WBEM operation EnumerateInstanceNames in terms
of the enum_instances method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_enumInstanceNames called...')
model = pywbem.CIMInstance(classname=objPath.classname,
path=objPath)
gen = self.enum_instances(env=env,
model=model,
keys_only=True)
try:
iter(gen)
except TypeError:
logger.log_debug('CIMProvider2 MI_enumInstanceNames returning')
return
for inst in gen:
yield inst.path
logger.log_debug('CIMProvider2 MI_enumInstanceNames returning') | 0.004878 |
def _mk_connectivity_pits(self, i12, flats, elev, mag, dX, dY):
"""
Helper function for _mk_adjacency_matrix. This is a more general
version of _mk_adjacency_flats which drains pits and flats to nearby
but non-adjacent pixels. The slope magnitude (and flats mask) is
updated for these pits and flats so that the TWI can be computed.
"""
e = elev.data.ravel()
pit_i = []
pit_j = []
pit_prop = []
warn_pits = []
pits = i12[flats & (elev > 0)]
I = np.argsort(e[pits])
for pit in pits[I]:
# find drains
pit_area = np.array([pit], 'int64')
drain = None
epit = e[pit]
for it in range(self.drain_pits_max_iter):
border = get_border_index(pit_area, elev.shape, elev.size)
eborder = e[border]
emin = eborder.min()
if emin < epit:
drain = border[eborder < epit]
break
pit_area = np.concatenate([pit_area, border[eborder == emin]])
if drain is None:
warn_pits.append(pit)
continue
ipit, jpit = np.unravel_index(pit, elev.shape)
Idrain, Jdrain = np.unravel_index(drain, elev.shape)
# filter by drain distance in coordinate space
if self.drain_pits_max_dist:
dij = np.sqrt((ipit - Idrain)**2 + (jpit-Jdrain)**2)
b = dij <= self.drain_pits_max_dist
if not b.any():
warn_pits.append(pit)
continue
drain = drain[b]
Idrain = Idrain[b]
Jdrain = Jdrain[b]
# calculate real distances
dx = [_get_dX_mean(dX, ipit, idrain) * (jpit - jdrain)
for idrain, jdrain in zip(Idrain, Jdrain)]
dy = [dY[make_slice(ipit, idrain)].sum() for idrain in Idrain]
dxy = np.sqrt(np.array(dx)**2 + np.array(dy)**2)
# filter by drain distance in real space
if self.drain_pits_max_dist_XY:
b = dxy <= self.drain_pits_max_dist_XY
if not b.any():
warn_pits.append(pit)
continue
drain = drain[b]
dxy = dxy[b]
# calculate magnitudes
s = (e[pit]-e[drain]) / dxy
# connectivity info
# TODO proportion calculation (_mk_connectivity_flats used elev?)
pit_i += [pit for i in drain]
pit_j += drain.tolist()
pit_prop += s.tolist()
# update pit magnitude and flats mask
mag[ipit, jpit] = np.mean(s)
flats[ipit, jpit] = False
if warn_pits:
warnings.warn("Warning %d pits had no place to drain to in this "
"chunk" % len(warn_pits))
# Note: returning flats and mag here is not strictly necessary
return (np.array(pit_i, 'int64'),
np.array(pit_j, 'int64'),
np.array(pit_prop, 'float64'),
flats,
mag) | 0.00304 |
def _unify_call_signature(i, dist_fn):
"""Creates `dist_fn_wrapped` which calls `dist_fn` with all prev nodes.
Args:
i: Python `int` corresponding to position in topologically sorted DAG.
dist_fn: Python `callable` which takes a subset of previously constructed
distributions (in reverse order) and produces a new distribution instance.
Returns:
dist_fn_wrapped: Python `callable` which takes all previous distributions
(in non reverse order) and produces a new distribution instance.
args: `tuple` of `str` representing the arg names of `dist_fn` (and in non
wrapped, "natural" order). `None` is returned only if the input is not a
`callable`.
"""
if distribution_util.is_distribution_instance(dist_fn):
return (lambda *_: dist_fn), None
if not callable(dist_fn):
raise TypeError('{} must be either `tfd.Distribution`-like or '
'`callable`.'.format(dist_fn))
args = _get_required_args(dist_fn)
if not args:
return (lambda *_: dist_fn()), ()
@functools.wraps(dist_fn)
def dist_fn_wrapped(*xs):
"""Calls `dist_fn` with reversed and truncated args."""
if i != len(xs):
raise ValueError(
'Internal Error: Unexpected number of inputs provided to {}-th '
'distribution maker (dist_fn: {}, expected: {}, saw: {}).'.format(
i, dist_fn, i, len(xs)))
if len(xs) < len(args):
raise ValueError(
'Internal Error: Too few inputs provided to {}-th distribution maker '
'(dist_fn: {}, expected: {}, saw: {}).'.format(
i, dist_fn, len(args), len(xs)))
return dist_fn(*reversed(xs[-len(args):]))
return dist_fn_wrapped, args | 0.007634 |
def move_group(self, group, parent, index=None):
"""
Move group to be a child of new parent.
:param group: The group to move.
:type group: :class:`keepassdb.model.Group`
:param parent: The new parent for the group.
:type parent: :class:`keepassdb.model.Group`
:param index: The 0-based index within the parent (defaults to appending
group to end of parent's children).
:type index: int
"""
if not isinstance(group, Group):
raise TypeError("group param must be of type Group")
if parent is not None and not isinstance(parent, Group):
raise TypeError("parent param must be of type Group")
if group is parent:
raise ValueError("group and parent are the same")
if parent is None:
parent = self.root
elif parent not in self.groups:
raise exc.UnboundModelError("Parent group doesn't exist / is not bound to this database.")
if group not in self.groups:
raise exc.UnboundModelError("Group doesn't exist / is not bound to this database.")
curr_parent = group.parent
curr_parent.children.remove(group)
if index is None:
parent.children.append(group)
self.log.debug("Moving {0!r} to child of {1!r}, (appending)".format(group, parent))
else:
parent.children.insert(index, group)
self.log.debug("Moving {0!r} to child of {1!r}, (at position {2!r})".format(group, parent, index))
#Recurse down and reset level of all moved nodes
def set_level(g):
g.level = g.parent.level + 1
for child in g.children:
set_level(child)
group.parent = parent
set_level(group)
group.modified = util.now()
self._rebuild_groups() | 0.008629 |
def execute_task(self, task, workflow_id, data=None):
""" Celery task that runs a single task on a worker.
Args:
self (Task): Reference to itself, the celery task object.
task (BaseTask): Reference to the task object that performs the work
in its run() method.
workflow_id (string): The unique ID of the workflow run that started this task.
data (MultiTaskData): An optional MultiTaskData object that contains the data
that has been passed down from upstream tasks.
"""
start_time = datetime.utcnow()
store_doc = DataStore(**self.app.user_options['config'].data_store,
auto_connect=True).get(workflow_id)
store_loc = 'log.{}.tasks.{}'.format(task.dag_name, task.name)
def handle_callback(message, event_type, exc=None):
msg = '{}: {}'.format(message, str(exc)) if exc is not None else message
# set the logging level
if event_type == JobEventName.Stopped:
logger.warning(msg)
elif event_type == JobEventName.Aborted:
logger.error(msg)
else:
logger.info(msg)
current_time = datetime.utcnow()
# store provenance information about a task
if event_type != JobEventName.Started:
duration = (current_time - start_time).total_seconds()
store_doc.set(key='{}.end_time'.format(store_loc),
value=current_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.duration'.format(store_loc),
value=duration,
section=DataStoreDocumentSection.Meta)
else:
# store provenance information about a task
store_doc.set(key='{}.start_time'.format(store_loc),
value=start_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.worker'.format(store_loc),
value=self.request.hostname,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.queue'.format(store_loc),
value=task.queue,
section=DataStoreDocumentSection.Meta)
duration = None
# send custom celery event
self.send_event(event_type,
job_type=JobType.Task,
name=task.name,
queue=task.queue,
time=current_time,
workflow_id=workflow_id,
duration=duration)
# store job specific meta information wth the job
self.update_state(meta={'name': task.name,
'queue': task.queue,
'type': JobType.Task,
'workflow_id': workflow_id})
# send start celery event
handle_callback('Start task <{}>'.format(task.name), JobEventName.Started)
# run the task and capture the result
return task._run(
data=data,
store=store_doc,
signal=TaskSignal(Client(
SignalConnection(**self.app.user_options['config'].signal, auto_connect=True),
request_key=workflow_id),
task.dag_name),
context=TaskContext(task.name, task.dag_name, task.workflow_name,
workflow_id, self.request.hostname),
success_callback=partial(handle_callback,
message='Complete task <{}>'.format(task.name),
event_type=JobEventName.Succeeded),
stop_callback=partial(handle_callback,
message='Stop task <{}>'.format(task.name),
event_type=JobEventName.Stopped),
abort_callback=partial(handle_callback,
message='Abort workflow <{}> by task <{}>'.format(
task.workflow_name, task.name),
event_type=JobEventName.Aborted)) | 0.001688 |
def _build_collapse_to_gene_dict(graph) -> Dict[BaseEntity, Set[BaseEntity]]:
"""Build a collapse dictionary.
:param pybel.BELGraph graph: A BEL graph
    :return: A dictionary mapping each gene node to the set of its RNA and protein nodes
"""
collapse_dict = defaultdict(set)
r2g = {}
for gene_node, rna_node, d in graph.edges(data=True):
if d[RELATION] != TRANSCRIBED_TO:
continue
collapse_dict[gene_node].add(rna_node)
r2g[rna_node] = gene_node
for rna_node, protein_node, d in graph.edges(data=True):
if d[RELATION] != TRANSLATED_TO:
continue
if rna_node not in r2g:
raise ValueError('Should complete origin before running this function')
collapse_dict[r2g[rna_node]].add(protein_node)
return collapse_dict | 0.002481 |
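A minimal sketch of how the returned dictionary might be consumed to collapse RNA and protein nodes onto their gene node; the `collapse_pair(graph, survivor, victim)` helper is assumed here and is not part of the snippet above.

def collapse_to_genes(graph) -> None:
    """Collapse every RNA and protein node onto the gene it originates from."""
    for gene_node, descendants in _build_collapse_to_gene_dict(graph).items():
        for node in descendants:
            # Assumed helper: merges `node` into `gene_node`, rewiring its edges.
            collapse_pair(graph, survivor=gene_node, victim=node)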
def add_keywords_from_dict(self, keyword_dict):
"""To add keywords from a dictionary
Args:
keyword_dict (dict): A dictionary with `str` key and (list `str`) as value
Examples:
>>> keyword_dict = {
"java": ["java_2e", "java programing"],
"product management": ["PM", "product manager"]
}
>>> keyword_processor.add_keywords_from_dict(keyword_dict)
Raises:
AttributeError: If value for a key in `keyword_dict` is not a list.
"""
for clean_name, keywords in keyword_dict.items():
if not isinstance(keywords, list):
raise AttributeError("Value of key {} should be a list".format(clean_name))
for keyword in keywords:
self.add_keyword(keyword, clean_name) | 0.00464 |
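A hedged usage sketch, assuming this method belongs to a flashtext-style `KeywordProcessor`; the import path and the expected output are assumptions, not taken from the snippet above.

from flashtext import KeywordProcessor  # assumed import path

keyword_processor = KeywordProcessor()
keyword_processor.add_keywords_from_dict({
    "java": ["java_2e", "java programing"],
    "product management": ["PM", "product manager"],
})
# extract_keywords returns the clean names for every keyword found in the text
print(keyword_processor.extract_keywords("Our PM hired a java programing mentor"))
# expected (assumption): ['product management', 'java']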
def contamination_detection(self):
"""
Calculate the levels of contamination in the reads
"""
self.qualityobject = quality.Quality(self)
self.qualityobject.contamination_finder(input_path=self.sequencepath,
report_path=self.reportpath) | 0.006192 |
def outlineColor(self, value):
""" sets the outline color """
if isinstance(value, (list, Color)):
            if isinstance(value, list):
self._outlineColor = value
else:
self._outlineColor = value.asList | 0.007843 |
def close(self):
"""Close by closing the :attr:`transport`
Return the ``connection_lost`` event which can be used to wait
for complete transport closure.
"""
if not self._closed:
closed = False
event = self.event('connection_lost')
if self.transport:
if self._loop.get_debug():
self.logger.debug('Closing connection %s', self)
if self.transport.can_write_eof():
try:
self.transport.write_eof()
except Exception:
pass
try:
worker = self.close_pipeline()
self.transport.close()
closed = self._loop.create_task(
self._close(event.waiter(), worker)
)
except Exception:
pass
self._closed = closed or True
if not closed:
event.fire()
return self._closed | 0.001873 |
async def streamstorm(self, text, opts=None, user=None):
'''
Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages.
'''
if opts is None:
opts = {}
MSG_QUEUE_SIZE = 1000
chan = asyncio.Queue(MSG_QUEUE_SIZE, loop=self.loop)
if user is None:
user = self.auth.getUserByName('root')
# promote ourself to a synapse task
synt = await self.boss.promote('storm', user=user, info={'query': text})
show = opts.get('show')
async def runStorm():
cancelled = False
tick = s_common.now()
count = 0
try:
# First, try text parsing. If this fails, we won't be able to get
# a storm runtime in the snap, so catch and pass the `err` message
# before handing a `fini` message along.
self.getStormQuery(text)
await chan.put(('init', {'tick': tick, 'text': text, 'task': synt.iden}))
shownode = (show is None or 'node' in show)
async with await self.snap(user=user) as snap:
if show is None:
snap.link(chan.put)
else:
[snap.on(n, chan.put) for n in show]
if shownode:
async for pode in snap.iterStormPodes(text, opts=opts, user=user):
await chan.put(('node', pode))
count += 1
else:
async for item in snap.storm(text, opts=opts, user=user):
count += 1
except asyncio.CancelledError:
logger.warning('Storm runtime cancelled.')
cancelled = True
raise
except Exception as e:
logger.exception('Error during storm execution')
enfo = s_common.err(e)
enfo[1].pop('esrc', None)
enfo[1].pop('ename', None)
await chan.put(('err', enfo))
finally:
if cancelled:
return
tock = s_common.now()
took = tock - tick
await chan.put(('fini', {'tock': tock, 'took': took, 'count': count}))
await synt.worker(runStorm())
while True:
mesg = await chan.get()
yield mesg
if mesg[0] == 'fini':
break | 0.003502 |
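A consumption sketch for the message stream above, assuming `core` is the object exposing `streamstorm`; it collects node messages and stops on the `fini` message.

async def collect_nodes(core, text):
    """Run a storm query and return the packed nodes it produced."""
    nodes = []
    async for name, info in core.streamstorm(text):
        if name == 'node':
            nodes.append(info)
        elif name == 'err':
            # info carries the error name and details packed by the runtime
            raise RuntimeError(info)
        elif name == 'fini':
            break
    return nodes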
def _process_json(data):
"""
return a list of GradCommittee objects.
"""
requests = []
for item in data:
committee = GradCommittee()
committee.status = item.get('status')
committee.committee_type = item.get('committeeType')
committee.dept = item.get('dept')
committee.degree_title = item.get('degreeTitle')
committee.degree_type = item.get('degreeType')
committee.major_full_name = item.get('majorFullName')
committee.start_date = datetime_from_string(item.get('startDate'))
committee.end_date = datetime_from_string(item.get('endDate'))
for member in item.get('members'):
if member.get('status') == "inactive":
continue
com_mem = GradCommitteeMember()
com_mem.first_name = member.get('nameFirst')
com_mem.last_name = member.get('nameLast')
if member.get('memberType') is not None and\
len(member.get('memberType')) > 0:
com_mem.member_type = member.get('memberType').lower()
if member.get('readingType') is not None and\
len(member.get('readingType')) > 0:
com_mem.reading_type = member.get('readingType').lower()
com_mem.dept = member.get('dept')
com_mem.email = member.get('email')
com_mem.status = member.get('status')
committee.members.append(com_mem)
requests.append(committee)
return requests | 0.000656 |
def map_metabolite2kegg(metabolite):
"""
Return a KEGG compound identifier for the metabolite if it exists.
First see if there is an unambiguous mapping to a single KEGG compound ID
provided with the model. If not, check if there is any KEGG compound ID in
a list of mappings. KEGG IDs may map to compounds, drugs and glycans. KEGG
compound IDs are sorted so we keep the lowest that is there. If none of
this works try mapping to KEGG via the CompoundMatcher by the name of the
metabolite. If the metabolite cannot be mapped at all we simply map it back
to its own ID.
Parameters
----------
metabolite : cobra.Metabolite
The metabolite to be mapped to its KEGG compound identifier.
Returns
-------
None
If the metabolite could not be mapped.
str
The smallest KEGG compound identifier that was found.
"""
logger.debug("Looking for KEGG compound identifier for %s.", metabolite.id)
kegg_annotation = metabolite.annotation.get("kegg.compound")
if kegg_annotation is None:
# TODO (Moritz Beber): Currently name matching is very slow and
# inaccurate. We disable it until there is a better solution.
# if metabolite.name:
# # The compound matcher uses regular expression and chokes
# # with a low level error on `[` in the name, for example.
# df = compound_matcher.match(metabolite.name)
# try:
# return df.loc[df["score"] > threshold, "CID"].iat[0]
# except (IndexError, AttributeError):
# logger.warning(
# "Could not match the name %r to any kegg.compound "
# "annotation for metabolite %s.",
# metabolite.name, metabolite.id
# )
# return
# else:
logger.warning("No kegg.compound annotation for metabolite %s.",
metabolite.id)
return
if isinstance(kegg_annotation, string_types) and \
kegg_annotation.startswith("C"):
return kegg_annotation
elif isinstance(kegg_annotation, Iterable):
try:
return get_smallest_compound_id(kegg_annotation)
except ValueError:
return
logger.warning(
"No matching kegg.compound annotation for metabolite %s.",
metabolite.id
)
return | 0.000413 |
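A sketch only, assuming a cobra model is in scope: build a metabolite-to-KEGG lookup that falls back to the metabolite's own ID when no mapping exists, as the docstring above describes.

def build_met2kegg(model):
    """Map every metabolite ID in `model` to a KEGG compound ID or to itself."""
    return {
        met.id: (map_metabolite2kegg(met) or met.id)
        for met in model.metabolites
    }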
def publish_collated_document(cursor, model, parent_model):
"""Publish a given `module`'s collated content in the context of
the `parent_model`. Note, the model's content is expected to already
have the collated content. This will just persist that content to
the archive.
"""
html = bytes(cnxepub.DocumentContentFormatter(model))
sha1 = hashlib.new('sha1', html).hexdigest()
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (sha1,))
try:
fileid = cursor.fetchone()[0]
except TypeError:
file_args = {
'media_type': 'text/html',
'data': psycopg2.Binary(html),
}
cursor.execute("""\
INSERT INTO files (file, media_type)
VALUES (%(data)s, %(media_type)s)
RETURNING fileid""", file_args)
fileid = cursor.fetchone()[0]
args = {
'module_ident_hash': model.ident_hash,
'parent_ident_hash': parent_model.ident_hash,
'fileid': fileid,
}
stmt = """\
INSERT INTO collated_file_associations (context, item, fileid)
VALUES
((SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(parent_ident_hash)s),
(SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(module_ident_hash)s),
%(fileid)s)"""
cursor.execute(stmt, args) | 0.000725 |
def start_timer(self, reprate):
"""Start the digital output task that serves as the acquistion trigger"""
print 'starting digital output at rate {} Hz'.format(reprate)
self.trigger_task = DigitalOutTask(self.trigger_src, reprate)
self.trigger_task.start() | 0.010453 |
def package(self, value):
"""
Setter for **self.__package** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"package", value)
self.__package = value | 0.008287 |
def get_load(jid):
'''
Included for API consistency
'''
options = _get_options(ret=None)
_response = _request("GET", options['url'] + options['db'] + '/' + jid)
if 'error' in _response:
log.error('Unable to get JID "%s" : "%s"', jid, _response)
return {}
return {_response['id']: _response} | 0.002994 |
def parse_contexts(contexts):
"""
Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs
For example, the JSON
{
"data": [
{
"data": {
"unique": true
},
"schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0"
},
{
"data": {
"value": 1
},
"schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
},
{
"data": {
"value": 2
},
"schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
}
],
"schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0"
}
would become
[
("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]),
("context_com_acme_unduplicated_1", [{"unique": true}])
]
"""
my_json = json.loads(contexts)
data = my_json['data']
distinct_contexts = {}
for context in data:
schema = fix_schema("contexts", context['schema'])
inner_data = context['data']
if schema not in distinct_contexts:
distinct_contexts[schema] = [inner_data]
else:
distinct_contexts[schema].append(inner_data)
output = []
for key in distinct_contexts:
output.append((key, distinct_contexts[key]))
return output | 0.001467 |
async def open_wallet_search(wallet_handle: int,
type_: str,
query_json: str,
options_json: str) -> int:
"""
Search for wallet records
:param wallet_handle: wallet handler (created by open_wallet).
:param type_: allows to separate different record types collections
:param query_json: MongoDB style query to wallet record tags:
{
"tagName": "tagValue",
$or: {
"tagName2": { $regex: 'pattern' },
"tagName3": { $gte: '123' },
},
}
:param options_json: //TODO: FIXME: Think about replacing by bitmask
{
retrieveRecords: (optional, true by default) If false only "counts" will be calculated,
retrieveTotalCount: (optional, false by default) Calculate total count,
retrieveType: (optional, false by default) Retrieve record type,
retrieveValue: (optional, true by default) Retrieve record value,
retrieveTags: (optional, true by default) Retrieve record tags,
}
:return: search_handle: Wallet search handle that can be used later
to fetch records by small batches (with fetch_wallet_search_next_records)
"""
logger = logging.getLogger(__name__)
logger.debug("open_wallet_search: >>> wallet_handle: %r, type_: %r, query_json: %r, options_json: %r",
wallet_handle,
type_,
query_json,
options_json)
if not hasattr(open_wallet_search, "cb"):
logger.debug("open_wallet_search: Creating callback")
open_wallet_search.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_type = c_char_p(type_.encode('utf-8'))
c_query_json = c_char_p(query_json.encode('utf-8'))
c_options_json = c_char_p(options_json.encode('utf-8'))
search_handle = await do_call('indy_open_wallet_search',
c_wallet_handle,
c_type,
c_query_json,
c_options_json,
open_wallet_search.cb)
res = search_handle
logger.debug("open_wallet_search: <<< res: %r", res)
return res | 0.002158 |
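A hedged usage sketch for the search API above; the companion coroutines `fetch_wallet_search_next_records` and `close_wallet_search`, and their signatures, are assumed from the docstring rather than verified.

import json

async def find_blue_records(wallet_handle):
    search_handle = await open_wallet_search(
        wallet_handle,
        type_='credential',
        query_json=json.dumps({'color': 'blue'}),
        options_json=json.dumps({'retrieveTotalCount': True}))
    try:
        # Assumed signature: fetch a small batch of matching records as JSON.
        batch = await fetch_wallet_search_next_records(wallet_handle, search_handle, 10)
        return json.loads(batch)
    finally:
        await close_wallet_search(search_handle)  # assumed companion call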
def friction_plate_Martin_1999(Re, plate_enlargement_factor):
r'''Calculates Darcy friction factor for single-phase flow in a
Chevron-style plate heat exchanger according to [1]_.
.. math::
\frac{1}{\sqrt{f_f}} = \frac{\cos \phi}{\sqrt{0.045\tan\phi
+ 0.09\sin\phi + f_0/\cos(\phi)}} + \frac{1-\cos\phi}{\sqrt{3.8f_1}}
.. math::
f_0 = 16/Re \text{ for } Re < 2000
.. math::
f_0 = (1.56\ln Re - 3)^{-2} \text{ for } Re \ge 2000
.. math::
f_1 = \frac{149}{Re} + 0.9625 \text{ for } Re < 2000
.. math::
f_1 = \frac{9.75}{Re^{0.289}} \text{ for } Re \ge 2000
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
plate_enlargement_factor : float
The extra surface area multiplier as compared to a flat plate
caused the corrugations, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Based on experimental data from Re from 200 - 10000 and enhancement
factors calculated with chevron angles of 0 to 80 degrees. See
`PlateExchanger` for further clarification on the definitions.
The length the friction factor gets multiplied by is not the flow path
length, but rather the straight path length from port to port as if there
were no chevrons.
Note there is a discontinuity at Re = 2000 for the transition from
laminar to turbulent flow, although the literature suggests the transition
is actually smooth.
This was first developed in [2]_ and only minor modifications by the
original author were made before its republication in [1]_.
This formula is also suggested in [3]_
Examples
--------
>>> friction_plate_Martin_1999(Re=20000, plate_enlargement_factor=1.15)
2.284018089834134
References
----------
.. [1] Martin, Holger. "Economic optimization of compact heat exchangers."
EF-Conference on Compact Heat Exchangers and Enhancement Technology for
the Process Industries, Banff, Canada, July 18-23, 1999, 1999.
https://publikationen.bibliothek.kit.edu/1000034866.
.. [2] Martin, Holger. "A Theoretical Approach to Predict the Performance
of Chevron-Type Plate Heat Exchangers." Chemical Engineering and
Processing: Process Intensification 35, no. 4 (January 1, 1996): 301-10.
https://doi.org/10.1016/0255-2701(95)04129-X.
.. [3] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
'''
phi = plate_enlargement_factor
if Re < 2000.:
f0 = 16./Re
f1 = 149./Re + 0.9625
else:
f0 = (1.56*log(Re) - 3.0)**-2
f1 = 9.75*Re**-0.289
rhs = cos(phi)*(0.045*tan(phi) + 0.09*sin(phi) + f0/cos(phi))**-0.5
rhs += (1. - cos(phi))*(3.8*f1)**-0.5
ff = rhs**-2.
return ff*4.0 | 0.007913 |
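A quick check of the laminar/turbulent discontinuity at Re = 2000 noted above; the printed values are not asserted here, the loop is only meant to show the jump.

for Re in (1999.0, 2001.0):
    print(Re, friction_plate_Martin_1999(Re=Re, plate_enlargement_factor=1.15))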
def read_byte(self):
"""Read one byte of cooked data
"""
buf = b''
if len(self.cookedq) > 0:
buf = bytes([self.cookedq[0]])
self.cookedq = self.cookedq[1:]
else:
yield from self.process_rawq()
if not self.eof:
yield from self.fill_rawq()
yield from self.process_rawq()
# There now should be data so lets read again
buf = yield from self.read_byte()
return buf | 0.003846 |
def revoke_admin_privileges(name, **client_args):
'''
Revoke cluster administration privileges from a user.
name
Name of the user from whom admin privileges will be revoked.
CLI Example:
.. code-block:: bash
salt '*' influxdb.revoke_admin_privileges <name>
'''
client = _client(**client_args)
client.revoke_admin_privileges(name)
return True | 0.002513 |
def doAffiliate(self):
"""Direct the user sign up with an affiliate OpenID provider."""
sreg_req = sreg.SRegRequest(['nickname'], ['fullname', 'email'])
href = sreg_req.toMessage().toURL(OPENID_PROVIDER_URL)
message = """Get an OpenID at <a href=%s>%s</a>""" % (
quoteattr(href), OPENID_PROVIDER_NAME)
self.render(message) | 0.005333 |
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply) | 0.002039 |
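A hypothetical input message for Handle_Note, matching the structure the method reads; `handler` stands in for whatever object carries the `db` attribute used above, and the reply comes back as a JSON string.

msg = {"object": {"note": "buy milk", "tags": ["errands", "shopping"]}}
reply_json = handler.Handle_Note(msg)  # hypothetical handler instance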
def run_conditional_decorators(self, context):
"""Evaluate the step decorators to decide whether to run step or not.
Use pypyr.dsl.Step.run_step if you intend on executing the step the
same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# The decorator attributes might contain formatting expressions that
# change whether they evaluate True or False, thus apply formatting at
# last possible instant.
run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
swallow_me = context.get_formatted_as_type(self.swallow_me,
out_type=bool)
if run_me:
if not skip_me:
try:
if self.retry_decorator:
self.retry_decorator.retry_loop(context,
self.invoke_step)
else:
self.invoke_step(context=context)
except Exception as ex_info:
if swallow_me:
logger.error(
f"{self.name} Ignoring error because swallow "
"is True for this step.\n"
f"{type(ex_info).__name__}: {ex_info}")
else:
raise
else:
logger.info(
f"{self.name} not running because skip is True.")
else:
logger.info(f"{self.name} not running because run is False.")
logger.debug("done") | 0.001101 |
def check_perms(perms, user, slug, raise_exception=False):
"""a helper user to check if a user has the permissions
for a given slug"""
if isinstance(perms, string_types):
perms = {perms}
else:
perms = set(perms)
allowed_users = ACLRule.get_users_for(perms, slug)
if allowed_users:
return user in allowed_users
if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
return True
if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
return True
# First check if the user has the permission (even anon users)
if user.has_perms(['waliki.%s' % p for p in perms]):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False | 0.00227 |
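A usage sketch with a hypothetical permission name: gate an edit view on a waliki permission for a given slug using the helper above.

def can_edit(request, slug):
    # 'change_page' is an illustrative permission name, not taken from the snippet.
    return check_perms('change_page', request.user, slug)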
def click_download(self, event):
        """
        Event handler for the download button.
        """
        args['parallel'] = self.p.get()
        args['file_type'] = self.optionmenu.get()
        args['no_redirects'] = self.t.get()
        args['query'] = self.entry_query.get()
        args['min_file_size'] = int(self.entry_min.get())
        args['max_file_size'] = int(self.entry_max.get())
        args['limit'] = int(self.entry_limit.get())
        args['website'] = self.entry_website.get()
        args['option'] = self.engine.get()
        print(args)
        self.check_threat()
        download_content_gui(**args) | 0.061111 |
def map_statements(self):
"""Run the ontology mapping on the statements."""
for stmt in self.statements:
for agent in stmt.agent_list():
if agent is None:
continue
all_mappings = []
for db_name, db_id in agent.db_refs.items():
if isinstance(db_id, list):
db_id = db_id[0][0]
mappings = self._map_id(db_name, db_id)
all_mappings += mappings
for map_db_name, map_db_id, score, orig_db_name in all_mappings:
if map_db_name in agent.db_refs:
continue
if self.scored:
# If the original one is a scored grounding,
# we take that score and multiply it with the mapping
# score. Otherwise we assume the original score is 1.
try:
orig_score = agent.db_refs[orig_db_name][0][1]
except Exception:
orig_score = 1.0
agent.db_refs[map_db_name] = \
[(map_db_id, score * orig_score)]
else:
if map_db_name in ('UN', 'HUME'):
agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
else:
agent.db_refs[map_db_name] = map_db_id | 0.001984 |
def send_notification(self, title, message, typ=1, url=None, sender=None):
"""
sends a message to user of this role's private mq exchange
"""
self.user.send_notification(title=title, message=message, typ=typ, url=url,
sender=sender) | 0.009967 |