text (string, 78 to 104k characters) | score (float64, 0 to 0.18)
---|---
def _firefox_profile():
"""Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set"""
profile_dir = os.environ.get(FIREFOX_PROFILE_ENV_VAR)
if profile_dir:
LOGGER.info(u"Using firefox profile: %s", profile_dir)
try:
firefox_profile = webdriver.FirefoxProfile(profile_dir)
except OSError as err:
if err.errno == errno.ENOENT:
raise BrowserConfigError(
u"Firefox profile directory {env_var}={profile_dir} does not exist".format(
env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir))
elif err.errno == errno.EACCES:
raise BrowserConfigError(
u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \
readable and executable.".format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir))
else:
# Some other OSError:
raise BrowserConfigError(
u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}"
.format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir, msg=str(err)))
else:
LOGGER.info("Using default firefox profile")
firefox_profile = webdriver.FirefoxProfile()
# Bypasses the security prompt displayed by the browser when it attempts to
# access a media device (e.g., a webcam)
firefox_profile.set_preference('media.navigator.permission.disabled', True)
# Disable the initial url fetch to 'learn more' from mozilla (so you don't have to
# be online to run bok-choy on firefox)
firefox_profile.set_preference('browser.startup.homepage', 'about:blank')
firefox_profile.set_preference('startup.homepage_welcome_url', 'about:blank')
firefox_profile.set_preference('startup.homepage_welcome_url.additional', 'about:blank')
# Disable fetching an updated version of firefox
firefox_profile.set_preference('app.update.enabled', False)
# Disable plugin checking
firefox_profile.set_preference('plugins.hide_infobar_for_outdated_plugin', True)
# Disable health reporter
firefox_profile.set_preference('datareporting.healthreport.service.enabled', False)
# Disable all data upload (Telemetry and FHR)
firefox_profile.set_preference('datareporting.policy.dataSubmissionEnabled', False)
# Disable crash reporter
firefox_profile.set_preference('toolkit.crashreporter.enabled', False)
# Disable the JSON Viewer
firefox_profile.set_preference('devtools.jsonview.enabled', False)
# Grant OS focus to the launched browser so focus-related tests function correctly
firefox_profile.set_preference('focusmanager.testmode', True)
for function in FIREFOX_PROFILE_CUSTOMIZERS:
function(firefox_profile)
return firefox_profile | 0.005705 |
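The loop at the end lets callers hook into profile creation via FIREFOX_PROFILE_CUSTOMIZERS. A minimal sketch of registering such a customizer, assuming FIREFOX_PROFILE_CUSTOMIZERS is a module-level list consumed by _firefox_profile(); the add_profile_customizer helper and the preferences set below are illustrative assumptions, not the library's confirmed API:

# Hypothetical registration helper and customizer.
FIREFOX_PROFILE_CUSTOMIZERS = []

def add_profile_customizer(func):
    """Append a callable that receives the FirefoxProfile before launch."""
    FIREFOX_PROFILE_CUSTOMIZERS.append(func)

def force_download_dir(profile):
    # Each customizer receives the profile object and may set extra preferences.
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.dir', '/tmp/downloads')

add_profile_customizer(force_download_dir)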
def put_warmer(self, doc_types=None, indices=None, name=None, warmer=None, querystring_args=None):
"""
Put new warmer into index (or type)
:param doc_types: list of document types
:param warmer: anything with ``serialize`` method or a dictionary
:param name: warmer name
:param querystring_args: additional arguments passed as GET params to ES
"""
if not querystring_args:
querystring_args = {}
doc_types_str = ''
if doc_types:
doc_types_str = '/' + ','.join(doc_types)
path = '/{0}{1}/_warmer/{2}'.format(','.join(indices), doc_types_str, name)
if hasattr(warmer, 'serialize'):
body = warmer.serialize()
else:
body = warmer
return self._send_request(method='PUT', path=path, body=body, params=querystring_args) | 0.006881 |
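A minimal usage sketch for put_warmer, assuming a pyes-style connection object named conn that exposes the method above; the index, type and warmer names are placeholders. Because a plain dict has no serialize method, it is sent as the request body unchanged:

warmer_body = {"query": {"match_all": {}}}
conn.put_warmer(
    doc_types=["book"],
    indices=["library"],
    name="warm_books",
    warmer=warmer_body,
)
# Resulting request: PUT /library/book/_warmer/warm_books with the query as body.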
def is_ancestor_of_log(self, id_, log_id):
"""Tests if an ``Id`` is an ancestor of a log.
arg: id (osid.id.Id): an ``Id``
arg: log_id (osid.id.Id): the ``Id`` of a log
return: (boolean) - ``true`` if the ``id`` is an ancestor of the
``log_id,`` ``false`` otherwise
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``id`` or ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=log_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=log_id) | 0.002918 |
def status_for_order(self, order_id, stock):
"""Status For An Existing Order
https://starfighter.readme.io/docs/status-for-an-existing-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
venue=self.venue,
stock=stock,
order_id=order_id,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json() | 0.006803 |
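A short usage sketch, assuming a Starfighter API client instance (here called client) with venue, base_url and a requests session already configured; the stock symbol and order id are placeholders:

status = client.status_for_order(order_id=1234, stock='FOOBAR')
# 'status' is the decoded JSON order status returned by the venue
# (typically including fields such as 'ok', 'open' and 'fills').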
def validate_address(self, address_deets):
"""Validates a customer address and returns back a collection of address matches."""
request = self._post('addresses/validate', address_deets)
return self.responder(request) | 0.0125 |
def _attributes(note, data):
"""
attribute of the note
:param note: note object
:param data:
:return:
"""
# attribute of the note: the link to the website
note_attribute = EvernoteMgr.set_note_attribute(data)
if note_attribute:
note.attributes = note_attribute
return note | 0.005495 |
def setup(self, # pylint: disable=arguments-differ
endpoint=None,
username=None,
password=None,
incident_id=None,
sketch_id=None):
"""Setup a connection to a Timesketch server and create a sketch if needed.
Args:
endpoint: str, Timesketch endpoint (e.g. http://timesketch.com/)
username: str, Username to authenticate against the Timesketch endpoint.
password: str, Password to authenticate against the Timesketch endpoint.
incident_id: str, Incident ID or reference. Used in sketch description.
sketch_id: int, Sketch ID to add the resulting timeline to. If not
provided, a new sketch is created.
"""
self.timesketch_api = timesketch_utils.TimesketchApiClient(
endpoint, username, password)
self.incident_id = None
self.sketch_id = int(sketch_id) if sketch_id else None
# Check that we have a timesketch session
if not self.timesketch_api.session:
message = 'Could not connect to Timesketch server at ' + endpoint
self.state.add_error(message, critical=True)
return
if not self.sketch_id: # No sketch id is provided, create it
if incident_id:
sketch_name = 'Sketch for incident ID: ' + incident_id
else:
sketch_name = 'Untitled sketch'
sketch_description = 'Sketch generated by dfTimewolf'
self.sketch_id = self.timesketch_api.create_sketch(
sketch_name, sketch_description)
print('Sketch {0:d} created'.format(self.sketch_id)) | 0.009021 |
def get_counted_number(context, config, variables, **kw):
"""Compute the number for the sequence type "Counter"
"""
# This "context" is defined by the user in the Setup and can be actually
# anything. However, we assume it is something like "sample" or similar
ctx = config.get("context")
# get object behind the context name (falls back to the current context)
obj = variables.get(ctx, context)
# get the counter type, which is either "backreference" or "contained"
counter_type = config.get("counter_type")
# the counter reference is either the "relationship" for
# "backreference" or the meta type for contained objects
counter_reference = config.get("counter_reference")
# This should be a list of existing items, including the current context
# object
seq_items = get_objects_in_sequence(obj, counter_type, counter_reference)
number = len(seq_items)
return number | 0.001063 |
def artifact_bundles(self):
"""
Gets the Artifact Bundles API client.
Returns:
ArtifactBundles: The Artifact Bundles API client.
"""
if not self.__artifact_bundles:
self.__artifact_bundles = ArtifactBundles(self.__connection)
return self.__artifact_bundles | 0.006757 |
def organization_requests(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/requests#list-requests"
api_path = "/api/v2/organizations/{id}/requests.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | 0.01083 |
def encode_callbacks(callbacks):
"""Encode callbacks to as a dict suitable for JSON encoding."""
from furious.async import Async
if not callbacks:
return
encoded_callbacks = {}
for event, callback in callbacks.iteritems():
if callable(callback):
callback, _ = get_function_path_and_options(callback)
elif isinstance(callback, Async):
callback = callback.to_dict()
encoded_callbacks[event] = callback
return encoded_callbacks | 0.003929 |
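A sketch of the shape this produces, assuming a Python 2 runtime (matching the iteritems() call) and ignoring the Async branch; callables are replaced by their dotted import path via get_function_path_and_options(), while plain strings pass through untouched:

def notify_done(result):
    pass

callbacks = {'success': notify_done, 'error': 'myapp.handlers.log_failure'}
encoded = encode_callbacks(callbacks)
# 'encoded' is now JSON-serializable, roughly:
# {'success': '<module>.notify_done', 'error': 'myapp.handlers.log_failure'}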
def create_subject_access_review(self, body, **kwargs):
"""
create a SubjectAccessReview
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_subject_access_review(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1SubjectAccessReview body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str pretty: If 'true', then the output is pretty printed.
:return: V1SubjectAccessReview
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_subject_access_review_with_http_info(body, **kwargs)
else:
(data) = self.create_subject_access_review_with_http_info(body, **kwargs)
return data | 0.003997 |
def user_loc_value_to_class(axis_tag, user_loc):
"""Return the OS/2 weight or width class that is closest to the provided
user location. For weight the user location is between 0 and 1000 and for
width it is a percentage.
>>> user_loc_value_to_class('wght', 310)
310
>>> user_loc_value_to_class('wdth', 62)
2
"""
if axis_tag == "wght":
return int(user_loc)
elif axis_tag == "wdth":
return min(
sorted(WIDTH_CLASS_TO_VALUE.items()),
key=lambda item: abs(item[1] - user_loc),
)[0]
raise NotImplementedError | 0.001672 |
def full_newton(s, nodes1, t, nodes2):
r"""Perform a Newton iteration until convergence to a solution.
This assumes :math:`s` and :math:`t` are sufficiently close to an
intersection. It **does not** govern the maximum distance away
that the solution can lie, though the subdivided intervals that contain
:math:`s` and :math:`t` could be used.
To avoid round-off issues near ``0.0``, this reverses the direction
of a curve and replaces the parameter value :math:`\nu` with
:math:`1 - \nu` whenever :math:`\nu < \tau` (here we use a threshold
:math:`\tau` equal to :math:`2^{-10}`, i.e. ``ZERO_THRESHOLD``).
Args:
s (float): The parameter along the first curve where the iteration
will start.
nodes1 (numpy.ndarray): Control points of the first curve.
t (float): The parameter along the second curve where the iteration
will start.
nodes2 (numpy.ndarray): Control points of the second curve.
Returns:
Tuple[float, float]: The pair of :math:`s` and :math:`t` values that
Newton's method converged to.
"""
if s < ZERO_THRESHOLD:
reversed1 = np.asfortranarray(nodes1[:, ::-1])
if t < ZERO_THRESHOLD:
reversed2 = np.asfortranarray(nodes2[:, ::-1])
refined_s, refined_t = full_newton_nonzero(
1.0 - s, reversed1, 1.0 - t, reversed2
)
return 1.0 - refined_s, 1.0 - refined_t
else:
refined_s, refined_t = full_newton_nonzero(
1.0 - s, reversed1, t, nodes2
)
return 1.0 - refined_s, refined_t
else:
if t < ZERO_THRESHOLD:
reversed2 = np.asfortranarray(nodes2[:, ::-1])
refined_s, refined_t = full_newton_nonzero(
s, nodes1, 1.0 - t, reversed2
)
return refined_s, 1.0 - refined_t
else:
return full_newton_nonzero(s, nodes1, t, nodes2) | 0.000503 |
def init(opts):
'''
Open the connection to the Arista switch over the eAPI.
'''
proxy_dict = opts.get('proxy', {})
conn_args = proxy_dict.copy()
conn_args.pop('proxytype', None)
opts['multiprocessing'] = conn_args.get('multiprocessing', True)
# This is not a SSH-based proxy, so it should be safe to enable
# multiprocessing.
try:
conn = pyeapi.client.connect(**conn_args)
node = pyeapi.client.Node(conn, enablepwd=conn_args.get('enablepwd'))
pyeapi_device['connection'] = node
pyeapi_device['initialized'] = True
pyeapi_device['up'] = True
except pyeapi.eapilib.ConnectionError as cerr:
log.error('Unable to connect to %s', conn_args['host'], exc_info=True)
return False
return True | 0.001271 |
def process_loaded_configs(self, values):
"""Takes the loaded config values (from YAML files) and performs the
following clean up steps:
1. remove all value keys that are not uppercase
2. resolve any keys with missing values
Note: resolving missing values does not fail fast; we collect
all missing values and report them to a post handler, then finally fail.
@param values dictionary of raw, newly loaded config values
"""
unresolved_value_keys = self._process_config_values([], values, [])
if len(unresolved_value_keys) > 0:
msg = "Unresolved values for: {}".format(unresolved_value_keys)
# Even though we will fail, there might be a situation when we want to
# do something with the list of missing values, so pass it to a handler.
self.on_process_loaded_configs_failure(values, unresolved_value_keys)
if self.ignore_errors:
# If we're ignoring errors, at least log it
logging.warn(msg)
else:
# end program
raise LookupError(msg)
# All the config values were checked and everything looks good,
# let's inform post handler for any additional work.
self.on_process_loaded_configs_complete(values)
return values | 0.006498 |
def _on_library_name_changed(self, renderer, path, new_library_name):
"""Callback handling a change of a library name
:param Gtk.CellRenderer renderer: Cell renderer showing the library name
:param path: Path of library within the list store
:param str new_library_name: New library name
"""
old_library_name = self.library_list_store[int(path)][self.KEY_STORAGE_ID]
if old_library_name == new_library_name:
return
library_path = self.library_list_store[int(path)][self.VALUE_STORAGE_ID]
library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True,
default={})
del library_config[old_library_name]
library_config[new_library_name] = library_path
self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config)
self._select_row_by_column_value(self.view['library_tree_view'], self.library_list_store,
self.KEY_STORAGE_ID, new_library_name) | 0.007937 |
def _make_cssmin(python_only=False):
"""
Generate CSS minifier.
:Parameters:
`python_only` : ``bool``
Use only the python variant. If true, the c extension is not even
tried to be loaded.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = W0612
# ("unused" variables)
# pylint: disable = R0911, R0912, R0914, R0915
# (too many anything)
if not python_only:
try:
import _rcssmin
except ImportError:
pass
else:
return _rcssmin.cssmin
nl = r'(?:[\n\f]|\r\n?)' # pylint: disable = C0103
spacechar = r'[\r\n\f\040\t]'
unicoded = r'[0-9a-fA-F]{1,6}(?:[\040\n\t\f]|\r\n?)?'
escaped = r'[^\n\r\f0-9a-fA-F]'
escape = r'(?:\\(?:%(unicoded)s|%(escaped)s))' % locals()
nmchar = r'[^\000-\054\056\057\072-\100\133-\136\140\173-\177]'
#nmstart = r'[^\000-\100\133-\136\140\173-\177]'
#ident = (r'(?:'
# r'-?(?:%(nmstart)s|%(escape)s)%(nmchar)s*(?:%(escape)s%(nmchar)s*)*'
#r')') % locals()
comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
# only for specific purposes. The bang is grouped:
_bang_comment = r'(?:/\*(!?)[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n\f]*(?:\\[^\r\n\f][^\047\\\r\n\f]*)*\047)'
string2 = r'(?:"[^"\\\r\n\f]*(?:\\[^\r\n\f][^"\\\r\n\f]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
nl_string1 = \
r'(?:\047[^\047\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^\047\\\r\n\f]*)*\047)'
nl_string2 = r'(?:"[^"\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^"\\\r\n\f]*)*")'
nl_strings = r'(?:%s|%s)' % (nl_string1, nl_string2)
uri_nl_string1 = r'(?:\047[^\047\\]*(?:\\(?:[^\r]|\r\n?)[^\047\\]*)*\047)'
uri_nl_string2 = r'(?:"[^"\\]*(?:\\(?:[^\r]|\r\n?)[^"\\]*)*")'
uri_nl_strings = r'(?:%s|%s)' % (uri_nl_string1, uri_nl_string2)
nl_escaped = r'(?:\\%(nl)s)' % locals()
space = r'(?:%(spacechar)s|%(comment)s)' % locals()
ie7hack = r'(?:>/\*\*/)'
uri = (r'(?:'
r'(?:[^\000-\040"\047()\\\177]*'
r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*)'
r'(?:'
r'(?:%(spacechar)s+|%(nl_escaped)s+)'
r'(?:'
r'(?:[^\000-\040"\047()\\\177]|%(escape)s|%(nl_escaped)s)'
r'[^\000-\040"\047()\\\177]*'
r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*'
r')+'
r')*'
r')') % locals()
nl_unesc_sub = _re.compile(nl_escaped).sub
uri_space_sub = _re.compile((
r'(%(escape)s+)|%(spacechar)s+|%(nl_escaped)s+'
) % locals()).sub
uri_space_subber = lambda m: m.groups()[0] or ''
space_sub_simple = _re.compile((
r'[\r\n\f\040\t;]+|(%(comment)s+)'
) % locals()).sub
space_sub_banged = _re.compile((
r'[\r\n\f\040\t;]+|(%(_bang_comment)s+)'
) % locals()).sub
post_esc_sub = _re.compile(r'[\r\n\f\t]+').sub
main_sub = _re.compile((
r'([^\\"\047u>@\r\n\f\040\t/;:{}]+)'
r'|(?<=[{}(=:>+[,!])(%(space)s+)'
r'|^(%(space)s+)'
r'|(%(space)s+)(?=(([:{});=>+\],!])|$)?)'
r'|;(%(space)s*(?:;%(space)s*)*)(?=(\})?)'
r'|(\{)'
r'|(\})'
r'|(%(strings)s)'
r'|(?<!%(nmchar)s)url\(%(spacechar)s*('
r'%(uri_nl_strings)s'
r'|%(uri)s'
r')%(spacechar)s*\)'
r'|(@[mM][eE][dD][iI][aA])(?!%(nmchar)s)'
r'|(%(ie7hack)s)(%(space)s*)'
r'|(:[fF][iI][rR][sS][tT]-[lL]'
r'(?:[iI][nN][eE]|[eE][tT][tT][eE][rR]))'
r'(%(space)s*)(?=[{,])'
r'|(%(nl_strings)s)'
r'|(%(escape)s[^\\"\047u>@\r\n\f\040\t/;:{}]*)'
) % locals()).sub
#print main_sub.__self__.pattern
def main_subber(keep_bang_comments, base_dir=''):
""" Make main subber """
in_macie5, in_rule, at_media = [0], [0], [0]
if keep_bang_comments:
space_sub = space_sub_banged
def space_subber(match):
""" Space|Comment subber """
if match.lastindex:
group1, group2 = match.group(1, 2)
if group2:
if group1.endswith(r'\*/'):
in_macie5[0] = 1
else:
in_macie5[0] = 0
return group1
elif group1:
if group1.endswith(r'\*/'):
if in_macie5[0]:
return ''
in_macie5[0] = 1
return r'/*\*/'
elif in_macie5[0]:
in_macie5[0] = 0
return '/**/'
return ''
else:
space_sub = space_sub_simple
def space_subber(match):
""" Space|Comment subber """
if match.lastindex:
if match.group(1).endswith(r'\*/'):
if in_macie5[0]:
return ''
in_macie5[0] = 1
return r'/*\*/'
elif in_macie5[0]:
in_macie5[0] = 0
return '/**/'
return ''
def fn_space_post(group):
""" space with token after """
if group(5) is None or (
group(6) == ':' and not in_rule[0] and not at_media[0]):
return ' ' + space_sub(space_subber, group(4))
return space_sub(space_subber, group(4))
def fn_semicolon(group):
""" ; handler """
return ';' + space_sub(space_subber, group(7))
def fn_semicolon2(group):
""" ; handler """
if in_rule[0]:
return space_sub(space_subber, group(7))
return ';' + space_sub(space_subber, group(7))
def fn_open(group):
""" { handler """
# pylint: disable = W0613
if at_media[0]:
at_media[0] -= 1
else:
in_rule[0] = 1
return '{'
def fn_close(group):
""" } handler """
# pylint: disable = W0613
in_rule[0] = 0
return '}'
def fn_media(group):
""" @media handler """
at_media[0] += 1
return group(13)
def fn_ie7hack(group):
""" IE7 Hack handler """
if not in_rule[0] and not at_media[0]:
in_macie5[0] = 0
return group(14) + space_sub(space_subber, group(15))
return '>' + space_sub(space_subber, group(15))
def fn_url(group):
"""
Process url
"""
import os
url = uri_space_sub(uri_space_subber, group(12))
if base_dir:
if (url.startswith('"') and url.endswith('"')) or (url.startswith("'") and url.endswith("'")):
url = url[0] + os.path.join(base_dir, url[1:-1]).replace('\\', '/') + url[0]
else:
url = os.path.join(base_dir, url).replace('\\', '/')
return 'url(%s)' % url
table = (
None,
None,
None,
None,
fn_space_post, # space with token after
fn_space_post, # space with token after
fn_space_post, # space with token after
fn_semicolon, # semicolon
fn_semicolon2, # semicolon
fn_open, # {
fn_close, # }
lambda g: g(11), # string
# lambda g: 'url(%s)' % uri_space_sub(uri_space_subber, g(12)),
fn_url,
# url(...)
fn_media, # @media
None,
fn_ie7hack, # ie7hack
None,
lambda g: g(16) + ' ' + space_sub(space_subber, g(17)),
# :first-line|letter followed
# by [{,] (apparently space
# needed for IE6)
lambda g: nl_unesc_sub('', g(18)), # nl_string
lambda g: post_esc_sub(' ', g(19)), # escape
)
def func(match):
""" Main subber """
idx, group = match.lastindex, match.group
if idx > 3:
return table[idx](group)
# shortcuts for frequent operations below:
elif idx == 1: # not interesting
return group(1)
#else: # space with token before or at the beginning
return space_sub(space_subber, group(idx))
return func
def cssmin(style, keep_bang_comments=False, base_dir=''): # pylint: disable = W0621
"""
Minify CSS.
:Parameters:
`style` : ``str``
CSS to minify
`keep_bang_comments` : ``bool``
Keep comments starting with an exclamation mark? (``/*!...*/``)
:Return: Minified style
:Rtype: ``str``
"""
return main_sub(main_subber(keep_bang_comments, base_dir), style)
return cssmin | 0.00283 |
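A small usage sketch of the generated minifier, assuming the module-level re import (bound to _re) that _make_cssmin relies on is present and that the pure-Python variant is acceptable:

cssmin = _make_cssmin(python_only=True)
style = """
/*! keep this banner */
a , p {
    color : red ;  /* drop this comment */
}
"""
print(cssmin(style, keep_bang_comments=True))
# Expected output is a single compact line, roughly:
# /*! keep this banner */a,p{color:red}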
def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
"""Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
wc = self.get_wcxf(C_out, scale_out)
return wc.dump(fmt=fmt, stream=stream, **kwargs) | 0.003436 |
def set_sampling_strategy(self, sensor_name, strategy_and_params):
"""Set the sampling strategy for the named sensor
Parameters
----------
sensor_name : str
Name of the sensor
strategy_and_params : seq of str or str
As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the
strategy names and parameters are as defined by the KATCP spec. As
str contains the same elements in space-separated form.
Returns
-------
sensor_strategy : tuple
(success, info) with
success : bool
True if setting succeeded for this sensor, else False
info : tuple
Normalized sensor strategy and parameters as tuple if
success == True, else sys.exc_info() tuple for the error
that occurred.
"""
try:
strategy_and_params = resource.normalize_strategy_parameters(
strategy_and_params)
self._strategy_cache[sensor_name] = strategy_and_params
reply = yield self._inspecting_client.wrapped_request(
'sensor-sampling', sensor_name, *strategy_and_params)
if not reply.succeeded:
raise KATCPSensorError('Error setting strategy for sensor {0}: \n'
'{1!s}'.format(sensor_name, reply))
sensor_strategy = (True, strategy_and_params)
except Exception as e:
self._logger.exception('Exception found!')
sensor_strategy = (False, str(e))
raise tornado.gen.Return(sensor_strategy) | 0.001808 |
def tobytes(self, prim, skipprepack = False):
'''
Compatible to Parser.tobytes()
'''
stream = BytesIO()
self.tostream(prim, stream, skipprepack=skipprepack)
return stream.getvalue() | 0.017467 |
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
"""
Create a new instance based on the specified configuration file or the specified
configuration and the passed in parameters.
If the config_file argument is not None, the configuration is read from there.
Otherwise, the cfg argument is used.
The config file may include other config files with a #import reference. The included
config files must reside in the same directory as the specified file.
The logical_volume argument, if supplied, will be used to get the current physical
volume ID and use that as an override of the value specified in the config file. This
may be useful for debugging purposes when you want to debug with a production config
file but a test Volume.
The dictionary argument may be used to override any EC2 configuration values in the
config file.
"""
if config_file:
cfg = Config(path=config_file)
if cfg.has_section('EC2'):
# include any EC2 configuration values that aren't specified in params:
for option in cfg.options('EC2'):
if option not in params:
params[option] = cfg.get('EC2', option)
getter = CommandLineGetter()
getter.get(cls, params)
region = params.get('region')
ec2 = region.connect()
cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
ami = params.get('ami')
kp = params.get('keypair')
group = params.get('group')
zone = params.get('zone')
# deal with possibly passed in logical volume:
if logical_volume != None:
cfg.set('EBS', 'logical_volume_name', logical_volume.name)
cfg_fp = StringIO.StringIO()
cfg.write(cfg_fp)
# deal with the possibility that zone and/or keypair are strings read from the config file:
if isinstance(zone, Zone):
zone = zone.name
if isinstance(kp, KeyPair):
kp = kp.name
reservation = ami.run(min_count=1,
max_count=params.get('quantity', 1),
key_name=kp,
security_groups=[group],
instance_type=params.get('instance_type'),
placement = zone,
user_data = cfg_fp.getvalue())
l = []
i = 0
elastic_ip = params.get('elastic_ip')
instances = reservation.instances
if elastic_ip != None and instances.__len__() > 0:
instance = instances[0]
print 'Waiting for instance to start so we can set its elastic IP address...'
# Sometimes we get a message from ec2 that says that the instance does not exist.
# Hopefully the following delay will give ec2 enough time to get to a stable state:
time.sleep(5)
while instance.update() != 'running':
time.sleep(1)
instance.use_ip(elastic_ip)
print 'set the elastic IP of the first instance to %s' % elastic_ip
for instance in instances:
s = cls()
s.ec2 = ec2
s.name = params.get('name') + ('' if i == 0 else str(i))
s.description = params.get('description')
s.region_name = region.name
s.instance_id = instance.id
if elastic_ip and i == 0:
s.elastic_ip = elastic_ip
s.put()
l.append(s)
i += 1
return l | 0.011339 |
def all(self, endpoint, *args, **kwargs):
"""Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
"""
# 1. Initialize the pagination parameters.
kwargs.setdefault('params', {})['offset'] = 0
kwargs.setdefault('params', {})['limit'] = self.limit
kwargs['__method__'] = 'get'
# 2. Create an initial paginated request.
payload = self.request(endpoint, *args, **kwargs)
has_next = payload.get('result', {}).setdefault(
'meta', {'next': None}
)['next']
# 3. Loop until the end
while has_next:
# 4. Increment the offset
kwargs['params']['offset'] += self.limit
# 5. Query again
_payload = self.request(endpoint, *args, **kwargs)
# 6. Add the paginated data to the global one
payload['result']['data'].extend(_payload['result']['data'])
# 7. Compute has_next
has_next = _payload['result']['meta']['next']
del payload['result']['meta']
return payload | 0.001761 |
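A usage sketch of the pagination above, assuming a client instance whose request() returns the documented {'result': {'data': [...], 'meta': {'next': ...}}} payload and whose limit attribute sets the page size; the endpoint name is a placeholder:

client.limit = 100
payload = client.all('devices')
devices = payload['result']['data']  # the full, unpaginated list of records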
def get_process(self, dwProcessId):
"""
@type dwProcessId: int
@param dwProcessId: Global ID of the process to look for.
@rtype: L{Process}
@return: Process object with the given global ID.
"""
self.__initialize_snapshot()
if dwProcessId not in self.__processDict:
msg = "Unknown process ID %d" % dwProcessId
raise KeyError(msg)
return self.__processDict[dwProcessId] | 0.004292 |
def scan_code_for_ctypes(co, instrs, i):
"""Detects ctypes dependencies, using reasonable heuristics that should
cover most common ctypes usages; returns a tuple of two lists, one
containing names of binaries detected as dependencies, the other containing
warnings.
"""
def _libFromConst(i):
"""Extracts library name from an expected LOAD_CONST instruction and
appends it to local binaries list.
"""
op, oparg, conditional, curline = instrs[i]
if op == LOAD_CONST:
soname = co.co_consts[oparg]
b.append(soname)
b = []
op, oparg, conditional, curline = instrs[i]
if op in (LOAD_GLOBAL, LOAD_NAME):
name = co.co_names[oparg]
if name in ("CDLL", "WinDLL"):
# Guesses ctypes imports of this type: CDLL("library.so")
# LOAD_GLOBAL 0 (CDLL) <--- we "are" here right now
# LOAD_CONST 1 ('library.so')
_libFromConst(i+1)
elif name == "ctypes":
# Guesses ctypes imports of this type: ctypes.DLL("library.so")
# LOAD_GLOBAL 0 (ctypes) <--- we "are" here right now
# LOAD_ATTR 1 (CDLL)
# LOAD_CONST 1 ('library.so')
op2, oparg2, conditional2, curline2 = instrs[i+1]
if op2 == LOAD_ATTR:
if co.co_names[oparg2] in ("CDLL", "WinDLL"):
# Fetch next, and finally get the library name
_libFromConst(i+2)
elif name in ("cdll", "windll"):
# Guesses ctypes imports of these types:
# * cdll.library (only valid on Windows)
# LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
# LOAD_ATTR 1 (library)
# * cdll.LoadLibrary("library.so")
# LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
# LOAD_ATTR 1 (LoadLibrary)
# LOAD_CONST 1 ('library.so')
op2, oparg2, conditional2, curline2 = instrs[i+1]
if op2 == LOAD_ATTR:
if co.co_names[oparg2] != "LoadLibrary":
# First type
soname = co.co_names[oparg2] + ".dll"
b.append(soname)
else:
# Second type, needs to fetch one more instruction
_libFromConst(i+2)
# If any of the libraries has been requested with anything different from
# the bare filename, drop that entry and warn the user - pyinstaller would
# need to patch the compiled pyc file to make it work correctly!
w = []
for bin in list(b):
if bin != os.path.basename(bin):
b.remove(bin)
w.append("W: ignoring %s - ctypes imports only supported using bare filenames" % (bin,))
return b, w | 0.001046 |
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = RecordMetadata.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(RecordMetadata.json != None) # noqa
obj = query.one()
return cls(obj.json, model=obj) | 0.003378 |
def isRef(self, elem, attr):
"""Determine whether an attribute is of type Ref. In case we
have DTD(s) then this is simple, otherwise we use a
heuristic: name Ref (upper or lowercase). """
if elem is None: elem__o = None
else: elem__o = elem._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlIsRef(self._o, elem__o, attr__o)
return ret | 0.013605 |
def send_unsent(self):
"""
Emails that could not be sent are stored in :attr:`self.unsent`.
Use this function to attempt to send them again.
"""
# Iterate over a snapshot: popping by a running index while the list
# shrinks would skip entries or raise an IndexError.
for recipients, msg_string in list(self.unsent):
self.unsent.remove((recipients, msg_string))
self._attempt_send(recipients, msg_string)
def updateD_H(self, x):
"""
Compute Hessian for update of D
See [2] for derivation of Hessian
"""
self.precompute(x)
H = zeros((len(x), len(x)))
Ai = zeros(self.A.shape[0])
Aj = zeros(Ai.shape)
for i in range(len(x)):
Ai = self.A[:, i]
ti = dot(self.AD, outer(self.R[:, i], Ai)) + dot(outer(Ai, self.R[i, :]), self.ADt)
for j in range(i, len(x)):
Aj = self.A[:, j]
tj = outer(Ai, Aj)
H[i, j] = (
self.E * (self.R[i, j] * tj + self.R[j, i] * tj.T) -
ti * (
dot(self.AD, outer(self.R[:, j], Aj)) +
dot(outer(Aj, self.R[j, :]), self.ADt)
)
).sum()
H[j, i] = H[i, j]
H *= -2
e = eigvals(H).min()
H = H + (eye(H.shape[0]) * e)
return H | 0.003125 |
def threshold(self, value, inclusive=False):
"""Return True if > than treshold value (or >= threshold value if
inclusive=True).
"""
if inclusive:
def function(x, y):
return True if x >= y else False
else:
def function(x, y):
return True if x > y else False
return self.operation(value, function) | 0.005 |
def validate_gaslimit(self, header: BlockHeader) -> None:
"""
Validate the gas limit on the given header.
"""
parent_header = self.get_block_header_by_hash(header.parent_hash)
low_bound, high_bound = compute_gas_limit_bounds(parent_header)
if header.gas_limit < low_bound:
raise ValidationError(
"The gas limit on block {0} is too low: {1}. It must be at least {2}".format(
encode_hex(header.hash), header.gas_limit, low_bound))
elif header.gas_limit > high_bound:
raise ValidationError(
"The gas limit on block {0} is too high: {1}. It must be at most {2}".format(
encode_hex(header.hash), header.gas_limit, high_bound)) | 0.005175 |
def ParseArguments(self):
"""Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
"""
loggers.ConfigureLogging()
argument_parser = argparse.ArgumentParser(
description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.AddBasicOptions(argument_parser)
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_parser, names=['storage_file'])
data_location_group = argument_parser.add_argument_group(
'data location arguments')
argument_helper_names = ['artifact_definitions', 'data_location']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
data_location_group, names=argument_helper_names)
extraction_group = argument_parser.add_argument_group(
'extraction arguments')
argument_helper_names = [
'artifact_filters', 'extraction', 'filter_file', 'hashers',
'parsers', 'yara_rules']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
extraction_group, names=argument_helper_names)
self.AddStorageMediaImageOptions(extraction_group)
self.AddTimeZoneOption(extraction_group)
self.AddVSSProcessingOptions(extraction_group)
self.AddCredentialOptions(extraction_group)
info_group = argument_parser.add_argument_group('informational arguments')
self.AddInformationalOptions(info_group)
info_group.add_argument(
'--info', dest='show_info', action='store_true', default=False,
help='Print out information about supported plugins and parsers.')
info_group.add_argument(
'--use_markdown', '--use-markdown', dest='use_markdown',
action='store_true', default=False, help=(
'Output lists in Markdown format use in combination with '
'"--hashers list", "--parsers list" or "--timezone list"'))
info_group.add_argument(
'--no_dependencies_check', '--no-dependencies-check',
dest='dependencies_check', action='store_false', default=True,
help='Disable the dependencies check.')
self.AddLogFileOptions(info_group)
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
info_group, names=['status_view'])
output_group = argument_parser.add_argument_group('output arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_group, names=['text_prepend'])
processing_group = argument_parser.add_argument_group(
'processing arguments')
self.AddPerformanceOptions(processing_group)
self.AddProcessingOptions(processing_group)
processing_group.add_argument(
'--sigsegv_handler', '--sigsegv-handler', dest='sigsegv_handler',
action='store_true', default=False, help=(
'Enables the SIGSEGV handler. WARNING this functionality is '
'experimental and will deadlock a worker process if a real '
'segfault is caught, but not signal SIGSEGV. This functionality '
'is therefore primarily intended for debugging purposes'))
profiling_group = argument_parser.add_argument_group('profiling arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
profiling_group, names=['profiling'])
storage_group = argument_parser.add_argument_group('storage arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
storage_group, names=['storage_format'])
argument_parser.add_argument(
self._SOURCE_OPTION, action='store', metavar='SOURCE', nargs='?',
default=None, type=str, help=(
'Path to a source device, file or directory. If the source is '
'a supported storage media device or image file, archive file '
'or a directory, the files within are processed recursively.'))
try:
options = argument_parser.parse_args()
except UnicodeEncodeError:
# If we get here we are attempting to print help in a non-Unicode
# terminal.
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_help())
return False
# Properly prepare the attributes according to local encoding.
if self.preferred_encoding == 'ascii':
logger.warning(
'The preferred encoding of your system is ASCII, which is not '
'optimal for the typically non-ASCII characters that need to be '
'parsed and processed. The tool will most likely crash and die, '
'perhaps in a way that may not be recoverable. A five second delay '
'is introduced to give you time to cancel the runtime and '
'reconfigure your preferred encoding, otherwise continue at your own '
'risk.')
time.sleep(5)
if self._process_archives:
logger.warning(
'Scanning archive files currently can cause deadlock. Continue at '
'your own risk.')
time.sleep(5)
try:
self.ParseOptions(options)
except errors.BadConfigOption as exception:
self._output_writer.Write('ERROR: {0!s}\n'.format(exception))
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_usage())
return False
self._command_line_arguments = self.GetCommandLineArguments()
loggers.ConfigureLogging(
debug_output=self._debug_mode, filename=self._log_file,
quiet_mode=self._quiet_mode)
return True | 0.002902 |
def ProcessContent(self, strip_expansion=False):
"""Processes the file contents."""
self._ParseFile()
if strip_expansion:
# Without a collection the expansions become blank, removing them.
collection = None
else:
collection = MacroCollection()
for section in self._sections:
section.BindMacroCollection(collection)
result = ''
for section in self._sections:
result += section.text
self._processed_content = result | 0.012632 |
def target_outdated(target,deps):
"""Determine whether a target is out of date.
target_outdated(target,deps) -> 1/0
deps: list of filenames which MUST exist.
target: single filename which may or may not exist.
If target doesn't exist or is older than any file listed in deps, return
true, otherwise return false.
"""
try:
target_time = os.path.getmtime(target)
except os.error:
return 1
for dep in deps:
dep_time = os.path.getmtime(dep)
if dep_time > target_time:
#print "For target",target,"Dep failed:",dep # dbg
#print "times (dep,tar):",dep_time,target_time # dbg
return 1
return 0 | 0.005714 |
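A short sketch of the make-style check this enables; the file names below are hypothetical and must exist on disk for the mtime comparison to run:

target = 'report.html'
deps = ['report.md', 'style.css']
if target_outdated(target, deps):
    print('target is stale or missing; rebuilding...')
else:
    print('target is up to date')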
def external_links(self) -> List['ExternalLink']:
"""Return a list of found external link objects.
Note:
Templates adjacent to external links are considered part of the
link. In reality, this depends on the contents of the template:
>>> WikiText(
... 'http://example.com{{dead link}}'
...).external_links[0].url
'http://example.com{{dead link}}'
>>> WikiText(
... '[http://example.com{{space template}} text]'
...).external_links[0].url
'http://example.com{{space template}}'
"""
external_links = [] # type: List['ExternalLink']
external_links_append = external_links.append
type_to_spans = self._type_to_spans
lststr = self._lststr
ss, se = self._span
spans = type_to_spans.setdefault('ExternalLink', [])
if not spans:
# All the added spans will be new.
spans_append = spans.append
for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow):
s, e = m.span()
span = [ss + s, ss + e]
spans_append(span)
external_links_append(
ExternalLink(lststr, type_to_spans, span, 'ExternalLink'))
return external_links
# There are already some ExternalLink spans. Use the already existing
# ones when the detected span is one of those.
span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow):
s, e = m.span()
span = s, e = [s + ss, e + ss]
old_span = span_tuple_to_span_get((s, e))
if old_span is None:
insort(spans, span)
else:
span = old_span
external_links_append(
ExternalLink(lststr, type_to_spans, span, 'ExternalLink'))
return external_links | 0.001003 |
def account_get(self, key):
"""
Get account number for the **public key**
:param key: Public key to get account for
:type key: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.account_get(
... key="3068BB1CA04525BB0E416C485FE6A67FD52540227D267CC8B6E8DA958A7FA039"
... )
"xrb_1e5aqegc1jb7qe964u4adzmcezyo6o146zb8hm6dft8tkp79za3sxwjym5rx"
"""
key = self._process_value(key, 'publickey')
payload = {"key": key}
resp = self.call('account_get', payload)
return resp['account'] | 0.005017 |
def reset_small(self, eq):
"""Reset numbers smaller than 1e-12 in f and g equations"""
assert eq in ('f', 'g')
for idx, var in enumerate(self.__dict__[eq]):
if abs(var) <= 1e-12:
self.__dict__[eq][idx] = 0 | 0.007782 |
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats,
stat_dtype):
"""Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
"""
if self._fused:
# For the non-training case where not using batch stats,
# pass in the moving statistic variables directly.
# These will already be in the correct dtype, even for float16 input.
batch_norm_op, mean, variance = self._fused_batch_norm_op(
input_batch,
self._moving_mean, self._moving_variance, use_batch_stats)
else:
batch_norm_op = tf.nn.batch_normalization(
input_batch,
mean,
variance,
self._beta,
self._gamma,
self._eps,
name="batch_norm")
# We'll echo the supplied mean and variance so that they can also be used
# to update the moving statistics. Cast to matching type if necessary.
if input_batch.dtype.base_dtype != stat_dtype:
mean = tf.cast(mean, stat_dtype)
variance = tf.cast(variance, stat_dtype)
return batch_norm_op, mean, variance | 0.005618 |
def create_panel_of_normals(items, group_id, work_dir):
"""Create a panel of normals from one or more background read counts.
"""
out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id))
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
params = ["-T", "CreateReadCountPanelOfNormals",
"-O", tx_out_file,
"--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])]
for data in items:
params += ["-I", tz.get_in(["depth", "bins", "target"], data)]
_run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True)
return out_file | 0.005242 |
def packages(ciprcfg, env, opts, console):
"""
List installed packages for this project
"""
for name, source in ciprcfg.packages.items():
console.normal('- %s' % name)
if opts.long_details:
console.normal(' - directory: %s' % path.join(env.package_dir, name))
console.normal(' - source: %s' % source) | 0.005571 |
async def read_loop(self):
"""
Infinite loop that reads messages off of the socket while not closed.
When a message is received its corresponding pending Future is set
to have the message as its result.
This is never used directly and is fired as a separate callback on the
I/O loop via the `connect()` method.
"""
while not self.closing:
try:
xid, zxid, response = await self.read_response()
except (ConnectionAbortedError, asyncio.CancelledError):
return
except Exception as e:
log.exception("Error reading response.")
self.abort()
return
payload_log.debug("[RECV] (xid: %s) %s", xid, response)
if xid == protocol.WATCH_XID:
self.watch_handler(response)
continue
elif xid in protocol.SPECIAL_XIDS:
f = self.pending_specials[xid].pop()
else:
f = self.pending.pop(xid)
if isinstance(response, Exception):
f.set_exception(response)
elif not f.cancelled():
f.set_result((zxid, response)) | 0.001616 |
def getdict(self, crop=True):
"""Get final dictionary. If ``crop`` is ``True``, apply
:func:`.cnvrep.bcrop` to returned array.
"""
D = self.Y
if crop:
D = cr.bcrop(D, self.cri.dsz, self.cri.dimN)
return D | 0.007547 |
def ssh(lancet, print_cmd, environment):
"""
SSH into the given environment, based on the dploi configuration.
"""
namespace = {}
with open(lancet.config.get('dploi', 'deployment_spec')) as fh:
code = compile(fh.read(), 'deployment.py', 'exec')
exec(code, {}, namespace)
config = namespace['settings'][environment]
host = '{}@{}'.format(config['user'], config['hosts'][0])
cmd = ['ssh', '-p', str(config.get('port', 22)), host]
if print_cmd:
click.echo(' '.join(quote(s) for s in cmd))
else:
lancet.defer_to_shell(*cmd) | 0.001684 |
def find_matching(root_path,
relative_paths_to_search,
file_pattern):
"""
Given an absolute `root_path`, a list of relative paths to that
absolute root path (`relative_paths_to_search`), and a `file_pattern`
like '*.sql', returns information about the files. For example:
> find_matching('/root/path', 'models', '*.sql')
[ { 'absolute_path': '/root/path/models/model_one.sql',
'relative_path': 'models/model_one.sql',
'searched_path': 'models' },
{ 'absolute_path': '/root/path/models/subdirectory/model_two.sql',
'relative_path': 'models/subdirectory/model_two.sql',
'searched_path': 'models' } ]
"""
matching = []
root_path = os.path.normpath(root_path)
for relative_path_to_search in relative_paths_to_search:
absolute_path_to_search = os.path.join(
root_path, relative_path_to_search)
walk_results = os.walk(absolute_path_to_search)
for current_path, subdirectories, local_files in walk_results:
for local_file in local_files:
absolute_path = os.path.join(current_path, local_file)
relative_path = os.path.relpath(
absolute_path, absolute_path_to_search)
if fnmatch.fnmatch(local_file, file_pattern):
matching.append({
'searched_path': relative_path_to_search,
'absolute_path': absolute_path,
'relative_path': relative_path,
})
return matching | 0.000621 |
def init_db(uri, base, **kwargs):
"""Create engine and tables
:param uri: db uri
:param base: declarative base
:returns: an engine
"""
engine = create_engine(uri, **kwargs)
base.metadata.create_all(engine)
return engine | 0.003984 |
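A minimal usage sketch, assuming SQLAlchemy 1.4+ and that create_engine is the sqlalchemy.create_engine imported by this module; the model and in-memory SQLite URI are illustrative:

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

# init_db builds the engine and emits CREATE TABLE for every mapped model.
engine = init_db('sqlite:///:memory:', Base, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
session.add(User(name='alice'))
session.commit()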
def reverse_action(self, url_name, *args, **kwargs):
"""
Extends DRF's reverse_action with a fallback to the requested namespace if request.version is missing
"""
if self.request and not self.request.version:
return reverse(self.get_url_name(url_name), *args, **kwargs)
return super().reverse_action(url_name, *args, **kwargs) | 0.008403 |
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None):
"""
Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
"""
from django.apps import apps
class Meta:
proxy = True
app_label = 'cmsplugin_cascade'
name = str(name + 'Model')
try:
Model = apps.get_registered_model(Meta.app_label, name)
except LookupError:
bases = model_mixins + (base_model,)
attrs = dict(attrs or {}, Meta=Meta, __module__=module)
Model = type(name, bases, attrs)
fake_proxy_models[name] = bases
return Model | 0.003221 |
def distance_to_point(self, point):
"""
Computes the absolute distance from the plane to the point
:param point: Point for which distance is computed
:return: Distance between the plane and the point
"""
return np.abs(np.dot(self.normal_vector, point) + self.d) | 0.006472 |
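A tiny numeric check of the formula |n . p + d| used above, assuming a unit normal; the plane chosen here is z = 1, i.e. normal_vector = (0, 0, 1) and d = -1:

import numpy as np

normal_vector = np.array([0.0, 0.0, 1.0])
d = -1.0
point = np.array([2.0, 5.0, 4.0])

# |n . p + d| = |4 - 1| = 3, the vertical distance from the point to z = 1.
distance = np.abs(np.dot(normal_vector, point) + d)
print(distance)  # 3.0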
def columns_used(self):
"""
Returns all the columns used in this model for filtering
and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression)))) | 0.005435 |
def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object
"""
root_node = ET.Element("observation")
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
root_node.append(self._location._to_DOM())
root_node.append(self._weather._to_DOM())
return root_node | 0.004082 |
def execute_function(function_request):
"""
Given a request created by
`beanstalk_dispatch.common.create_request_body`, executes the
request. This function is to be run on a beanstalk worker.
"""
dispatch_table = getattr(settings, 'BEANSTALK_DISPATCH_TABLE', None)
if dispatch_table is None:
raise BeanstalkDispatchError('No beanstalk dispatch table configured')
for key in (FUNCTION, ARGS, KWARGS):
if key not in function_request.keys():
raise BeanstalkDispatchError(
'Please provide a {} argument'.format(key))
function_path = dispatch_table.get(
function_request[FUNCTION], ''
)
if function_path:
runnable = locate(function_path)
if not runnable:
raise BeanstalkDispatchError(
'Unable to locate function: {}'.format(function_path))
args = function_request[ARGS]
kwargs = function_request[KWARGS]
if inspect.isclass(runnable):
if issubclass(runnable, SafeTask):
task = runnable()
else:
raise BeanstalkDispatchError(
'Requested task is not a SafeTask subclass: {}'.format(
function_request[FUNCTION]))
else:
task = SafeTask()
task.run = runnable
task.process(*args, **kwargs)
else:
raise BeanstalkDispatchError(
'Requested function not found: {}'.format(
function_request[FUNCTION])) | 0.000652 |
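A sketch of the settings entry and request body this dispatcher expects, assuming the FUNCTION, ARGS and KWARGS constants are the literal keys 'function', 'args' and 'kwargs' produced by create_request_body; the table entry and task path are illustrative:

# Hypothetical Django settings entry mapping public task names to dotted paths.
BEANSTALK_DISPATCH_TABLE = {
    'send_welcome_email': 'myapp.tasks.send_welcome_email',
}

# A request body as created on the producer side.
function_request = {
    'function': 'send_welcome_email',
    'args': [42],
    'kwargs': {'template': 'welcome.html'},
}

# On the worker, execute_function(function_request) locates
# myapp.tasks.send_welcome_email and calls it as
# send_welcome_email(42, template='welcome.html'), wrapped in a SafeTask.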
def strip_vht(self, idx):
"""strip(12 byte) radiotap.vht
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
vht = collections.namedtuple(
'vht', ['known_bits', 'have_stbc', 'have_txop_ps', 'have_gi',
'have_sgi_nsym_da', 'have_ldpc_extra', 'have_beamformed',
'have_bw', 'have_gid', 'have_paid', 'stbc', 'txop_ps', 'gi',
'sgi_nysm_da', 'ldpc_extra', 'group_id', 'partial_id',
'beamformed', 'user_0', 'user_1', 'user_2', 'user_3'])
user = collections.namedtuple('user', ['nss', 'mcs', 'coding'])
idx = Radiotap.align(idx, 2)
known, flags, bw = struct.unpack_from('<HBB', self._rtap, idx)
mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3 = struct.unpack_from('<BBBB', self._rtap, idx + 4)
coding, group_id, partial_id = struct.unpack_from('<BBH', self._rtap, idx + 8)
known_bits = format(known, '032b')[::-1]
vht.known_bits = known_bits
vht.have_stbc = int(known_bits[0]) # Space Time Block Coding
vht.have_txop_ps = int(known_bits[1]) # TXOP_PS_NOT_ALLOWD
vht.have_gi = int(known_bits[2]) # Short/Long Guard Interval
vht.have_sgi_nsym_da = int(known_bits[3]) # Short Guard Interval Nsym Disambiguation
vht.have_ldpc_extra = int(known_bits[4]) # LDPC(Low Density Parity Check)
vht.have_beamformed = int(known_bits[5]) # Beamformed
vht.have_bw = int(known_bits[6]) # Bandwidth
vht.have_gid = int(known_bits[7]) # Group ID
vht.have_paid = int(known_bits[8]) # Partial AID
flag_bits = format(flags, '032b')[::-1]
vht.flag_bits = flag_bits
vht.stbc = int(flag_bits[0])
vht.txop_ps = int(flag_bits[1])
vht.gi = int(flag_bits[2])
vht.sgi_nysm_da = int(flag_bits[3])
vht.ldpc_extra = int(flag_bits[4])
vht.beamformed = int(flag_bits[5])
vht.group_id = group_id
vht.partial_id = partial_id
vht.bw = bw
vht.user_0 = user(None, None, None)
vht.user_1 = user(None, None, None)
vht.user_2 = user(None, None, None)
vht.user_3 = user(None, None, None)
for (i, mcs_nss) in enumerate([mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3]):
if mcs_nss:
nss = mcs_nss & 0xf0 >> 4
mcs = (mcs_nss & 0xf0) >> 4
coding = (coding & 2**i) >> i
if i == 0:
vht.user_0 = user(nss, mcs, coding)
elif i == 1:
vht.user_1 = user(nss, mcs, coding)
elif i == 2:
vht.user_2 = user(nss, mcs, coding)
elif i == 3:
vht.user_3 = user(nss, mcs, coding)
return idx + 12, vht | 0.002752 |
def _prm_select_shared_pandas_data(self, pd_node, full_name, **kwargs):
"""Reads a DataFrame from dis.
:param pd_node:
hdf5 node storing the pandas DataFrame
:param full_name:
Full name of the parameter or result whose data is to be loaded
:param kwargs:
Arguments passed to pandas' select method
"""
try:
pathname = pd_node._v_pathname
pandas_store = self._hdf5store
return pandas_store.select(pathname, **kwargs)
except:
self._logger.error('Failed loading `%s` of `%s`.' % (pd_node._v_name, full_name))
raise | 0.005979 |
def canonical_request(self, method, path, content, timestamp):
"""Return the canonical request string."""
request = collections.OrderedDict([
('Method', method.upper()),
('Hashed Path', path),
('X-Ops-Content-Hash', content),
('X-Ops-Timestamp', timestamp),
('X-Ops-UserId', self.user_id),
])
return '\n'.join(['%s:%s' % (key, value)
for key, value in request.items()]) | 0.004107 |
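A short sketch of the string this produces, assuming a signer instance with user_id set; the hashes and timestamp are placeholders (in the Chef signing scheme they are base64-encoded SHA1 digests):

canonical = signer.canonical_request(
    method='get',
    path='HASHED_PATH_B64',
    content='HASHED_BODY_B64',
    timestamp='2014-01-01T00:00:00Z',
)
# canonical is the ordered, newline-joined header block:
# Method:GET
# Hashed Path:HASHED_PATH_B64
# X-Ops-Content-Hash:HASHED_BODY_B64
# X-Ops-Timestamp:2014-01-01T00:00:00Z
# X-Ops-UserId:<self.user_id>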
def _format_heatmap(self, filename, heatmap, execution_count):
"""Formats heatmap for UI."""
with open(filename) as src_file:
file_source = src_file.read().split('\n')
skip_map = self._calc_skips(heatmap, len(file_source))
run_time = sum(time for time in heatmap.values())
return {
'name': filename,
'heatmap': heatmap,
'executionCount': execution_count,
'srcCode': self._skip_lines(file_source, skip_map),
'runTime': run_time
} | 0.003623 |
def mongo_getattr(rec, key):
"""
Get value from dict using MongoDB dot-separated path semantics.
For example:
>>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b') == 1
>>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'x') == 2
>>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None
:param rec: mongodb document
:param key: path to mongo value
:return: value, potentially nested, or ``None`` if a top-level key is not found
:raise: AttributeError, if record is not a dict or key is not found.
"""
if not isinstance(rec, collections.Mapping):
raise AttributeError('input record must act like a dict')
if not rec:
raise AttributeError('Empty dict')
if not '.' in key:
return rec.get(key)
for key_part in key.split('.'):
if not isinstance(rec, collections.Mapping):
raise AttributeError('not a mapping for rec_part %s' % key_part)
if not key_part in rec:
raise AttributeError('key %s not in dict %s' % (key_part, rec))
rec = rec[key_part]
return rec | 0.002674 |
def check_file_version(notebook, source_path, outputs_path):
"""Raise if file version in source file would override outputs"""
if not insert_or_test_version_number():
return
_, ext = os.path.splitext(source_path)
if ext.endswith('.ipynb'):
return
version = notebook.metadata.get('jupytext', {}).get('text_representation', {}).get('format_version')
format_name = format_name_for_ext(notebook.metadata, ext)
fmt = get_format_implementation(ext, format_name)
current = fmt.current_version_number
# Missing version, still generated by jupytext?
if notebook.metadata and not version:
version = current
# Same version? OK
if version == fmt.current_version_number:
return
# Version larger than minimum readable version
if (fmt.min_readable_version_number or current) <= version <= current:
return
raise JupytextFormatError("File {} is in format/version={}/{} (current version is {}). "
"It would not be safe to override the source of {} with that file. "
"Please remove one or the other file."
.format(os.path.basename(source_path),
format_name, version, current,
os.path.basename(outputs_path))) | 0.002939 |
def createSomeItems(store, itemType, values, counter):
"""
Create some instances of a particular type in a store.
"""
for i in counter:
itemType(store=store, **values) | 0.005236 |
def render(self, request, template, context):
"""
Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
``html=true`` was set it will return a plain HTML.
"""
if self.allow_force_html and self.request.GET.get('html', False):
html = get_template(template).render(context)
return HttpResponse(html)
else:
response = HttpResponse(content_type='application/pdf')
if self.prompt_download:
response['Content-Disposition'] = 'attachment; filename="{}"' \
.format(self.get_download_name())
helpers.render_pdf(
template=template,
file_=response,
url_fetcher=self.url_fetcher,
context=context,
)
return response | 0.002174 |
def _ttm_me_compute(self, V, edims, sdims, transp):
"""
Assume Y = T x_i V_i for i = 1...n can fit into memory
"""
shapeY = np.copy(self.shape)
# Determine size of Y
for n in np.union1d(edims, sdims):
shapeY[n] = V[n].shape[1] if transp else V[n].shape[0]
# Allocate Y (final result) and v (vectors for elementwise computations)
Y = zeros(shapeY)
shapeY = array(shapeY)
v = [None for _ in range(len(edims))]
for i in range(np.prod(shapeY[edims])):
rsubs = unravel_index(shapeY[edims], i) | 0.004983 |
def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
"""The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction.
"""
with tf.variable_scope('van_dec'):
dec = tf.layers.conv2d_transpose(
x, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(
dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
output_shape[3] + 1,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
out_mask = tf.layers.conv2d_transpose(
dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None)
mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])
out = out_mask[:, :, :, :3]
return out * mask + skip_connections[0] * (1 - mask) | 0.002408 |
def returner(load):
'''
Return data to the local job cache
'''
serial = salt.payload.Serial(__opts__)
# if a minion is returning a standalone job, get a jobid
if load['jid'] == 'req':
load['jid'] = prep_jid(nocache=load.get('nocache', False))
jid_dir = salt.utils.jid.jid_dir(load['jid'], _job_dir(), __opts__['hash_type'])
if os.path.exists(os.path.join(jid_dir, 'nocache')):
return
hn_dir = os.path.join(jid_dir, load['id'])
try:
os.makedirs(hn_dir)
except OSError as err:
if err.errno == errno.EEXIST:
# Minion has already returned this jid and it should be dropped
log.error(
'An extra return was detected from minion %s, please verify '
'the minion, this could be a replay attack', load['id']
)
return False
elif err.errno == errno.ENOENT:
log.error(
'An inconsistency occurred, a job was received with a job id '
'(%s) that is not present in the local cache', load['jid']
)
return False
raise
serial.dump(
dict((key, load[key]) for key in ['return', 'retcode', 'success'] if key in load),
# Use atomic open here to avoid the file being read before it's
# completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, RETURN_P), 'w+b'
)
)
if 'out' in load:
serial.dump(
load['out'],
# Use atomic open here to avoid the file being read before
# it's completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, OUT_P), 'w+b'
)
) | 0.001675 |
def move(self, fromp, to):
''' Usage: move/mv/rename/ren <from> <to> - \
move a file / dir remotely at Baidu Yun
from - source path (file / dir)
to - destination path (file / dir)
'''
frompp = get_pcs_path(fromp)
top = get_pcs_path(to)
pars = {
'method' : 'move',
'from' : frompp,
'to' : top }
self.pd("Remote moving: '{}' =mm=> '{}'".format(fromp, to))
return self.__post(pcsurl + 'file', pars, self.__move_act) | 0.040909 |
def rgb2term(r: int, g: int, b: int) -> str:
""" Convert an rgb value to a terminal code. """
return hex2term_map[rgb2termhex(r, g, b)] | 0.006993 |
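A hedged usage sketch of the rgb2term helper above, assuming it and the module-level
hex2term_map/rgb2termhex lookups it relies on are importable; the printed codes are only
indicative.

# Illustrative only -- not part of the original snippet.
print(rgb2term(255, 0, 0))      # a code near '196', the closest 256-color red
print(rgb2term(128, 128, 128))  # a grey from the 232-255 greyscale ramp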
def show(self, wait = False):
"""Show the window."""
self.tk.deiconify()
self._visible = True
self._modal = wait
if self._modal:
self.tk.grab_set() | 0.020101 |
def clear_transaction(self):
"""
Clear the currently active transaction (if exists). If the
transaction stack is not empty, then a new pipeline will
be initialized.
:returns: No return value.
:raises: ``ValueError`` if no transaction is active.
"""
with self._transaction_lock:
local = self._transaction_local
if not local.pipes:
raise ValueError('No transaction is currently active.')
local.abort() | 0.003876 |
def reset_field_value(self, name):
"""
Resets value of a field
"""
name = self.get_real_name(name)
if name and self._can_write_field(name):
if name in self.__modified_data__:
del self.__modified_data__[name]
if name in self.__deleted_fields__:
self.__deleted_fields__.remove(name)
try:
self.__original_data__[name].clear_modified_data()
except (KeyError, AttributeError):
pass | 0.003766 |
def bootstrap(self, config):
""" Initialize a new node from scratch and start it. """
pg_hba = config.get('pg_hba', [])
method = config.get('method') or 'initdb'
self._running_custom_bootstrap = method != 'initdb' and method in config and 'command' in config[method]
if self._running_custom_bootstrap:
do_initialize = self._custom_bootstrap
config = config[method]
else:
do_initialize = self._initdb
return do_initialize(config) and self.append_pg_hba(pg_hba) and self.save_configuration_files() \
and self._configure_server_parameters() and self.start() | 0.006079 |
def user_warning(channel, user, warnings, max_warnings):
"""
Creates an embed UI containing an user warning message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
user (discord.User): The user to warn
        warnings (int): The number of warnings the user currently has
        max_warnings (int): The maximum number of warnings before a ban
Returns:
ui (ui_embed.UI): The embed UI object
"""
username = user.name
if isinstance(user, discord.Member):
if user.nick is not None:
username = user.nick
warning_count_text = "warnings" if warnings != 1 else "warning"
warning_text = "{} {}".format(warnings, warning_count_text)
result_text = "at {} you will be banned".format(max_warnings)
if warnings >= max_warnings:
result_text = "you are being banned because you have more than the maximum warnings"
# Create embed UI object
gui = ui_embed.UI(
channel,
"Warning {}".format(username),
"You now have {} {}, {}".format(warning_text, username, result_text),
modulename=modulename
)
return gui | 0.001764 |
def copy_fs(
src_fs, # type: Union[FS, Text]
dst_fs, # type: Union[FS, Text]
walker=None, # type: Optional[Walker]
on_copy=None, # type: Optional[_OnCopy]
workers=0, # type: int
):
# type: (...) -> None
"""Copy the contents of one filesystem to another.
Arguments:
src_fs (FS or str): Source filesystem (URL or instance).
dst_fs (FS or str): Destination filesystem (URL or instance).
walker (~fs.walk.Walker, optional): A walker object that will be
used to scan for files in ``src_fs``. Set this if you only want
to consider a sub-set of the resources in ``src_fs``.
on_copy (callable): A function callback called after a single file copy
is executed. Expected signature is ``(src_fs, src_path, dst_fs,
dst_path)``.
        workers (int): Use ``workers`` threads to copy data, or ``0`` (default) for
a single-threaded copy.
"""
return copy_dir(
src_fs, "/", dst_fs, "/", walker=walker, on_copy=on_copy, workers=workers
) | 0.002809 |
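A minimal usage sketch of copy_fs, assuming this is the implementation exposed by
fs.copy; the file name below is made up.

import fs
from fs.copy import copy_fs

src = fs.open_fs("mem://")
src.writetext("hello.txt", "hello")
dst = fs.open_fs("mem://")
copy_fs(src, dst)
print(dst.readtext("hello.txt"))  # 'hello'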
def _gcs_get_keys(bucket, pattern):
""" Get names of all Google Cloud Storage keys in a specified bucket that match a pattern. """
return [obj for obj in list(bucket.objects()) if fnmatch.fnmatch(obj.metadata.name, pattern)] | 0.02193 |
def numbers(self):
"""
Access the numbers
:returns: twilio.rest.pricing.v1.voice.number.NumberList
:rtype: twilio.rest.pricing.v1.voice.number.NumberList
"""
if self._numbers is None:
self._numbers = NumberList(self._version, )
return self._numbers | 0.006309 |
def _tobytes(self):
"""Serializes the write buffer into a single string (bytes).
Returns:
a string (bytes) object.
"""
if not self._has_view:
# fast path optimization
if len(self._deque) == 0:
return b""
elif len(self._deque) == 1:
# no copy
return self._deque[0]
else:
return b"".join(self._deque)
else:
tmp = [x.tobytes() if isinstance(x, memoryview) else x
for x in self._deque]
return b"".join(tmp) | 0.003279 |
def util_pattern_space(time_series, lag, dim):
"""Create a set of sequences with given lag and dimension
Args:
time_series: Vector or string of the sample data
lag: Lag between beginning of sequences
dim: Dimension (number of patterns)
Returns:
2D array of vectors
"""
n = len(time_series)
if lag * dim > n:
raise Exception('Result matrix exceeded size limit, try to change lag or dim.')
elif lag < 1:
raise Exception('Lag should be greater or equal to 1.')
pattern_space = np.empty((n - lag * (dim - 1), dim))
for i in range(n - lag * (dim - 1)):
for j in range(dim):
pattern_space[i][j] = time_series[i + j * lag]
return pattern_space | 0.002681 |
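A small usage sketch, assuming util_pattern_space (and numpy) are importable from the
module above; with lag=1 and dim=2 each row is simply a pair of consecutive samples.

ts = [1, 2, 3, 4, 5]
print(util_pattern_space(ts, lag=1, dim=2))
# [[1. 2.]
#  [2. 3.]
#  [3. 4.]
#  [4. 5.]]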
def _STM(cpu, insn_id, base, regs):
"""
STM (Store Multiple) stores a non-empty subset (or possibly all) of the general-purpose registers to
sequential memory locations.
:param int insn_id: should be one of ARM_INS_STM, ARM_INS_STMIB, ARM_INS_STMDA, ARM_INS_STMDB
:param Armv7Operand base: Specifies the base register.
:param list[Armv7Operand] regs:
Is a list of registers. It specifies the set of registers to be stored by the STM instruction.
The registers are stored in sequence, the lowest-numbered register to the lowest
memory address (start_address), through to the highest-numbered register to the
highest memory address (end_address).
"""
if cpu.instruction.usermode:
raise NotImplementedError("Use of the S bit is not supported")
increment = insn_id in (cs.arm.ARM_INS_STM, cs.arm.ARM_INS_STMIB)
after = insn_id in (cs.arm.ARM_INS_STM, cs.arm.ARM_INS_STMDA)
address = base.read()
for reg in regs:
if not after:
address += (1 if increment else -1) * (reg.size // 8)
cpu.write_int(address, reg.read(), reg.size)
if after:
address += (1 if increment else -1) * (reg.size // 8)
if cpu.instruction.writeback:
base.writeback(address) | 0.00504 |
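A pure-Python sketch of the address walk the four STM variants perform, independent of
the CPU model above; the helper name and the 32-bit register size are assumptions made
for this example.

def stm_addresses(base, n_regs, increment, after, size=4):
    # Mirror the increment/after bookkeeping of the emulation above.
    addrs, addr = [], base
    for _ in range(n_regs):
        if not after:
            addr += size if increment else -size
        addrs.append(addr)  # address at which this register would be stored
        if after:
            addr += size if increment else -size
    return addrs

print([hex(a) for a in stm_addresses(0x1000, 3, increment=True, after=True)])   # STM/STMIA
print([hex(a) for a in stm_addresses(0x1000, 3, increment=True, after=False)])  # STMIB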
def parse_perfmetric(metric):
"""Return (sympy expressions, event names and symbols dict) from performance metric str."""
# Find all perfs counter references
perfcounters = re.findall(r'[A-Z0-9_]+:[A-Z0-9\[\]|\-]+(?::[A-Za-z0-9\-_=]+)*', metric)
# Build a temporary metric, with parser-friendly Symbol names
temp_metric = metric
temp_pc_names = {"SYM{}".format(re.sub("[\[\]\-|=:]", "_", pc)): pc
for i, pc in enumerate(perfcounters)}
for var_name, pc in temp_pc_names.items():
temp_metric = temp_metric.replace(pc, var_name)
# Parse temporary expression
expr = parse_expr(temp_metric)
# Rename symbols to originals
for s in expr.free_symbols:
if s.name in temp_pc_names:
s.name = temp_pc_names[str(s)]
events = {s: MachineModel.parse_perfctr_event(s.name) for s in expr.free_symbols
if s.name in perfcounters}
return expr, events | 0.007835 |
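A hedged usage sketch of parse_perfmetric, assuming it and the MachineModel it calls
are importable; the counter names below are invented purely to match the
EVENT:REGISTER pattern the regex looks for.

expr, events = parse_perfmetric(
    "INSTR_RETIRED_ANY:FIXC0 / CPU_CLK_UNHALTED_CORE:FIXC1")
print(expr)         # a sympy expression over the two recognised counters
print(len(events))  # 2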
def detect_sense(curve, tol):
""" Detects the sense, i.e. clockwise or counter-clockwise, of the curve.
:param curve: 2-dimensional trim curve
:type curve: abstract.Curve
:param tol: tolerance value
:type tol: float
:return: True if detection is successful, False otherwise
:rtype: bool
"""
if curve.opt_get('reversed') is None:
# Detect sense since it is unset
pts = curve.evalpts
num_pts = len(pts)
for idx in range(1, num_pts - 1):
sense = detect_ccw(pts[idx - 1], pts[idx], pts[idx + 1], tol)
if sense < 0: # cw
curve.opt = ['reversed', 0]
return True
elif sense > 0: # ccw
curve.opt = ['reversed', 1]
return True
else:
continue
# One final test with random points to determine the orientation
sense = detect_ccw(pts[int(num_pts/3)], pts[int(2*num_pts/3)], pts[-int(num_pts/3)], tol)
if sense < 0: # cw
curve.opt = ['reversed', 0]
return True
elif sense > 0: # ccw
curve.opt = ['reversed', 1]
return True
else:
# Cannot determine the sense
return False
else:
# Don't touch the sense value as it has been already set
return True | 0.001466 |
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode) | 0.011834 |
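The docstring above names ZigZagDecode as the usual modify_value; the stand-in below is
written for this example only and is not the protobuf-internal implementation.

def zigzag_decode(n):
    # Map the unsigned ZigZag varint encoding back to signed integers.
    return (n >> 1) ^ -(n & 1)

print([zigzag_decode(v) for v in (0, 1, 2, 3, 4)])  # [0, -1, 1, -2, 2]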
def join2(ol,*sps):
'''
from elist.elist import *
ol = [1,2,3,4]
join2(ol,"-","+","*")
'''
rslt =""
length = ol.__len__()
for i in range(0,length-1):
rslt = rslt + str(ol[i]) + sps[i]
rslt = rslt + str(ol[length - 1])
return(rslt) | 0.013793 |
def from_Composition(composition):
"""Return the LilyPond equivalent of a Composition in a string."""
# warning Throw exception
if not hasattr(composition, 'tracks'):
return False
result = '\\header { title = "%s" composer = "%s" opus = "%s" } '\
% (composition.title, composition.author, composition.subtitle)
for track in composition.tracks:
result += from_Track(track) + ' '
return result[:-1] | 0.004494 |
def xmlrpc_reschedule(self):
"""
Reschedule all running tasks.
"""
if not len(self.scheduled_tasks) == 0:
self.reschedule = list(self.scheduled_tasks.items())
self.scheduled_tasks = {}
return True | 0.011494 |
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
        raise GraphInsertError("This graph has no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
for vertex in pred:
        if pred[vertex] is None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T | 0.00303 |
def diff_aff(self):
"""Symmetric diffusion affinity matrix
Return or calculate the symmetric diffusion affinity matrix
.. math:: A(x,y) = K(x,y) (d(x) d(y))^{-1/2}
where :math:`d` is the degrees (row sums of the kernel.)
Returns
-------
diff_aff : array-like, shape=[n_samples, n_samples]
symmetric diffusion affinity matrix defined as a
doubly-stochastic form of the kernel matrix
"""
row_degrees = np.array(self.kernel.sum(axis=1)).reshape(-1, 1)
col_degrees = np.array(self.kernel.sum(axis=0)).reshape(1, -1)
if sparse.issparse(self.kernel):
return self.kernel.multiply(1 / np.sqrt(row_degrees)).multiply(
1 / np.sqrt(col_degrees))
else:
return (self.kernel / np.sqrt(row_degrees)) / np.sqrt(col_degrees) | 0.002294 |
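A tiny self-contained sketch of the same symmetric normalisation on a dense kernel,
independent of the class above.

import numpy as np

K = np.array([[1.0, 0.5],
              [0.5, 1.0]])
row_degrees = K.sum(axis=1).reshape(-1, 1)
col_degrees = K.sum(axis=0).reshape(1, -1)
A = (K / np.sqrt(row_degrees)) / np.sqrt(col_degrees)  # each entry is K(x,y) / sqrt(d(x) d(y))
print(A)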
def send(self, targetPath, parent, diff, showProgress=True, allowDryRun=True):
""" Return context manager for stream to send a (incremental) snapshot. """
if parent is not None:
cmd = ["btrfs", "send", "-p", parent, targetPath]
else:
cmd = ["btrfs", "send", targetPath]
if Store.skipDryRun(logger, self.dryrun and allowDryRun)("Command: %s", cmd):
return None
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=DEVNULL)
_makeNice(process)
return _Reader(process, process.stdout, targetPath, diff, showProgress) | 0.006126 |
def _set_attribute(self, obj, attr, value):
"""Set an attribute of an object to a specific value.
Return True if the attribute was changed and False otherwise.
"""
field = obj._meta.get_field(attr)
if field.max_length is not None and len(value) > field.max_length:
cleaned_value = value[:field.max_length]
logger.warn('The attribute "%s" was trimmed from "%s" to "%s"',
attr, value, cleaned_value)
else:
cleaned_value = value
old_value = getattr(obj, attr)
if cleaned_value != old_value:
setattr(obj, attr, cleaned_value)
return True
return False | 0.002837 |
def items(self):
"""
Implements the dict.items() method
"""
self.sync()
for k, v in self.db.items():
try:
yield self.key_conv['from'](k), v
except KeyError:
yield k, v | 0.007605 |
def ask(self, question, default=False):
""" Ask a y/n question to the user.
"""
choices = '[%s/%s]' % ('Y' if default else 'y', 'n' if default else 'N')
while True:
response = raw_input('%s %s' % (question, choices)).strip()
if not response:
return default
elif response in 'yYoO':
return True
elif response in 'nN':
return False | 0.006565 |
def store(self, report, test_id):
'''
:param report: the report to store
:param test_id: the id of the test reported
:return: report id
'''
report_d = report.to_dict()
content = self._serialize_dict(report_d)
report_id = self.insert(
['test_id', 'content', 'status', 'reason'],
[test_id, content, report.get_status(), report.get('reason')],
)
return report_id | 0.00432 |
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1,
train_global_model=False):
"""Train the Concise model in cross-validation.
Args:
X_feat: See :py:func:`concise.Concise.train`
X_seq: See :py:func:`concise.Concise.train`
y: See :py:func:`concise.Concise.train`
            id_vec: List of character ids used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`.
n_folds (int): Number of CV-folds to use.
use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated.
n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
"""
# TODO: input check - dimensions
self._use_stored_folds = use_stored_folds
self._n_folds = n_folds
self._n_rows = X_feat.shape[0]
# TODO: - fix the get_cv_accuracy
# save:
# - each model
# - each model's performance
# - each model's predictions
# - globally:
        #           - mean performance
# - sd performance
# - predictions
self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
cv_obj = {}
if id_vec is None:
id_vec = np.arange(1, self._n_rows + 1)
best_val_acc_epoch_l = []
for fold, train, test in self._kf:
X_feat_train = X_feat[train]
X_seq_train = X_seq[train]
y_train = y[train]
X_feat_test = X_feat[test]
X_seq_test = X_seq[test]
y_test = y[test]
id_vec_test = id_vec[test]
print(fold, "/", n_folds)
# copy the object
dc = copy.deepcopy(self._concise_model)
dc.train(X_feat_train, X_seq_train, y_train,
X_feat_test, X_seq_test, y_test,
n_cores=n_cores
)
dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
cv_obj[fold] = dc
best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"])
self._cv_model = cv_obj
        # additionally train the global model
if train_global_model:
dc = copy.deepcopy(self._concise_model)
# overwrite n_epochs with the best average number of best epochs
dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean())
print("tranining global model with n_epochs = " + str(dc._param["n_epochs"]))
dc.train(X_feat, X_seq, y,
n_cores=n_cores
)
dc._test(X_feat, X_seq, y, id_vec)
self._concise_global_model = dc | 0.003914 |
def OnInsertTabs(self, event):
"""Insert one table into grid"""
with undo.group(_("Insert table")):
self.grid.actions.insert_tabs(self.grid.current_table - 1, 1)
self.grid.GetTable().ResetView()
self.grid.actions.zoom()
event.Skip() | 0.006969 |
def bundle_apps(self, bundle_name, bundle_apps):
"""Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file.
Args:
bundle_name (str): The output name of the bundle zip file.
bundle_apps (list): A list of Apps to include in the bundle.
"""
bundle_file = os.path.join(
self.app_path, self.args.outdir, '{}-bundle.zip'.format(bundle_name)
)
z = zipfile.ZipFile(bundle_file, 'w')
for app in bundle_apps:
# update package data
self.package_data['bundle'].append(
{'action': 'Adding App:', 'output': os.path.basename(app)}
)
z.write(app, os.path.basename(app))
# update package data
self.package_data['bundle'].append(
{'action': 'Created Bundle:', 'output': os.path.basename(bundle_file)}
)
z.close() | 0.00547 |
def itertable(table):
"""Auxiliary function for iterating over a data table."""
for item in table:
res = {
k.lower(): nfd(v) if isinstance(v, text_type) else v for k, v in item.items()}
for extra in res.pop('extra', []):
k, _, v = extra.partition(':')
res[k.strip()] = v.strip()
yield res | 0.005618 |
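A minimal sketch of how itertable normalises a row, assuming the function and the
nfd/text_type helpers from its module are importable; the row content is made up.

rows = [{'Name': 'Bodensee', 'extra': ['glottocode: bode1234', 'area: Europe']}]
for row in itertable(rows):
    print(row)  # {'name': 'Bodensee', 'glottocode': 'bode1234', 'area': 'Europe'}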
def turningpoint(self):
"""Turning point (index and value tuple) in the recession part of the
MA approximation of the instantaneous unit hydrograph."""
coefs = self.coefs
old_dc = coefs[1]-coefs[0]
for idx in range(self.order-2):
new_dc = coefs[idx+2]-coefs[idx+1]
if (old_dc < 0.) and (new_dc > old_dc):
return idx, coefs[idx]
old_dc = new_dc
raise RuntimeError(
'Not able to detect a turning point in the impulse response '
'defined by the MA coefficients %s.'
% objecttools.repr_values(coefs)) | 0.003165 |
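A small numeric illustration of the detection rule, written inline for this example
rather than taken from the library: the turning point is the first index where the
response is still falling but the fall starts to flatten.

coefs = [0.10, 0.30, 0.25, 0.15, 0.10, 0.08]
old_dc = coefs[1] - coefs[0]
for idx in range(len(coefs) - 2):
    new_dc = coefs[idx + 2] - coefs[idx + 1]
    if old_dc < 0.0 and new_dc > old_dc:
        print(idx, coefs[idx])  # -> 2 0.25: the recession slows down after this ordinate
        break
    old_dc = new_dc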
def configure(self, args):
"""Configure the set of plugins with the given args.
After configuration, disabled plugins are removed from the plugins list.
"""
for plug in self._plugins:
plug_name = self.plugin_name(plug)
plug.enabled = getattr(args, "plugin_%s" % plug_name, False)
if plug.enabled and getattr(plug, "configure", None):
if callable(getattr(plug, "configure", None)):
plug.configure(args)
LOG.debug("Available plugins: %s", self._plugins)
self.plugins = [plugin for plugin in self._plugins if getattr(plugin, "enabled", False)]
LOG.debug("Enabled plugins: %s", self.plugins) | 0.005587 |
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit)) | 0.002976 |
def create_payload(self):
"""Reset ``errata_id`` from DB ID to ``errata_id``."""
payload = super(ContentViewFilterRule, self).create_payload()
if 'errata_id' in payload:
if not hasattr(self.errata, 'errata_id'):
self.errata = self.errata.read()
payload['errata_id'] = self.errata.errata_id
return payload | 0.005319 |
def get_serialized_info(self):
"""See base class for details."""
# Flatten tf-example features dict
# Use NonMutableDict to ensure there is no collision between features keys
features_dict = utils.NonMutableDict()
for feature_key, feature in self._feature_dict.items():
serialized_info = feature.get_serialized_info()
# Features can be either containers (dict of other features) or plain
# features (ex: single tensor). Plain features have a None
      # feature.serialized_keys
if not feature.serialized_keys:
features_dict[feature_key] = serialized_info
else:
# Sanity check which should always be True, as feature.serialized_keys
# is computed using feature.get_serialized_info()
_assert_keys_match(serialized_info.keys(), feature.serialized_keys)
features_dict.update({
posixpath.join(feature_key, k): v
for k, v in serialized_info.items()
})
return features_dict | 0.007049 |
def uninstall_logger(logger=None, module=None):
"""
    Uninstalls the given logger from the given module, or the default logger from the caller's introspected module.
:param logger: Logger to uninstall.
:type logger: Logger
:param module: Module.
:type module: ModuleType
:return: Definition success.
:rtype: bool
"""
logger = logging.getLogger(Constants.logger) if logger is None else logger
if module is None:
# Note: inspect.getmodule() can return the wrong module if it has been imported with different relatives paths.
module = sys.modules.get(inspect.currentframe().f_back.f_globals["__name__"])
hasattr(module, "LOGGER") and delattr(module, "LOGGER")
return True | 0.00561 |
def add_tip(self, tip_length) -> None:
"""
Add a tip to the pipette for position tracking and validation
(effectively updates the pipette's critical point)
:param tip_length: a positive, non-zero float representing the distance
in Z from the end of the pipette nozzle to the end of the tip
:return:
"""
assert tip_length > 0.0, "tip_length must be greater than 0"
assert not self.has_tip
self._has_tip = True
self._current_tip_length = tip_length | 0.003711 |
def _onShortcutMoveLine(self, down):
"""Move line up or down
Actually, not a selected text, but next or previous block is moved
TODO keep bookmarks when moving
"""
startBlock, endBlock = self._selectedBlocks()
startBlockNumber = startBlock.blockNumber()
endBlockNumber = endBlock.blockNumber()
def _moveBlock(block, newNumber):
text = block.text()
with self:
del self.lines[block.blockNumber()]
self.lines.insert(newNumber, text)
if down: # move next block up
blockToMove = endBlock.next()
if not blockToMove.isValid():
return
            # if the operation is undone, marks are located incorrectly
markMargin = self.getMargin("mark_area")
if markMargin:
markMargin.clearBookmarks(startBlock, endBlock.next())
_moveBlock(blockToMove, startBlockNumber)
self._selectLines(startBlockNumber + 1, endBlockNumber + 1)
else: # move previous block down
blockToMove = startBlock.previous()
if not blockToMove.isValid():
return
            # if the operation is undone, marks are located incorrectly
markMargin = self.getMargin("mark_area")
if markMargin:
markMargin.clearBookmarks(startBlock, endBlock)
_moveBlock(blockToMove, endBlockNumber)
self._selectLines(startBlockNumber - 1, endBlockNumber - 1)
if markMargin:
markMargin.update() | 0.001253 |