text (string, lengths 78-104k) | score (float64, 0-0.18) |
---|---|
def _AlignDecodedDataOffset(self, decoded_data_offset):
"""Aligns the encoded file with the decoded data offset.
Args:
decoded_data_offset (int): decoded data offset.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decoder = self._GetDecoder()
self._decoded_data = b''
encoded_data_offset = 0
encoded_data_size = self._file_object.get_size()
while encoded_data_offset < encoded_data_size:
read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encoded_data_offset += read_count
if decoded_data_offset < self._decoded_data_size:
self._decoded_data_offset = decoded_data_offset
break
decoded_data_offset -= self._decoded_data_size | 0.007792 |
def _base_request(self, method):
"""Factory method for generating the base XML requests."""
request = E.Element(method)
request.set('xmlns', 'AnetApi/xml/v1/schema/AnetApiSchema.xsd')
request.append(self.client_auth)
return request | 0.00738 |
def _setDeviceID(self, value, device, message):
"""
        Set the hardware device number. This is only needed if more than one
        device is on the same serial bus.
:Parameters:
value : `int`
The device ID to set in the range of 0 - 127.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
message : `bool`
If set to `True` a text message will be returned, if set to `False`
the integer stored in the Qik will be returned.
:Returns:
A text message or an int. See the `message` parameter above. If
`value` and `device` are the same `OK` or `0` will be returned
depending on the value of `message`.
"""
if value != device:
result = self._setConfig(self.DEVICE_ID, value, device, message)
self._deviceConfig[value] = self._deviceConfig.pop(device)
elif message:
result = self._CONFIG_RETURN.get(0)
elif not message:
result = 0
return result | 0.001747 |
def django_logging_dict(log_dir, handlers=['file'], filename='debug.log'):
"""Extends :func:`logthing.utils.default_logging_dict` with django
specific values.
"""
d = default_logging_dict(log_dir, handlers, filename)
d['handlers'].update({
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
}
})
d['loggers'].update({
'django.db.backends': { # stop SQL debug from going to main logger
'handlers': ['file', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.request': {
'handlers': ['file', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
})
return d | 0.00641 |
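The returned dict can be dropped straight into a Django settings module. A minimal sketch, assuming this helper is importable (the `myproject.logutils` path below is hypothetical) and that `logthing` provides `default_logging_dict` as above:

# settings.py -- illustrative only
from myproject.logutils import django_logging_dict

# File logging goes to <log_dir>/debug.log; Django errors are also mailed to ADMINS.
LOGGING = django_logging_dict('/var/log/myapp')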
def save_attribute(elements, module_path):
""" Recursively save attributes with module name and signature. """
for elem, signature in elements.items():
if isinstance(signature, dict): # Submodule case
save_attribute(signature, module_path + (elem,))
elif signature.isattribute():
assert elem not in attributes # we need unicity
attributes[elem] = (module_path, signature,)
elif isinstance(signature, Class):
save_attribute(signature.fields, module_path + (elem,)) | 0.001835 |
def filter_by(lookup_dict,
grain='os_family',
merge=None,
default='default',
base=None):
'''
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}), default='Debian' %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
:param merge: A dictionary to merge with the ``lookup_dict`` before doing
the lookup. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
    :param default: default lookup_dict's key used if the grain does not exist
        or if the grain value has no match in lookup_dict.
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is None.
.. versionadded:: 2015.8.11,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
'''
ret = lookup_dict.get(
__grains__.get(
grain, default),
lookup_dict.get(
default, None)
)
if base and base in lookup_dict:
base_values = lookup_dict[base]
if ret is None:
ret = base_values
elif isinstance(base_values, collections.Mapping):
if not isinstance(ret, collections.Mapping):
raise SaltException('filter_by default and look-up values must both be dictionaries.')
ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException('filter_by merge argument must be a dictionary.')
else:
if ret is None:
ret = merge
else:
salt.utils.dictupdate.update(ret, merge)
return ret | 0.001221 |
def _instantiate(cls, params):
"""
Helper to instantiate Attention classes from parameters. Warns in log if parameter is not supported
by class constructor.
:param cls: Attention class.
:param params: configuration parameters.
:return: instance of `cls` type.
"""
sig_params = inspect.signature(cls.__init__).parameters
valid_params = dict()
for key, value in params.items():
if key in sig_params:
valid_params[key] = value
else:
logger.debug('Type %s does not support parameter \'%s\'' % (cls.__name__, key))
return cls(**valid_params) | 0.004831 |
def unhide_selected():
'''Unhide the selected objects'''
hidden_state = current_representation().hidden_state
selection_state = current_representation().selection_state
res = {}
# Take the hidden state and flip the selected atoms bits.
for k in selection_state:
visible = hidden_state[k].invert()
visible_and_selected = visible.add(selection_state[k]) # Add some atoms to be visible
res[k] = visible_and_selected.invert()
current_representation().hide(res) | 0.009634 |
def check_video(video, languages=None, age=None, undefined=False):
"""Perform some checks on the `video`.
All the checks are optional. Return `False` if any of this check fails:
* `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`.
* `video` is older than `age`.
* `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`.
:param video: video to check.
:type video: :class:`~subliminal.video.Video`
:param languages: desired languages.
:type languages: set of :class:`~babelfish.language.Language`
:param datetime.timedelta age: maximum age of the video.
:param bool undefined: fail on existing undefined language.
:return: `True` if the video passes the checks, `False` otherwise.
:rtype: bool
"""
# language test
if languages and not (languages - video.subtitle_languages):
logger.debug('All languages %r exist', languages)
return False
# age test
if age and video.age > age:
logger.debug('Video is older than %r', age)
return False
# undefined test
if undefined and Language('und') in video.subtitle_languages:
logger.debug('Undefined language found')
return False
return True | 0.002304 |
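A hedged usage sketch with subliminal and babelfish (the filename below is made up); the checks decide whether a video is worth searching subtitles for:

from datetime import timedelta
from babelfish import Language
from subliminal.video import Video

video = Video.fromname('Some.Movie.2018.1080p.BluRay.x264.mkv')
if check_video(video, languages={Language('eng')}, age=timedelta(weeks=2), undefined=True):
    # Only now hand the video over to the subtitle providers.
    ...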
def _do_api_call(self, endpoint_info, json):
"""
Utility function to perform an API call with retries
:param endpoint_info: Tuple of method and endpoint
:type endpoint_info: tuple[string, string]
:param json: Parameters for this API call.
:type json: dict
:return: If the api call returns a OK status code,
this function returns the response in JSON. Otherwise,
we throw an AirflowException.
:rtype: dict
"""
method, endpoint = endpoint_info
url = 'https://{host}/{endpoint}'.format(
host=self._parse_host(self.databricks_conn.host),
endpoint=endpoint)
if 'token' in self.databricks_conn.extra_dejson:
self.log.info('Using token auth.')
auth = _TokenAuth(self.databricks_conn.extra_dejson['token'])
else:
self.log.info('Using basic auth.')
auth = (self.databricks_conn.login, self.databricks_conn.password)
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
attempt_num = 1
while True:
try:
response = request_func(
url,
json=json,
auth=auth,
headers=USER_AGENT_HEADER,
timeout=self.timeout_seconds)
response.raise_for_status()
return response.json()
except requests_exceptions.RequestException as e:
if not _retryable_error(e):
# In this case, the user probably made a mistake.
# Don't retry.
raise AirflowException('Response: {0}, Status Code: {1}'.format(
e.response.content, e.response.status_code))
self._log_request_error(attempt_num, e)
if attempt_num == self.retry_limit:
raise AirflowException(('API requests to Databricks failed {} times. ' +
'Giving up.').format(self.retry_limit))
attempt_num += 1
sleep(self.retry_delay) | 0.001732 |
def processGif(searchStr):
'''
This function returns the url of the gif searched for
with the given search parameters using the Giphy API.
Thanks!
Fails gracefully when it can't find a gif by returning an
appropriate image url with the failure message on it.
'''
    # Bail out early if there is nothing to search for.
    if searchStr is None:
        print("No search parameters specified!")
        return no_search_params
    # Sanitizing searchStr
    # TODO: Find a better way to do this
    searchStr = searchStr.replace('| ', ' ')
    searchStr = searchStr.replace('|', ' ')
    searchStr = searchStr.replace(', ', ' ')
    searchStr = searchStr.replace(',', ' ')
    searchStr = searchStr.rstrip()
    searchStr = searchStr.strip('./?\'!,')
    searchStr = searchStr.replace(' ', '+')
    if searchStr == '':
        print("No search parameters specified!")
        return no_search_params
api_url = 'http://api.giphy.com/v1/gifs/search'
api_key = 'dc6zaTOxFJmzC'
payload = {
'q': searchStr,
'limit': 1,
'api_key': api_key,
}
r = requests.get(api_url, params=payload)
parsed_json = json.loads(r.text)
# print(parsed_json)
if len(parsed_json['data']) == 0:
print("Couldn't find suitable match for gif! :(")
return -1
else: # Success!
imgURL = parsed_json['data'][0]['images']['fixed_height']['url']
# print(imgURL)
return imgURL | 0.002327 |
def umi_below_threshold(umi_quals, quality_encoding, quality_filter_threshold):
''' return true if any of the umi quals is below the threshold'''
below_threshold = get_below_threshold(
umi_quals, quality_encoding, quality_filter_threshold)
return any(below_threshold) | 0.006944 |
def import_agent(self,
parent,
agent_uri=None,
agent_content=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Imports the specified agent from a ZIP file.
Uploads new intents and entity types without deleting the existing ones.
Intents and entity types with the same name are replaced with the new
versions from ImportAgentRequest.
Operation <response: ``google.protobuf.Empty``,
metadata: [google.protobuf.Struct][google.protobuf.Struct]>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.import_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to import is associated with.
Format: ``projects/<Project ID>``.
agent_uri (str): The URI to a Google Cloud Storage file containing the agent to import.
Note: The URI must start with \"gs://\".
agent_content (bytes): The agent to import.
Example for how to import an agent via the command line:
curl \
'https://dialogflow.googleapis.com/v2/projects/<project_name>/agent:import\
-X POST \
-H 'Authorization: Bearer '$(gcloud auth print-access-token) \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
--compressed \
--data-binary \"{
::
'agentContent': '$(cat <agent zip file> | base64 -w 0)'
}\"
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'import_agent' not in self._inner_api_calls:
self._inner_api_calls[
'import_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.import_agent,
default_retry=self._method_configs['ImportAgent'].retry,
default_timeout=self._method_configs['ImportAgent']
.timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
agent_uri=agent_uri,
agent_content=agent_content,
)
request = agent_pb2.ImportAgentRequest(
parent=parent,
agent_uri=agent_uri,
agent_content=agent_content,
)
operation = self._inner_api_calls['import_agent'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
) | 0.002858 |
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options) | 0.011111 |
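A hedged usage sketch, assuming `app` is an instance of the routing class this method belongs to (it mirrors the Bottle-style decorator pattern):

@app.delete('/items/<item_id>')
def remove_item(item_id):
    # The decorator registers this handler for DELETE /items/<item_id>.
    return {'status': 'deleted', 'id': item_id}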
def strip_accents(x):
u"""Strip accents in the input phrase X.
Strip accents in the input phrase X (assumed in UTF-8) by replacing
accented characters with their unaccented cousins (e.g. é by e).
:param x: the input phrase to strip.
:type x: string
:return: Return such a stripped X.
"""
x = re_latex_lowercase_a.sub("a", x)
x = re_latex_lowercase_ae.sub("ae", x)
x = re_latex_lowercase_oe.sub("oe", x)
x = re_latex_lowercase_e.sub("e", x)
x = re_latex_lowercase_i.sub("i", x)
x = re_latex_lowercase_o.sub("o", x)
x = re_latex_lowercase_u.sub("u", x)
    x = re_latex_lowercase_y.sub("y", x)
x = re_latex_lowercase_c.sub("c", x)
x = re_latex_lowercase_n.sub("n", x)
x = re_latex_uppercase_a.sub("A", x)
x = re_latex_uppercase_ae.sub("AE", x)
x = re_latex_uppercase_oe.sub("OE", x)
x = re_latex_uppercase_e.sub("E", x)
x = re_latex_uppercase_i.sub("I", x)
x = re_latex_uppercase_o.sub("O", x)
x = re_latex_uppercase_u.sub("U", x)
x = re_latex_uppercase_y.sub("Y", x)
x = re_latex_uppercase_c.sub("C", x)
x = re_latex_uppercase_n.sub("N", x)
# convert input into Unicode string:
try:
y = unicode(x, "utf-8")
except Exception:
return x # something went wrong, probably the input wasn't UTF-8
# asciify Latin-1 lowercase characters:
y = re_unicode_lowercase_a.sub("a", y)
y = re_unicode_lowercase_ae.sub("ae", y)
y = re_unicode_lowercase_oe.sub("oe", y)
y = re_unicode_lowercase_e.sub("e", y)
y = re_unicode_lowercase_i.sub("i", y)
y = re_unicode_lowercase_o.sub("o", y)
y = re_unicode_lowercase_u.sub("u", y)
y = re_unicode_lowercase_y.sub("y", y)
y = re_unicode_lowercase_c.sub("c", y)
y = re_unicode_lowercase_n.sub("n", y)
y = re_unicode_lowercase_ss.sub("ss", y)
# asciify Latin-1 uppercase characters:
y = re_unicode_uppercase_a.sub("A", y)
y = re_unicode_uppercase_ae.sub("AE", y)
y = re_unicode_uppercase_oe.sub("OE", y)
y = re_unicode_uppercase_e.sub("E", y)
y = re_unicode_uppercase_i.sub("I", y)
y = re_unicode_uppercase_o.sub("O", y)
y = re_unicode_uppercase_u.sub("U", y)
y = re_unicode_uppercase_y.sub("Y", y)
y = re_unicode_uppercase_c.sub("C", y)
y = re_unicode_uppercase_n.sub("N", y)
# return UTF-8 representation of the Unicode string:
return y.encode("utf-8") | 0.000415 |
def spawn(self, url, force_spawn=False):
"""use the url for creation of domain and fetch cookies
- init cache dir by the url domain as ``<base>/domain``
- save the cookies to file ``<base>/domain/cookie.txt``
- init ``headers.get/post/json`` with response info
- init ``site_dir/site_raw/site_media``
:param url:
:type url:
:param force_spawn:
:type force_spawn:
:return:
:rtype:
"""
_url, domain = self.get_domain_home_from_url(url)
if not _url:
return False
self.cache['site_dir'] = os.path.join(self.cache['base'], self.domain)
for k in ['raw', 'media']:
self.cache['site_' + k] = os.path.join(self.cache['site_dir'], k)
helper.mkdir_p(self.cache['site_' + k], True)
ck_pth = os.path.join(self.cache['site_dir'], 'cookie.txt')
helper.mkdir_p(ck_pth)
name = os.path.join(self.cache['site_raw'], 'homepage')
# not force spawn and file ok
if not force_spawn and helper.is_file_ok(name):
# zlog.debug('{} exist!'.format(name))
self.sess.cookies = self.load_cookies(ck_pth)
return True
else:
zlog.debug('{} not exist!'.format(name))
res = self.sess.get(url, headers=self.__header__)
if res.status_code != 200:
return False
if res:
helper.write_file(res.content, name)
# self.load(url)
for k, v in self.headers.items():
self.headers[k] = res.request.headers
self.dump_cookies(cookies=self.sess.cookies, save_to=ck_pth)
return True | 0.001185 |
def get_variables_substitution_dictionaries(self, lhs_graph, rhs_graph):
"""
Looks for sub-isomorphisms of rhs into lhs
:param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names
"""
if not rhs_graph:
return {}, {}, {}
self.matching_code_container.add_graph_to_namespace(lhs_graph)
self.matching_code_container.add_graph_to_namespace(rhs_graph)
return self.__collect_variables_that_match_graph(lhs_graph, rhs_graph) | 0.005017 |
def _compute_sorted_indices(self):
"""
The smoothers need sorted data. This sorts it from the perspective of each column.
if self._x[0][3] is the 9th-smallest value in self._x[0], then _xi_sorted[3] = 8
We only have to sort the data once.
"""
sorted_indices = []
for to_sort in [self.y] + self.x:
data_w_indices = [(val, i) for (i, val) in enumerate(to_sort)]
data_w_indices.sort()
sorted_indices.append([i for val, i in data_w_indices])
# save in meaningful variable names
self._yi_sorted = sorted_indices[0] # list (like self.y)
self._xi_sorted = sorted_indices[1:] | 0.005822 |
def ComputeFortranSuffixes(suffixes, ppsuffixes):
"""suffixes are fortran source files, and ppsuffixes the ones to be
pre-processed. Both should be sequences, not strings."""
assert len(suffixes) > 0
s = suffixes[0]
sup = s.upper()
upper_suffixes = [_.upper() for _ in suffixes]
if SCons.Util.case_sensitive_suffixes(s, sup):
ppsuffixes.extend(upper_suffixes)
else:
suffixes.extend(upper_suffixes) | 0.002247 |
def didLastExecutedUpgradeSucceeded(self) -> bool:
"""
Checks last record in upgrade log to find out whether it
is about scheduling upgrade. If so - checks whether current version
        is equal to the one in that record
:returns: upgrade execution result
"""
lastEventInfo = self.lastActionEventInfo
if lastEventInfo:
ev_data = lastEventInfo.data
currentPkgVersion = NodeControlUtil.curr_pkg_info(ev_data.pkg_name)[0]
if currentPkgVersion:
return currentPkgVersion.upstream == ev_data.version
else:
logger.warning(
"{} failed to get information about package {} "
"scheduled for last upgrade"
.format(self, ev_data.pkg_name)
)
return False | 0.003472 |
def _get_qvm_qc(name: str, qvm_type: str, device: AbstractDevice, noise_model: NoiseModel = None,
requires_executable: bool = False,
connection: ForestConnection = None) -> QuantumComputer:
"""Construct a QuantumComputer backed by a QVM.
This is a minimal wrapper over the QuantumComputer, QVM, and QVMCompiler constructors.
:param name: A string identifying this particular quantum computer.
:param qvm_type: The type of QVM. Either qvm or pyqvm.
:param device: A device following the AbstractDevice interface.
:param noise_model: An optional noise model
:param requires_executable: Whether this QVM will refuse to run a :py:class:`Program` and
only accept the result of :py:func:`compiler.native_quil_to_executable`. Setting this
to True better emulates the behavior of a QPU.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A QuantumComputer backed by a QVM with the above options.
"""
if connection is None:
connection = ForestConnection()
return QuantumComputer(name=name,
qam=_get_qvm_or_pyqvm(
qvm_type=qvm_type,
connection=connection,
noise_model=noise_model,
device=device,
requires_executable=requires_executable),
device=device,
compiler=QVMCompiler(
device=device,
endpoint=connection.compiler_endpoint)) | 0.003474 |
def require_setting(self, name, feature='this feature'):
"""Raises an exception if the given app setting is not defined.
        As a generalization, this method should be called from a Consumer's
        :py:meth:`~rejected.consumer.Consumer.initialize` method. If a required
        setting is not found, this method will cause the
        consumer to shut down prior to receiving any messages from RabbitMQ.
:param name: The parameter name
:type name: :class:`str`
:param feature: A friendly name for the setting feature
:type feature: :class:`str`
:raises: :exc:`~rejected.errors.ConfigurationException`
"""
if name not in self.settings:
raise ConfigurationException(
"You must define the '{}' setting in your "
"application to use {}".format(name, feature)) | 0.002301 |
def interconnect_link_topologies(self):
"""
Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies:
"""
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies | 0.007979 |
def set_hostname(hostname=None):
'''
Sets the hostname on the server.
.. versionadded:: 2019.2.0
Args:
hostname(str): The new hostname to set.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_hostname foobar
'''
if not hostname:
raise salt.exceptions.CommandExecutionError("Hostname option must be provided.")
dn = "sys/rack-unit-1/mgmt/if-1"
inconfig = """<mgmtIf dn="sys/rack-unit-1/mgmt/if-1" hostname="{0}" ></mgmtIf>""".format(hostname)
ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)
try:
if ret['outConfig']['mgmtIf'][0]['status'] == 'modified':
return True
else:
return False
except Exception as err:
return False | 0.003886 |
def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment]) | 0.002967 |
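Illustrative calls, with the outputs one would expect from the implementation above (non-string segments are formatted with '%s', so integers work too):

url_join('https://example.com/api', 'v1', 'users', 42)
# -> 'https://example.com/api/v1/users/42'
url_join('https://example.com', 'docs', 'index.html')
# -> 'https://example.com/docs/index.html'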
def query(self, query):
"""Q.query(query string) -> category string -- return the matched
category for any user query
"""
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip() | 0.034965 |
def round_f1(y_true, y_predicted):
"""
Calculates F1 (binary) measure.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
F1 score
"""
try:
predictions = [np.round(x) for x in y_predicted]
except TypeError:
predictions = y_predicted
return f1_score(y_true, predictions) | 0.002674 |
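A small illustrative call, assuming numpy and scikit-learn are available (as the function itself requires); float predictions are rounded before scoring:

round_f1([0, 1, 1, 0], [0.1, 0.8, 0.4, 0.2])
# Predictions round to [0, 1, 0, 0], giving an F1 of 2/3.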
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2013-06-28 - Started - Bovy (IAS)
"""
r= nu.sqrt(R**2.+z**2.)
return 2.*nu.pi*self.rc**(3.-self.alpha)/r*(r/self.rc*special.gamma(1.-self.alpha/2.)*special.gammainc(1.-self.alpha/2.,(r/self.rc)**2.)-special.gamma(1.5-self.alpha/2.)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.)) | 0.015291 |
def get_root_graph(self,root):
"""Return back a graph containing just the root and children"""
children = self.get_children(root)
g = Graph()
nodes = [root]+children
for node in nodes: g.add_node(node)
node_ids = [x.id for x in nodes]
edges = [x for x in self._edges.values() if x.node1.id in node_ids and x.node2.id in node_ids]
for e in edges: g.add_edge(e)
return g | 0.035545 |
def add_key_filters(self, key_filters):
"""
Adds key filters to the inputs.
:param key_filters: a list of filters
:type key_filters: list
:rtype: :class:`RiakMapReduce`
"""
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in a query.')
self._key_filters.extend(key_filters)
return self | 0.004975 |
def delete_refresh_token(self, refresh_token):
"""
Deletes a refresh token after use
:param refresh_token: The refresh token to delete.
"""
access_token = self.fetch_by_refresh_token(refresh_token)
self.mc.delete(self._generate_cache_key(access_token.token))
self.mc.delete(self._generate_cache_key(refresh_token)) | 0.005405 |
async def execute_filter(filter_: FilterObj, args):
"""
Helper for executing filter
:param filter_:
:param args:
:return:
"""
if filter_.is_async:
return await filter_.filter(*args, **filter_.kwargs)
else:
return filter_.filter(*args, **filter_.kwargs) | 0.003322 |
def get_password(entry=None, username=None, prompt=None, always_ask=False):
"""
Prompt the user for a password on stdin.
:param username: The username to get the password for. Default is the current user.
:param entry: The entry in the keychain. This is a caller specific key.
    :param prompt: The prompt to display when asking for the password.
:param always_ask: Force the user to enter the password every time.
"""
password = None
if username is None:
username = get_username()
has_keychain = initialize_keychain()
    # Unlock the user's keychain otherwise, if running under SSH, 'security(1)' will throw an error.
unlock_keychain(username)
if prompt is None:
prompt = "Enter %s's password: " % username
if has_keychain and entry is not None and always_ask is False:
password = get_password_from_keyring(entry, username)
if password is None:
password = getpass.getpass(prompt=prompt)
return password | 0.002944 |
def get_data_sharing_consent(username, enterprise_customer_uuid, course_id=None, program_uuid=None):
"""
Get the data sharing consent object associated with a certain user, enterprise customer, and other scope.
:param username: The user that grants consent
:param enterprise_customer_uuid: The consent requester
:param course_id (optional): A course ID to which consent may be related
:param program_uuid (optional): A program to which consent may be related
:return: The data sharing consent object, or None if the enterprise customer for the given UUID does not exist.
"""
EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer') # pylint: disable=invalid-name
try:
if course_id:
return get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid)
return get_program_data_sharing_consent(username, program_uuid, enterprise_customer_uuid)
except EnterpriseCustomer.DoesNotExist:
return None | 0.006958 |
def ProcessListDirectory(self, responses):
"""Processes the results of the ListDirectory client action.
Args:
responses: a flow Responses object.
"""
if not responses.success:
raise flow.FlowError("Unable to list directory.")
with data_store.DB.GetMutationPool() as pool:
for response in responses:
stat_entry = rdf_client_fs.StatEntry(response)
filesystem.CreateAFF4Object(
stat_entry, self.client_urn, pool, token=self.token)
self.SendReply(stat_entry) | 0.00566 |
def get_ntp_stats(self):
"""Implementation of get_ntp_stats for IOS."""
ntp_stats = []
command = "show ntp associations"
output = self._send_command(command)
for line in output.splitlines():
# Skip first two lines and last line of command output
if line == "" or "address" in line or "sys.peer" in line:
continue
if "%NTP is not enabled" in line:
return []
elif len(line.split()) == 9:
address, ref_clock, st, when, poll, reach, delay, offset, disp = (
line.split()
)
address_regex = re.match(r"(\W*)([0-9.*]*)", address)
try:
ntp_stats.append(
{
"remote": py23_compat.text_type(address_regex.group(2)),
"synchronized": ("*" in address_regex.group(1)),
"referenceid": py23_compat.text_type(ref_clock),
"stratum": int(st),
"type": "-",
"when": py23_compat.text_type(when),
"hostpoll": int(poll),
"reachability": int(reach),
"delay": float(delay),
"offset": float(offset),
"jitter": float(disp),
}
)
except Exception:
continue
return ntp_stats | 0.002635 |
def rooms(self):
"""
:rtype: twilio.rest.video.v1.room.RoomList
"""
if self._rooms is None:
self._rooms = RoomList(self)
return self._rooms | 0.010471 |
def parse_file(self, sls_path):
'''
Given a typical Salt SLS path (e.g.: apache.vhosts.standard), find the
file on the file system and parse it
'''
config = self.state.document.settings.env.config
formulas_dirs = config.formulas_dirs
fpath = sls_path.replace('.', '/')
name_options = (
'{0}.sls'.format(fpath),
os.path.join(fpath, 'init.sls')
)
paths = [os.path.join(fdir, fname)
for fname in name_options
for fdir in formulas_dirs]
for i in paths:
try:
with open(i, 'rb') as f:
return f.readlines()
except IOError:
pass
raise IOError("Could not find sls file '{0}'".format(sls_path)) | 0.004878 |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(Predict, self).fix_config(options)
opt = "model"
if opt not in options:
options[opt] = "."
if opt not in self.help:
self.help[opt] = "The serialized model to use for making predictions (string)."
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the model (or ModelContainer) in storage to use (string)."
return options | 0.00612 |
def _set_stream_parameters(self, **kwargs):
"""
Sets the stream parameters which are expected to be declared
constant.
"""
with util.disable_constant(self):
self.param.set_param(**kwargs) | 0.008368 |
def dateparser(self, dformat='%d/%m/%Y'):
"""
Returns a date parser for pandas
"""
def dateparse(dates):
return [pd.datetime.strptime(d, dformat) for d in dates]
return dateparse | 0.008621 |
def read_packets(self):
"""Read packets from the socket and parse them"""
while self.running:
packet_length = self.client.recv(2)
if len(packet_length) < 2:
self.stop()
continue
packet_length = struct.unpack("<h", packet_length)[0] - 2
data = self.client.recv(packet_length)
packno = data[0]
try:
parser = "Packet" + format(packno, 'x').upper() + "Parser"
packet_class = getattr(packets, parser)
packet_class().parse(self.world, self.player, data, self._evman)
except AttributeError as e:
pass
if packno == 2:
self.stop()
continue | 0.003881 |
def create_data_dir():
"""
Creates the DATA_DIR.
:return:
"""
from django_productline.context import PRODUCT_CONTEXT
if not os.path.exists(PRODUCT_CONTEXT.DATA_DIR):
os.mkdir(PRODUCT_CONTEXT.DATA_DIR)
print('*** Created DATA_DIR in %s' % PRODUCT_CONTEXT.DATA_DIR)
else:
print('...DATA_DIR already exists.') | 0.002793 |
def click(self, force_no_call=False, milis=None):
"""
        Call when the button is pressed. This starts the callback function in a thread.
        If :milis is given, the button will be released after :milis milliseconds.
"""
if self.clicked:
return False
if not force_no_call and self.flags & self.CALL_ON_PRESS:
if self.flags & self.THREADED_CALL:
start_new_thread(self.func, ())
else:
self.func()
super().click()
if milis is not None:
start_new_thread(self.release, (), {'milis': milis}) | 0.004862 |
def _tryMatch(self, textToMatchObject):
"""Try to find themselves in the text.
Returns (count, matchedRule) or (None, None) if doesn't match
"""
for rule in self.context.rules:
ruleTryMatchResult = rule.tryMatch(textToMatchObject)
if ruleTryMatchResult is not None:
_logger.debug('\tmatched rule %s at %d in included context %s/%s',
rule.shortId(),
textToMatchObject.currentColumnIndex,
self.context.parser.syntax.name,
self.context.name)
return ruleTryMatchResult
else:
return None | 0.004225 |
def _merge_files(windows, nb_cpu):
# type: (Iterable[pd.DataFrame], int) -> pd.DataFrame
"""Merge lists of chromosome bin df chromosome-wise.
windows is an OrderedDict where the keys are files, the values are lists of
dfs, one per chromosome.
Returns a list of dataframes, one per chromosome, with the collective count
per bin for all files.
TODO: is it faster to merge all in one command?
"""
# windows is a list of chromosome dfs per file
windows = iter(windows) # can iterate over because it is odict_values
merged = next(windows)
# if there is only one file, the merging is skipped since the windows is used up
for chromosome_dfs in windows:
# merge_same_files merges the chromosome files in parallel
merged = merge_same_files(merged, chromosome_dfs, nb_cpu)
return merged | 0.002339 |
def is_time_valid(self, timestamp):
"""Check if time is valid for this Timerange
If sec_from_morning is not provided, get the value.
:param timestamp: time to check
:type timestamp: int
:return: True if time is valid (in interval), False otherwise
:rtype: bool
"""
sec_from_morning = get_sec_from_morning(timestamp)
return (self.is_valid and
self.hstart * 3600 + self.mstart * 60 <=
sec_from_morning <=
self.hend * 3600 + self.mend * 60) | 0.003578 |
def extract_urls(url, data, unescape=HTMLParser.HTMLParser().unescape):
"""Extracts the URLs from an HTML document."""
parts = urlparse.urlparse(url)
prefix = '%s://%s' % (parts.scheme, parts.netloc)
accessed_dir = os.path.dirname(parts.path)
if not accessed_dir.endswith('/'):
accessed_dir += '/'
for pattern, replacement in REPLACEMENT_REGEXES:
fixed = replacement % {
'base': prefix,
'accessed_dir': accessed_dir,
}
data = re.sub(pattern, fixed, data)
result = set()
for match in re.finditer(MAYBE_HTML_URL_REGEX, data):
found_url = unescape(match.groupdict()['absurl'])
found_url = clean_url(
found_url,
force_scheme=parts[0]) # Use the main page's scheme
result.add(found_url)
return result | 0.00119 |
def grant_winsta_and_desktop(th):
'''
Grant the token's user access to the current process's window station and
desktop.
'''
current_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0]
# Add permissions for the sid to the current windows station and thread id.
# This prevents windows error 0xC0000142.
winsta = win32process.GetProcessWindowStation()
set_user_perm(winsta, WINSTA_ALL, current_sid)
desktop = win32service.GetThreadDesktop(win32api.GetCurrentThreadId())
set_user_perm(desktop, DESKTOP_ALL, current_sid) | 0.003442 |
def setup_handler(context):
"""Generic setup handler
"""
if context.readDataFile('senaite.lims.txt') is None:
return
logger.info("SENAITE setup handler [BEGIN]")
portal = context.getSite() # noqa
# Custom setup handlers
setup_html_filter(portal)
logger.info("SENAITE setup handler [DONE]") | 0.002994 |
def log(self, facility, level, text, pid=False):
"""Send the message text to all registered hosts.
The facility and level will be used to create the packet's PRI
part. The HEADER will be automatically determined from the
current time and hostname. The MSG will be set from the
running program's name and the text parameter.
This is the simplest way to use reSyslog.Syslog, creating log
messages containing the current time, hostname, program name,
etc. This is how you do it::
logger = syslog.Syslog()
logger.add_host("localhost")
logger.log(Facility.USER, Level.INFO, "Hello World")
If pid is True the process ID will be prepended to the text
parameter, enclosed in square brackets and followed by a
colon.
"""
pri = PRI(facility, level)
header = HEADER()
if pid:
msg = MSG(content=text, pid=os.getpid())
else:
msg = MSG(content=text)
packet = Packet(pri, header, msg)
self._send_packet_to_hosts(packet) | 0.001797 |
def enable_servicegroup_host_checks(self, servicegroup):
"""Enable host checks for a servicegroup
Format of the line that triggers function call::
ENABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name>
:param servicegroup: servicegroup to enable
:type servicegroup: alignak.objects.servicegroup.Servicegroup
:return: None
"""
for service_id in servicegroup.get_services():
if service_id in self.daemon.services:
host_id = self.daemon.services[service_id].host
self.enable_host_check(self.daemon.hosts[host_id]) | 0.003241 |
def build_upstream_edge_predicate(nodes: Iterable[BaseEntity]) -> EdgePredicate:
"""Build an edge predicate that pass for relations for which one of the given nodes is the object."""
nodes = set(nodes)
def upstream_filter(graph: BELGraph, u: BaseEntity, v: BaseEntity, k: str) -> bool:
"""Pass for relations for which one of the given nodes is the object."""
return v in nodes and graph[u][v][k][RELATION] in CAUSAL_RELATIONS
return upstream_filter | 0.010373 |
def query_metric_stats(self, metric_type, metric_id=None, start=None, end=None, bucketDuration=None, **query_options):
"""
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string presentation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
if bucketDuration is not None:
if type(bucketDuration) is timedelta:
query_options['bucketDuration'] = timedelta_to_duration(bucketDuration)
else:
query_options['bucketDuration'] = bucketDuration
if metric_id is not None:
url = self._get_metrics_stats_url(self._get_metrics_single_url(metric_type, metric_id))
else:
            if len(query_options) < 1:
raise HawkularError('Tags are required when querying without metric_id')
url = self._get_metrics_stats_url(self._get_url(metric_type))
return self._get(url, **query_options) | 0.005482 |
def doTranslate(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'all': '/translate/all'}):
'''
Translate the file from source language to destination language.
:param option:
:param urlOrPaths:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param services:
:return:
'''
paths = getPaths(urlOrPaths)
return [doTranslate1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in paths] | 0.006182 |
async def discover_slave(self, service, timeout, **kwargs):
"""Perform Slave discovery for specified service."""
# TODO: use kwargs to change how slaves are picked up
# (eg: round-robin, priority, random, etc)
idle_timeout = timeout
pools = self._pools[:]
for sentinel in pools:
try:
with async_timeout(timeout, loop=self._loop):
address = await self._get_slave_address(
sentinel, service) # add **kwargs
pool = self._slaves[service]
with async_timeout(timeout, loop=self._loop), \
contextlib.ExitStack() as stack:
conn = await pool._create_new_connection(address)
stack.callback(conn.close)
await self._verify_service_role(conn, 'slave')
stack.pop_all()
return conn
except asyncio.CancelledError:
raise
except asyncio.TimeoutError:
continue
except DiscoverError:
await asyncio.sleep(idle_timeout, loop=self._loop)
continue
except RedisError as err:
raise SlaveReplyError("Service {} error".format(service), err)
except Exception:
await asyncio.sleep(idle_timeout, loop=self._loop)
continue
raise SlaveNotFoundError("No slave found for {}".format(service)) | 0.001324 |
def clean(source):
"""
Clean up the source:
* Replace use of Fn::Join with Fn::Sub
"""
if isinstance(source, dict):
for key, value in source.items():
if key == "Fn::Join":
return convert_join(value)
else:
source[key] = clean(value)
elif isinstance(source, list):
return [clean(item) for item in source]
return source | 0.002387 |
def validate_maildirs(ctx, param, value):
""" Check that folders are maildirs. """
for path in value:
for subdir in MD_SUBDIRS:
if not os.path.isdir(os.path.join(path, subdir)):
raise click.BadParameter(
'{} is not a maildir (missing {!r} sub-directory).'.format(
path, subdir))
return value | 0.002611 |
def hasFeature(self, prop, check_softs=False):
"""Return if there is a property with that name."""
return prop in self.props or (check_softs and
any([fs.hasFeature(prop) for fs in self.props.get(SoftFeatures.SOFT, [])])) | 0.010909 |
def path_total_size(path_: str) -> int:
"""Compute total size of the given file/dir."""
if path.isfile(path_):
return path.getsize(path_)
total_size = 0
for root_dir, _, files in os.walk(path_):
for file_ in files:
total_size += path.getsize(path.join(root_dir, file_))
return total_size | 0.002985 |
def match_regex(self, regex: Pattern, required: bool = False,
meaning: str = "") -> str:
"""Parse input based on a regular expression .
Args:
regex: Compiled regular expression object.
required: Should the exception be raised on unexpected input?
meaning: Meaning of `regex` (for use in error messages).
Raises:
            UnexpectedInput: If the input does not match and `required` is True.
"""
mo = regex.match(self.input, self.offset)
if mo:
self.offset = mo.end()
return mo.group()
if required:
raise UnexpectedInput(self, meaning) | 0.004412 |
def _raise_error_if_not_of_type(arg, expected_type, arg_name=None):
"""
Check if the input is of expected type.
Parameters
----------
arg : Input argument.
expected_type : A type OR a list of types that the argument is expected
to be.
arg_name : The name of the variable in the function being used. No
name is assumed if set to None.
Examples
--------
_raise_error_if_not_of_type(sf, str, 'sf')
_raise_error_if_not_of_type(sf, [str, int], 'sf')
"""
display_name = "%s " % arg_name if arg_name is not None else "Argument "
lst_expected_type = [expected_type] if \
type(expected_type) == type else expected_type
err_msg = "%smust be of type %s " % (display_name,
' or '.join([x.__name__ for x in lst_expected_type]))
err_msg += "(not %s)." % type(arg).__name__
if not any(map(lambda x: isinstance(arg, x), lst_expected_type)):
raise TypeError(err_msg) | 0.002893 |
def encrypt_email(email):
"""
The default encryption function for storing emails in the database. This
uses AES and the encryption key defined in the applications configuration.
:param email:
The email address.
"""
aes = SimpleAES(flask.current_app.config["AES_KEY"])
return aes.encrypt(email) | 0.003021 |
def dumppickle(obj, fname, protocol=-1):
"""
Pickle object `obj` to file `fname`.
"""
with open(fname, 'wb') as fout: # 'b' for binary, needed on Windows
pickle.dump(obj, fout, protocol=protocol) | 0.004545 |
def create_content_if_changed(self, page, language, ctype, body):
"""Create a :class:`Content <pages.models.Content>` for a particular
page and language only if the content has changed from the last
time.
:param page: the concerned page object.
:param language: the wanted language.
:param ctype: the content type.
:param body: the content of the Content object.
"""
try:
content = self.filter(
page=page, language=language,
type=ctype).latest('creation_date')
if content.body == body:
return content
except self.model.DoesNotExist:
pass
content = self.create(
page=page, language=language, body=body,
type=ctype)
# Delete old revisions
if settings.PAGE_CONTENT_REVISION_DEPTH:
oldest_content = self.filter(
page=page, language=language,
type=ctype
).order_by('-creation_date')[settings.PAGE_CONTENT_REVISION_DEPTH:]
for c in oldest_content:
c.delete()
return content | 0.001704 |
def load(data_path):
"""
Extract data from provided file and return it as a string.
"""
with open(data_path, "r") as data_file:
raw_data = data_file.read()
data_file.close()
return raw_data | 0.008 |
def load_module_from_name(dotted_name, path=None, use_sys=True):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split("."), path, use_sys) | 0.001437 |
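A short illustrative call (equivalent to a normal import when the module is already on `sys.path`):

os_path = load_module_from_name('os.path')
print(os_path.join('tmp', 'file.txt'))  # the regular `os.path` module is returned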
def is_dicteq(dict1_, dict2_, almosteq_ok=True, verbose_err=True):
""" Checks to see if dicts are the same. Performs recursion. Handles numpy """
import utool as ut
assert len(dict1_) == len(dict2_), 'dicts are not of same length'
try:
for (key1, val1), (key2, val2) in zip(dict1_.items(), dict2_.items()):
assert key1 == key2, 'key mismatch'
assert type(val1) == type(val2), 'vals are not same type'
if HAVE_NUMPY and np.iterable(val1):
if almosteq_ok and ut.is_float(val1):
assert np.all(ut.almost_eq(val1, val2)), 'float vals are not within thresh'
else:
assert all([np.all(x1 == x2) for (x1, x2) in zip(val1, val2)]), 'np vals are different'
elif isinstance(val1, dict):
is_dicteq(val1, val2, almosteq_ok=almosteq_ok, verbose_err=verbose_err)
else:
assert val1 == val2, 'vals are different'
except AssertionError as ex:
if verbose_err:
ut.printex(ex)
return False
return True | 0.004533 |
def run_ansible(playbooks, inventory_path=None, roles=None, extra_vars=None,
tags=None, on_error_continue=False, basedir='.'):
"""Run Ansible.
Args:
playbooks (list): list of paths to the playbooks to run
inventory_path (str): path to the hosts file (inventory)
        extra_vars (dict): extra vars to pass
tags (list): list of tags to run
on_error_continue(bool): Don't throw any exception in case a host is
unreachable or the playbooks run with errors
Raises:
:py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an
error on a host and ``on_error_continue==False``
:py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is
unreachable (through ssh) and ``on_error_continue==False``
"""
inventory, variable_manager, loader, options = _load_defaults(
inventory_path=inventory_path,
roles=roles,
extra_vars=extra_vars,
tags=tags,
basedir=basedir
)
passwords = {}
for path in playbooks:
logger.info("Running playbook %s with vars:\n%s" % (path, extra_vars))
pbex = PlaybookExecutor(
playbooks=[path],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords
)
code = pbex.run()
stats = pbex._tqm._stats
hosts = stats.processed.keys()
result = [{h: stats.summarize(h)} for h in hosts]
results = {"code": code, "result": result, "playbook": path}
print(results)
failed_hosts = []
unreachable_hosts = []
for h in hosts:
t = stats.summarize(h)
if t["failures"] > 0:
failed_hosts.append(h)
if t["unreachable"] > 0:
unreachable_hosts.append(h)
if len(failed_hosts) > 0:
logger.error("Failed hosts: %s" % failed_hosts)
if not on_error_continue:
raise EnosFailedHostsError(failed_hosts)
if len(unreachable_hosts) > 0:
logger.error("Unreachable hosts: %s" % unreachable_hosts)
if not on_error_continue:
raise EnosUnreachableHostsError(unreachable_hosts) | 0.000862 |
def listcomprehension_walk2(self, node):
"""List comprehensions the way they are done in Python 2 and
sometimes in Python 3.
        They're much like other comprehensions, e.g. set comprehensions.
See if we can combine code.
"""
p = self.prec
self.prec = 27
code = Code(node[1].attr, self.scanner, self.currentclass)
ast = self.build_ast(code._tokens, code._customize)
self.customize(code._customize)
# skip over: sstmt, stmt, return, ret_expr
# and other singleton derivations
while (len(ast) == 1
or (ast in ('sstmt', 'return')
and ast[-1] in ('RETURN_LAST', 'RETURN_VALUE'))):
self.prec = 100
ast = ast[0]
n = ast[1]
# collection = node[-3]
collections = [node[-3]]
list_ifs = []
if self.version == 3.0 and n != 'list_iter':
# FIXME 3.0 is a snowflake here. We need
# special code for this. Not sure if this is totally
# correct.
stores = [ast[3]]
assert ast[4] == 'comp_iter'
n = ast[4]
# Find the list comprehension body. It is the inner-most
# node that is not comp_.. .
while n == 'comp_iter':
if n[0] == 'comp_for':
n = n[0]
stores.append(n[2])
n = n[3]
elif n[0] in ('comp_if', 'comp_if_not'):
n = n[0]
# FIXME: just a guess
if n[0].kind == 'expr':
list_ifs.append(n)
else:
list_ifs.append([1])
n = n[2]
pass
else:
break
pass
# Skip over n[0] which is something like: _[1]
self.preorder(n[1])
else:
assert n == 'list_iter'
stores = []
# Find the list comprehension body. It is the inner-most
# node that is not list_.. .
while n == 'list_iter':
n = n[0] # recurse one step
if n == 'list_for':
stores.append(n[2])
n = n[3]
if self.version >= 3.6 and n[0] == 'list_for':
# Dog-paddle down largely singleton reductions
# to find the collection (expr)
c = n[0][0]
if c == 'expr':
c = c[0]
# FIXME: grammar is wonky here? Is this really an attribute?
if c == 'attribute':
c = c[0]
collections.append(c)
pass
elif n in ('list_if', 'list_if_not'):
# FIXME: just a guess
if n[0].kind == 'expr':
list_ifs.append(n)
else:
list_ifs.append([1])
n = n[2]
pass
pass
assert n == 'lc_body', ast
self.preorder(n[0])
# FIXME: add indentation around "for"'s and "in"'s
if self.version < 3.6:
self.write(' for ')
self.preorder(stores[0])
self.write(' in ')
self.preorder(collections[0])
if list_ifs:
self.preorder(list_ifs[0])
pass
else:
for i, store in enumerate(stores):
self.write(' for ')
self.preorder(store)
self.write(' in ')
self.preorder(collections[i])
if i < len(list_ifs):
self.preorder(list_ifs[i])
pass
pass
self.prec = p | 0.001013 |
def parse_timedelta(value):
"""
Parses a string and return a datetime.timedelta.
:param value: string to parse
:type value: str
:return: timedelta object or None if value is None
:rtype: timedelta/None
:raise: TypeError when value is not string
:raise: ValueError when value is not proper timedelta string
"""
if value is None:
return None
if not isinstance(value, six.string_types):
raise TypeError('value must be a string type')
match = INTERVAL_REGEX.search(value)
if match:
data = match.groupdict()
return timedelta(**dict((key, int(data[key] or 0)) for key in data))
else:
raise ValueError("Value '%s' doesn't appear to be a valid timedelta "
"string" % value) | 0.001267 |
def ichunks_list(list_, chunksize):
"""
input must be a list.
SeeAlso:
ichunks
References:
http://stackoverflow.com/questions/434287/iterate-over-a-list-in-chunks
"""
return (list_[ix:ix + chunksize] for ix in range(0, len(list_), chunksize)) | 0.003521 |
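For example, the generator yields successive slices, with a shorter final chunk when the length is not a multiple of chunksize:

list(ichunks_list([1, 2, 3, 4, 5], 2))
# -> [[1, 2], [3, 4], [5]]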
def refresh(self):
"""Refresh reloads data from the server. It raises an error if it fails to get the object's metadata"""
self.metadata = self.db.read(self.path).json() | 0.016043 |
def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1] | 0.002574 |
def lt(self, event_property, value):
"""A less-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.lt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).lt(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.LT(event_property, value))
return c | 0.005141 |
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set):
"""
Construct a joint PSF model which consists of a sum of PSF's templated on
a specific model, but whose parameters are given by a table of objects.
Parameters
----------
template_psf_model : `astropy.modeling.Fittable2DModel` instance
The model to use for *individual* objects. Must have parameters named
``x_0``, ``y_0``, and ``flux``.
star_group : `~astropy.table.Table`
Table of stars for which the compound PSF will be constructed. It
        must have columns named ``x_0``, ``y_0``, and ``flux_0``.
    pars_to_set : dict
        Mapping of column names in ``star_group`` to the corresponding
        parameter names to set on each copy of the PSF model.
Returns
-------
group_psf
An `astropy.modeling` ``CompoundModel`` instance which is a sum of the
given PSF models.
"""
group_psf = None
for star in star_group:
psf_to_add = template_psf_model.copy()
for param_tab_name, param_name in pars_to_set.items():
setattr(psf_to_add, param_name, star[param_tab_name])
if group_psf is None:
# this is the first one only
group_psf = psf_to_add
else:
group_psf += psf_to_add
return group_psf | 0.00084 |
def defaultcolour(self, colour):
"""
Auxiliary method to choose a default colour.
Give me a user provided colour : if it is None, I change it to the default colour, respecting negative.
Plus, if the image is in RGB mode and you give me 128 for a gray, I translate this to the expected (128, 128, 128) ...
"""
        if colour is None:
if self.negative == True:
if self.pilimage.mode == "L" :
return 0
else :
return (0, 0, 0)
else :
if self.pilimage.mode == "L" :
return 255
else :
return (255, 255, 255)
else :
if self.pilimage.mode == "RGB" and type(colour) == type(0):
return (colour, colour, colour)
else :
return colour | 0.015625 |
def actnorm_3d(name, x, logscale_factor=3.):
"""Applies actnorm to each time-step independently.
There are a total of 2*n_channels*n_steps parameters learnt.
Args:
name: variable scope.
x: 5-D Tensor, (NTHWC)
logscale_factor: Increases the learning rate of the scale by
logscale_factor.
Returns:
x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = tf.unstack(x, axis=1)
x_normed = []
for ind, x_step in enumerate(x):
x_step, _ = actnorm("actnorm_%d" % ind, x_step,
logscale_factor=logscale_factor)
x_normed.append(x_step)
return tf.stack(x_normed, axis=1), None | 0.006702 |
def init_kernel(self):
'''
Initializes the covariance matrix with a guess at
the GP kernel parameters.
'''
if self.kernel_params is None:
X = self.apply_mask(self.fpix / self.flux.reshape(-1, 1))
y = self.apply_mask(self.flux) - np.dot(X, np.linalg.solve(
np.dot(X.T, X), np.dot(X.T, self.apply_mask(self.flux))))
white = np.nanmedian([np.nanstd(c) for c in Chunks(y, 13)])
amp = self.gp_factor * np.nanstd(y)
tau = 30.0
if self.kernel == 'Basic':
self.kernel_params = [white, amp, tau]
elif self.kernel == 'QuasiPeriodic':
self.kernel_params = [white, amp, 1., 20.] | 0.002703 |
def by_location(self, location, cc=None, radius=None, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a location specifier.
Args:
location - textual location specifier of form: "address, neighborhood, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional)
radius - search radius (in miles) (Optional)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-seperated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
        header, content = self._http_request(
            self.BASE_URL,
            location = location,
            cc = cc,
            radius = radius,
            term = term,
            num_biz_requested = num_biz_requested,
            category = category
        )
return json.loads(content) | 0.018709 |
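A hedged usage sketch for the method above; the client instance name, address, and response field names are assumptions rather than anything from the original library:
# Hypothetical client instance with credentials already configured.
response = yelp.by_location(
    "1600 Amphitheatre Pkwy, Mountain View, CA",
    term="coffee",
    radius=2,
    num_biz_requested=5,
)
for biz in response.get("businesses", []):
    print(biz.get("name"))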
def get_sigla(self, work):
"""Returns a list of all of the sigla for `work`.
:param work: name of work
:type work: `str`
:rtype: `list` of `str`
"""
return [os.path.splitext(os.path.basename(path))[0]
for path in glob.glob(os.path.join(self._path, work, '*.txt'))] | 0.006061 |
def delete_subtrie(self, key):
"""
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
"""
validate_is_bytes(key)
self.root_hash = self._set(
self.root_hash,
encode_to_bin(key),
value=b'',
if_delete_subtrie=True,
) | 0.006452 |
def _merge_doc(original, to_merge):
# type: (str, str) -> str
"""Merge two usage strings together.
Args:
original: The source of headers and initial section lines.
to_merge: The source for the additional section lines to append.
Returns:
A new usage string that contains information from both usage strings.
"""
if not original:
return to_merge or ''
if not to_merge:
return original or ''
sections = []
for name in ('usage', 'arguments', 'options'):
sections.append(_merge_section(
_get_section(name, original),
_get_section(name, to_merge)
))
return format_usage('\n\n'.join(s for s in sections).rstrip()) | 0.001372 |
def digest_chunks(chunks, algorithms=(hashlib.md5, hashlib.sha1)):
"""
    returns a list of base64-encoded digests, one per algorithm, computed
    over the chunks of data
"""
hashes = [algorithm() for algorithm in algorithms]
for chunk in chunks:
for h in hashes:
h.update(chunk)
return [_b64encode_to_str(h.digest()) for h in hashes] | 0.00277 |
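A minimal usage sketch for the helper above: stream a file in fixed-size chunks and get the MD5 and SHA-1 digests in one pass (the generator and file path are illustrative, not part of the original module):
def _read_in_chunks(path, chunk_size=64 * 1024):
    # Yield fixed-size binary chunks of a file until EOF.
    with open(path, "rb") as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            yield chunk

md5_b64, sha1_b64 = digest_chunks(_read_in_chunks("archive.tar.gz"))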
def _get_service_endpoint(context, svc, region=None, public=True):
"""
Parses the services dict to get the proper endpoint for the given service.
"""
region = _safe_region(region)
# If a specific context is passed, use that. Otherwise, use the global
# identity reference.
context = context or identity
url_type = {True: "public", False: "private"}[public]
svc_obj = context.services.get(svc)
if not svc_obj:
return None
ep = svc_obj.endpoints.get(region, {}).get(url_type)
if not ep:
# Try the "ALL" region, and substitute the actual region
ep = svc_obj.endpoints.get("ALL", {}).get(url_type)
return ep | 0.001473 |
def change_volume(self, increment):
"""调整音量大小"""
if increment == 1:
self.volume += 5
else:
self.volume -= 5
self.volume = max(min(self.volume, 100), 0) | 0.009662 |
def wait(self, timeout=None):
""" Wait for the job to complete, or a timeout to happen.
This is more efficient than the version in the base Job class, in that we can
use a call that blocks for the poll duration rather than a sleep. That means we
shouldn't block unnecessarily long and can also poll less.
Args:
timeout: how long to wait (in seconds) before giving up; default None which means no timeout.
Returns:
The QueryJob
"""
poll = 30
while not self._is_complete:
try:
query_result = self._api.jobs_query_results(self._job_id,
project_id=self._context.project_id,
page_size=0,
timeout=poll * 1000)
except Exception as e:
raise e
if query_result['jobComplete']:
if 'totalBytesProcessed' in query_result:
self._bytes_processed = int(query_result['totalBytesProcessed'])
self._cache_hit = query_result.get('cacheHit', None)
if 'totalRows' in query_result:
self._total_rows = int(query_result['totalRows'])
break
if timeout is not None:
timeout -= poll
if timeout <= 0:
break
self._refresh_state()
return self | 0.008876 |
@contextlib.contextmanager  # assumes `contextlib` is imported; required for `with pushd(...)` to work
def pushd(path):
""" A context that enters a given directory and restores the old state on exit.
The original directory is returned as the context variable.
"""
saved = os.getcwd()
os.chdir(path)
try:
yield saved
finally:
os.chdir(saved) | 0.006993 |
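With the contextmanager decorator in place, a typical use of pushd looks like this (the target directory is only an example):
with pushd("/tmp") as previous_dir:
    # Inside the block the process cwd is /tmp.
    print("now in:", os.getcwd(), "came from:", previous_dir)
# On exit the original working directory is restored, even if the block raised.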
def _get_path_entry_from_list(self, query_path):
""" Returns the config entry at query path
:param query_path: list(str), config header path to follow for entry
:return: (list, str, dict, OrderedDict), config entry requested
:raises: exceptions.ResourceNotFoundError
"""
cur_data = self.config_file_contents
try:
for child in query_path:
cur_data = cur_data[child]
return cur_data
except (AttributeError, KeyError):
raise errors.ResourceNotFoundError('Could not find query path %s in the config file contents' %
query_path) | 0.004367 |
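The lookup above is a plain nested-dict walk; a standalone sketch of the same traversal, with a made-up config structure and query path:
config_file_contents = {"database": {"host": "localhost", "port": 5432}}
entry = config_file_contents
for child in ["database", "port"]:
    entry = entry[child]
assert entry == 5432  # a missing key raises KeyError, mapped to ResourceNotFoundError above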
def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
"""Decorator for all exceptions that want to set exception class meta-data.
"""
# Check registry if we already have an exception with same code/sub-code
if _EXCEPTION_REGISTRY.get((code, sub_code)) is not None:
raise ValueError('BGPSException with code %d and sub-code %d '
'already defined.' % (code, sub_code))
def decorator(subclass):
"""Sets class constants for exception code and sub-code.
        If the given class is a sub-class of BGPSException, we set its class constants.
"""
if issubclass(subclass, BGPSException):
_EXCEPTION_REGISTRY[(code, sub_code)] = subclass
subclass.CODE = code
subclass.SUB_CODE = sub_code
subclass.DEF_DESC = def_desc
return subclass
return decorator | 0.001136 |
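A hedged example of applying the decorator; the class name, description, and code/sub-code values are illustrative and not taken from the original code base:
@add_bgp_error_metadata(code=6, sub_code=2, def_desc='administrative shutdown')
class AdminShutdown(BGPSException):
    """Raised when the peer administratively shuts down the session."""
    pass
# After decoration: AdminShutdown.CODE == 6 and AdminShutdown.SUB_CODE == 2,
# and the class is recorded in _EXCEPTION_REGISTRY under (6, 2).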
def accuracy(mod_y, ref_y, summary=True, name="accuracy"):
"""Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
"""
with tf.name_scope(name):
mod_pred = tf.argmax(mod_y, 1)
correct_pred = tf.equal(mod_pred, tf.argmax(ref_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if summary:
tf.summary.scalar('accuracy', accuracy)
return accuracy | 0.002601 |
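A short usage sketch, assuming TensorFlow 1.x graph mode with one-hot labels; the placeholder shapes are illustrative:
logits = tf.placeholder(tf.float32, [None, 10], name="model_output")
labels = tf.placeholder(tf.float32, [None, 10], name="reference_labels")
acc_op = accuracy(logits, labels, summary=False)
# acc_op evaluates to the fraction of rows where argmax(logits) == argmax(labels).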
def to_strings(self, use_colors=True):
"""Convert an edit script to a pair of strings representing the operation in a human readable way.
:param use_colors: Boolean indicating whether to use terminal color codes to color the output.
:return: Tuple with text corresponding to the first pronunciation and the text of the second one.
"""
edit_script = self.to_primitive()
colors = collections.defaultdict(str)
if use_colors:
colors['red'] = '\x1b[31m'
colors['normal'] = '\x1b[m'
colors['green'] = '\x1b[32m'
colors['on_red'] = '\x1b[41m'
src_txt = ''
dst_txt = ''
for op in edit_script:
if op['op_code'] == 'match':
width = max(len(op['from_symbol']), len(op['to_symbol']))
if op['from_symbol'] == op['to_symbol']:
src_txt += u'{green}{from_symbol: ^{width}}{normal}'.format(**_combine_dicts(colors,
op,
{'width': width}))
dst_txt += u'{green}{to_symbol: ^{width}}{normal}'.format(**_combine_dicts(colors,
op,
{'width': width}))
else:
src_txt += u'{red}{from_symbol: ^{width}}{normal}'.format(**_combine_dicts(colors,
op,
{'width': width}))
dst_txt += u'{red}{to_symbol: ^{width}}{normal}'.format(**_combine_dicts(colors,
op,
{'width': width}))
elif op['op_code'] == 'insert':
space = ' '*len(op['to_symbol'])
src_txt += u'{on_red}{space}{normal}'.format(space=space, **_combine_dicts(colors, op))
dst_txt += u'{red}{to_symbol}{normal}'.format(**_combine_dicts(colors, op))
elif op['op_code'] == 'delete':
space = ' '*len(op['from_symbol'])
src_txt += u'{red}{from_symbol}{normal}'.format(**_combine_dicts(colors, op))
dst_txt += u'{on_red}{space}{normal}'.format(space=space, **_combine_dicts(colors, op))
elif op['op_code'] == 'noninsert':
continue
src_txt += ' '
dst_txt += ' '
return src_txt, dst_txt | 0.007189 |
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetCredentials or SetDatabaseName methods.
"""
if not hasattr(output_module, 'SetCredentials'):
raise errors.BadConfigObject('Unable to set username information.')
if not hasattr(output_module, 'SetDatabaseName'):
raise errors.BadConfigObject('Unable to set database information.')
username = cls._ParseStringOption(
options, 'username', default_value=cls._DEFAULT_USERNAME)
password = cls._ParseStringOption(
options, 'password', default_value=cls._DEFAULT_PASSWORD)
name = cls._ParseStringOption(
options, 'db_name', default_value=cls._DEFAULT_NAME)
output_module.SetCredentials(username=username, password=password)
output_module.SetDatabaseName(name)
server_config.ServerArgumentsHelper.ParseOptions(options, output_module) | 0.00271 |
def _new_packet_cb(self, packet):
"""Callback for newly arrived packets for the memory port"""
chan = packet.channel
cmd = packet.data[0]
payload = packet.data[1:]
if chan == CHAN_INFO:
if cmd == CMD_INFO_NBR:
self.nbr_of_mems = payload[0]
logger.info('{} memories found'.format(self.nbr_of_mems))
# Start requesting information about the memories,
# if there are any...
if self.nbr_of_mems > 0:
if not self._getting_count:
self._getting_count = True
logger.debug('Requesting first id')
pk = CRTPPacket()
pk.set_header(CRTPPort.MEM, CHAN_INFO)
pk.data = (CMD_INFO_DETAILS, 0)
self.cf.send_packet(pk, expected_reply=(
CMD_INFO_DETAILS, 0))
else:
self._refresh_callback()
if cmd == CMD_INFO_DETAILS:
# Did we get a good reply, otherwise try again:
if len(payload) < 5:
# Workaround for 1-wire bug when memory is detected
# but updating the info crashes the communication with
# the 1-wire. Fail by saying we only found 1 memory
# (the I2C).
logger.error(
'-------->Got good count, but no info on mem!')
self.nbr_of_mems = 1
if self._refresh_callback:
self._refresh_callback()
self._refresh_callback = None
return
# Create information about a new memory
# Id - 1 byte
mem_id = payload[0]
# Type - 1 byte
mem_type = payload[1]
# Size 4 bytes (as addr)
mem_size = struct.unpack('I', payload[2:6])[0]
# Addr (only valid for 1-wire?)
mem_addr_raw = struct.unpack('B' * 8, payload[6:14])
mem_addr = ''
for m in mem_addr_raw:
mem_addr += '{:02X}'.format(m)
if (not self.get_mem(mem_id)):
if mem_type == MemoryElement.TYPE_1W:
mem = OWElement(id=mem_id, type=mem_type,
size=mem_size,
addr=mem_addr, mem_handler=self)
self.mem_read_cb.add_callback(mem.new_data)
self.mem_write_cb.add_callback(mem.write_done)
self._ow_mems_left_to_update.append(mem.id)
elif mem_type == MemoryElement.TYPE_I2C:
mem = I2CElement(id=mem_id, type=mem_type,
size=mem_size,
mem_handler=self)
self.mem_read_cb.add_callback(mem.new_data)
self.mem_write_cb.add_callback(mem.write_done)
elif mem_type == MemoryElement.TYPE_DRIVER_LED:
mem = LEDDriverMemory(id=mem_id, type=mem_type,
size=mem_size, mem_handler=self)
logger.debug(mem)
self.mem_read_cb.add_callback(mem.new_data)
self.mem_write_cb.add_callback(mem.write_done)
elif mem_type == MemoryElement.TYPE_LOCO:
mem = LocoMemory(id=mem_id, type=mem_type,
size=mem_size, mem_handler=self)
logger.debug(mem)
self.mem_read_cb.add_callback(mem.new_data)
elif mem_type == MemoryElement.TYPE_TRAJ:
mem = TrajectoryMemory(id=mem_id, type=mem_type,
size=mem_size, mem_handler=self)
logger.debug(mem)
self.mem_write_cb.add_callback(mem.write_done)
elif mem_type == MemoryElement.TYPE_LOCO2:
mem = LocoMemory2(id=mem_id, type=mem_type,
size=mem_size, mem_handler=self)
logger.debug(mem)
self.mem_read_cb.add_callback(mem.new_data)
else:
mem = MemoryElement(id=mem_id, type=mem_type,
size=mem_size, mem_handler=self)
logger.debug(mem)
self.mems.append(mem)
self.mem_added_cb.call(mem)
self._fetch_id = mem_id + 1
if self.nbr_of_mems - 1 >= self._fetch_id:
logger.debug(
'Requesting information about memory {}'.format(
self._fetch_id))
pk = CRTPPacket()
pk.set_header(CRTPPort.MEM, CHAN_INFO)
pk.data = (CMD_INFO_DETAILS, self._fetch_id)
self.cf.send_packet(pk, expected_reply=(
CMD_INFO_DETAILS, self._fetch_id))
else:
logger.debug(
'Done getting all the memories, start reading the OWs')
ows = self.get_mems(MemoryElement.TYPE_1W)
# If there are any OW mems start reading them, otherwise
# we are done
for ow_mem in ows:
ow_mem.update(self._mem_update_done)
if len(ows) == 0:
if self._refresh_callback:
self._refresh_callback()
self._refresh_callback = None
if chan == CHAN_WRITE:
id = cmd
(addr, status) = struct.unpack('<IB', payload[0:5])
logger.debug(
'WRITE: Mem={}, addr=0x{:X}, status=0x{}'.format(
id, addr, status))
# Find the read request
if id in self._write_requests:
self._write_requests_lock.acquire()
wreq = self._write_requests[id][0]
if status == 0:
if wreq.write_done(addr):
# self._write_requests.pop(id, None)
# Remove the first item
self._write_requests[id].pop(0)
self.mem_write_cb.call(wreq.mem, wreq.addr)
# Get a new one to start (if there are any)
if len(self._write_requests[id]) > 0:
self._write_requests[id][0].start()
else:
logger.debug(
'Status {}: write resending...'.format(status))
wreq.resend()
self._write_requests_lock.release()
if chan == CHAN_READ:
id = cmd
(addr, status) = struct.unpack('<IB', payload[0:5])
data = struct.unpack('B' * len(payload[5:]), payload[5:])
logger.debug('READ: Mem={}, addr=0x{:X}, status=0x{}, '
'data={}'.format(id, addr, status, data))
# Find the read request
if id in self._read_requests:
logger.debug(
'READING: We are still interested in request for '
'mem {}'.format(id))
rreq = self._read_requests[id]
if status == 0:
if rreq.add_data(addr, payload[5:]):
self._read_requests.pop(id, None)
self.mem_read_cb.call(rreq.mem, rreq.addr, rreq.data)
else:
logger.debug('Status {}: resending...'.format(status))
rreq.resend() | 0.000247 |
def group(self):
"Group inherited from main element"
if self.main and self.main.group != type(self.main).__name__:
return self.main.group
else:
return 'AdjointLayout' | 0.014019 |
def _new_wire(self, source, sinks=None):
"""Create a new :py:class:`._Wire` with a unique routing key."""
# Assign sequential routing key to new nets.
wire = _Wire(source, sinks if sinks is not None else [], len(self._wires))
self._wires.append(wire)
return wire | 0.009901 |
def __fetch_1_27(self, from_date=None):
"""Fetch the pages from the backend url for MediaWiki >=1.27
The method retrieves, from a MediaWiki url, the
wiki pages.
:returns: a generator of pages
"""
logger.info("Looking for pages at url '%s'", self.url)
npages = 0 # number of pages processed
tpages = 0 # number of total pages
pages_done = [] # pages already retrieved in reviews API
namespaces_contents = self.__get_namespaces_contents()
arvcontinue = '' # pagination for getting revisions and their pages
while arvcontinue is not None:
raw_pages = self.client.get_pages_from_allrevisions(namespaces_contents, from_date, arvcontinue)
data_json = json.loads(raw_pages)
arvcontinue = data_json['continue']['arvcontinue'] if 'continue' in data_json else None
pages_json = data_json['query']['allrevisions']
for page in pages_json:
if page['pageid'] in pages_done:
logger.debug("Page %s already processed; skipped", page['pageid'])
continue
tpages += 1
pages_done.append(page['pageid'])
page_reviews = self.__get_page_reviews(page)
if not page_reviews:
logger.warning("Revisions not found in %s [page id: %s], page skipped",
page['title'], page['pageid'])
continue
yield page_reviews
npages += 1
logger.info("Total number of pages: %i, skipped %i", tpages, tpages - npages) | 0.004182 |
def _IsDirectory(parent, item):
"""Helper that returns if parent/item is a directory."""
return tf.io.gfile.isdir(os.path.join(parent, item)) | 0.02069 |
def mse(test, ref, mask=None):
"""Mean Squared Error (MSE)
Calculate the MSE between a test image and a reference image.
Parameters
----------
ref : np.ndarray
the reference image
test : np.ndarray
the tested image
mask : np.ndarray, optional
the mask for the ROI
Notes
-----
    Compute the metric only on the magnitude.
    MSE = 1/N * ||ref - test||_2^2
Returns
-------
mse: float, the mse
"""
test, ref, mask = _preprocess_input(test, ref, mask)
if mask is not None:
test = mask * test
ref = mask * ref
return np.mean(np.square(test - ref)) | 0.001555 |
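A quick numeric check of the quantity the function computes after its (unseen) input preprocessing; the arrays are illustrative:
import numpy as np

ref = np.array([1.0, 2.0, 3.0, 4.0])
test = np.array([1.0, 2.0, 3.0, 6.0])
# Mean of squared differences: (0 + 0 + 0 + (6 - 4)**2) / 4 = 1.0
print(np.mean(np.square(test - ref)))  # -> 1.0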
def get_diff_str(self, element, length):
'''get_diff_str
High-level api: Produce a string that indicates the difference between
two models.
Parameters
----------
element : `Element`
A node in model tree.
length : `int`
String length that has been consumed.
Returns
-------
str
A string that indicates the difference between two models.
'''
spaces = ' '*(self.get_width(element) - length)
return spaces + element.get('diff') | 0.003509 |