def clean_title(self, title):
"""Clean title with the use of og:site_name
in this case try to get rid of site name
and use TITLE_SPLITTERS to reformat title
"""
# check if we have the site name in opengraph data
if "site_name" in list(self.article.opengraph.keys()):
site_name = self.article.opengraph['site_name']
# remove the site name from title
title = title.replace(site_name, '').strip()
elif (self.article.schema and "publisher" in self.article.schema and
"name" in self.article.schema["publisher"]):
site_name = self.article.schema["publisher"]["name"]
# remove the site name from title
title = title.replace(site_name, '').strip()
# try to remove the domain from url
if self.article.domain:
pattern = re.compile(self.article.domain, re.IGNORECASE)
title = pattern.sub("", title).strip()
# split the title into words, e.g.
# TechCrunch | my wonderful article
# my wonderful article | TechCrunch
title_words = title.split()
# check if the first word is in TITLE_SPLITTERS
# if so remove it
if title_words and title_words[0] in TITLE_SPLITTERS:
title_words.pop(0)
# check for a title that is empty or consists of only a
# title splitter to avoid an IndexError below
if not title_words:
return ""
# check if the last word is in TITLE_SPLITTERS
# if so remove it
if title_words[-1] in TITLE_SPLITTERS:
title_words.pop(-1)
# rebuild the title
title = " ".join(title_words).strip()
return title
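# A runnable sketch of the splitter-trimming step above, using a hypothetical
# TITLE_SPLITTERS set (the real constant lives in the surrounding module):
TITLE_SPLITTERS_DEMO = {"|", "-", ":"}

def trim_splitters_demo(title):
    # Mirror clean_title's word-level handling: drop a leading or trailing
    # splitter token left over after the site name was removed.
    words = title.split()
    if words and words[0] in TITLE_SPLITTERS_DEMO:
        words.pop(0)
    if words and words[-1] in TITLE_SPLITTERS_DEMO:
        words.pop(-1)
    return " ".join(words)

# trim_splitters_demo("my wonderful article |")  -> "my wonderful article"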
def prompt_choices(name, choices, default=None, no_choice=('none',)):
"""
Grabs user input from command line from set of provided choices.
:param name: prompt text
:param choices: list or tuple of available choices.
:param default: default value if no input provided.
:param no_choice: acceptable list of strings for "null choice"
"""
_choices = []
options = []
for choice in choices:
options.append(choice)
_choices.append(choice)
while True:
rv = prompt(name + '? - (%s)' % ', '.join(options), default)
rv = rv.lower()
if rv in no_choice:
return None
if rv in _choices:
return rv
def handleMethodCallMessage(self, msg):
"""
Handles DBus MethodCall messages on behalf of the DBus Connection and
dispatches them to the appropriate exported object
"""
if (
msg.interface == 'org.freedesktop.DBus.Peer'
and msg.member == 'Ping'
):
r = message.MethodReturnMessage(
msg.serial,
destination=msg.sender,
)
self.conn.sendMessage(r)
return
if (
msg.interface == 'org.freedesktop.DBus.Introspectable'
and msg.member == 'Introspect'
):
xml = introspection.generateIntrospectionXML(
msg.path,
self.exports,
)
if xml is not None:
r = message.MethodReturnMessage(
msg.serial,
body=[xml],
destination=msg.sender,
signature='s',
)
self.conn.sendMessage(r)
return
# Try to get object from complete object path
o = self.exports.get(msg.path, None)
if o is None:
self._send_err(
msg,
'org.freedesktop.DBus.Error.UnknownObject',
'%s is not an object provided by this process.' % (msg.path),
)
return
if (
msg.interface == 'org.freedesktop.DBus.ObjectManager'
and msg.member == 'GetManagedObjects'
):
i_and_p = self.getManagedObjects(o.getObjectPath())
r = message.MethodReturnMessage(
msg.serial,
body=[i_and_p],
destination=msg.sender,
signature='a{oa{sa{sv}}}',
)
self.conn.sendMessage(r)
return
i = None
for x in o.getInterfaces():
if msg.interface:
if x.name == msg.interface:
i = x
break
else:
if msg.member in x.methods:
i = x
break
m = None
if i:
m = i.methods.get(msg.member, None)
if m is None:
self._send_err(
msg,
'org.freedesktop.DBus.Error.UnknownMethod',
(
'Method "%s" with signature "%s" on interface "%s" '
'doesn\'t exist'
) % (
msg.member, msg.signature or '',
msg.interface or '(null)',
),
)
return
msig = msg.signature if msg.signature is not None else ''
esig = m.sigIn if m.sigIn is not None else ''
if esig != msig:
self._send_err(
msg,
'org.freedesktop.DBus.Error.InvalidArgs',
'Call to %s has wrong args (%s, expected %s)' %
(msg.member, msg.signature or '', m.sigIn or '')
)
return
d = defer.maybeDeferred(
o.executeMethod,
i,
msg.member,
msg.body,
msg.sender,
)
if msg.expectReply:
def send_reply(return_values):
if isinstance(return_values, (list, tuple)):
if m.nret == 1:
return_values = [return_values]
else:
return_values = [return_values]
r = message.MethodReturnMessage(
msg.serial,
body=return_values,
destination=msg.sender,
signature=m.sigOut,
)
self.conn.sendMessage(r)
def send_error(err):
e = err.value
errMsg = err.getErrorMessage()
name = None
if hasattr(e, 'dbusErrorName'):
name = e.dbusErrorName
if name is None:
name = 'org.txdbus.PythonException.' + e.__class__.__name__
try:
marshal.validateErrorName(name)
except error.MarshallingError:
errMsg = ('!!(Invalid error name "%s")!! ' % name) + errMsg
name = 'org.txdbus.InvalidErrorName'
r = message.ErrorMessage(name, msg.serial,
body=[errMsg],
signature='s',
destination=msg.sender)
self.conn.sendMessage(r)
d.addCallback(send_reply)
d.addErrback(send_error)
def dissociate_values_or_ranges(self, vlan_id_range):
"""
Build a list of vlan ids given a combination of ranges and/or values
Examples:
>>> enet.dissociate_values_or_ranges('1-2,5')
[1, 2, 5]
>>> enet.dissociate_values_or_ranges('5')
[1, 2, 3, 4, 5]
>>> enet.dissociate_values_or_ranges('4-5,7-8')
[4, 5, 7, 8]
Args:
vlan_id_range: A combination of values or ranges. For example, '1-10,50,51,500-700'.
Returns:
list: vlan ids
"""
values_or_ranges = vlan_id_range.split(',')
vlan_ids = []
# The expected result is different if the vlan_id_range contains only one value
if len(values_or_ranges) == 1 and '-' not in values_or_ranges[0]:
vlan_ids = list(range(1, int(values_or_ranges[0]) + 1))
else:
for value_or_range in values_or_ranges:
value_or_range = value_or_range.strip()  # str.strip() returns a new string; keep the result
if '-' not in value_or_range:
vlan_ids.append(int(value_or_range))
else:
start, end = value_or_range.split('-')
range_ids = range(int(start), int(end) + 1)
vlan_ids.extend(range_ids)
return vlan_ids
def msg_curse(self, args=None, max_width=10):
"""Return the list to display in the UI."""
# Init the return message
ret = []
# Only process if stats exist...
if not self.stats or self.is_disable():
return ret
# Define the data: Bar (default behavior) or Sparkline
sparkline_tag = False
if self.args.sparkline and self.history_enable():
data = Sparkline(max_width)
sparkline_tag = data.available
if not sparkline_tag:
# Fallback to bar if Sparkline module is not installed
data = Bar(max_width)
# Build the string message
if 'cpu_name' in self.stats and 'cpu_hz_current' in self.stats and 'cpu_hz' in self.stats:
msg_name = '{} - '.format(self.stats['cpu_name'])
msg_freq = '{:.2f}/{:.2f}GHz'.format(self._hz_to_ghz(self.stats['cpu_hz_current']),
self._hz_to_ghz(self.stats['cpu_hz']))
if len(msg_name + msg_freq) - 6 <= max_width:
ret.append(self.curse_add_line(msg_name))
ret.append(self.curse_add_line(msg_freq))
ret.append(self.curse_new_line())
for key in ['cpu', 'mem', 'swap']:
if key == 'cpu' and args.percpu:
if sparkline_tag:
raw_cpu = self.get_raw_history(item='percpu', nb=data.size)
for cpu_index, cpu in enumerate(self.stats['percpu']):
if sparkline_tag:
# Sparkline displays a history
data.percents = [i[1][cpu_index]['total'] for i in raw_cpu]
# A simple padding in order to align metrics to the right
data.percents += [None] * (data.size - len(data.percents))
else:
# Bar displays only the last value
data.percent = cpu['total']
if cpu[cpu['key']] < 10:
msg = '{:3}{} '.format(key.upper(), cpu['cpu_number'])
else:
msg = '{:4} '.format(cpu['cpu_number'])
ret.extend(self._msg_create_line(msg, data, key))
ret.append(self.curse_new_line())
else:
if sparkline_tag:
# Sparkline displays a history
data.percents = [i[1] for i in self.get_raw_history(item=key, nb=data.size)]
# A simple padding in order to align metrics to the right
data.percents += [None] * (data.size - len(data.percents))
else:
# Bar displays only the last value
data.percent = self.stats[key]
msg = '{:4} '.format(key.upper())
ret.extend(self._msg_create_line(msg, data, key))
ret.append(self.curse_new_line())
# Remove the last new line
ret.pop()
# Return the message with decoration
return ret
def quit(self):
"""Close threads and socket."""
# stop all threads and close the socket
self.receive_thread.stopped = True
# self.receive_thread._Thread__stop()
self.message_thread.stopped = True
# self.message_thread._Thread__stop()
# self.ping_thread.stopped = True
# self.ping_thread._Thread__stop()
self.receive_thread.join()
_LOGGER.info('receive_thread exited')
self.message_thread.join()
_LOGGER.info('message_thread exited')
self.socket.close()
def connect(command, data=None, env=None, cwd=None):
"""Spawns a new process from the given command."""
# TODO: support piped commands
command_str = expand_args(command).pop()  # last argv list produced by the expansion
environ = dict(os.environ)
environ.update(env or {})
process = subprocess.Popen(command_str,
universal_newlines=True,
shell=False,
env=environ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
cwd=cwd,
)
return ConnectedCommand(process=process)
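# A self-contained sketch of the same spawning pattern with plain subprocess.
# ConnectedCommand and expand_args belong to the surrounding library, so this
# only mirrors the call shape, not their API:
import os
import subprocess

def connect_demo(argv, env=None, cwd=None):
    # Merge the caller's env on top of the current environment, as above.
    environ = dict(os.environ)
    environ.update(env or {})
    return subprocess.Popen(argv, universal_newlines=True, shell=False,
                            env=environ, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            bufsize=0, cwd=cwd)

# out, _ = connect_demo(['echo', 'hello']).communicate()  # -> 'hello\n'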
def export_graphviz(self, fade_nodes=None):
"""Returns a string, Graphviz script for visualizing the program.
Parameters
----------
fade_nodes : list, optional
A list of node indices to fade out for showing which were removed
during evolution.
Returns
-------
output : string
The Graphviz script to plot the tree representation of the program.
"""
terminals = []
if fade_nodes is None:
fade_nodes = []
output = 'digraph program {\nnode [style=filled]\n'
for i, node in enumerate(self.program):
fill = '#cecece'
if isinstance(node, _Function):
if i not in fade_nodes:
fill = '#136ed4'
terminals.append([node.arity, i])
output += ('%d [label="%s", fillcolor="%s"] ;\n'
% (i, node.name, fill))
else:
if i not in fade_nodes:
fill = '#60a6f6'
if isinstance(node, int):
if self.feature_names is None:
feature_name = 'X%s' % node
else:
feature_name = self.feature_names[node]
output += ('%d [label="%s", fillcolor="%s"] ;\n'
% (i, feature_name, fill))
else:
output += ('%d [label="%.3f", fillcolor="%s"] ;\n'
% (i, node, fill))
if i == 0:
# A degenerative program of only one node
return output + '}'
terminals[-1][0] -= 1
terminals[-1].append(i)
while terminals[-1][0] == 0:
output += '%d -> %d ;\n' % (terminals[-1][1],
terminals[-1][-1])
terminals[-1].pop()
if len(terminals[-1]) == 2:
parent = terminals[-1][-1]
terminals.pop()
if not terminals:
return output + '}'
terminals[-1].append(parent)
terminals[-1][0] -= 1
# We should never get here
return None
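# A hedged rendering sketch: `program` stands in for a fitted program object
# exposing export_graphviz(); the graphviz package is an assumed extra
# dependency, not implied by the code above.
#
#   import graphviz
#   graphviz.Source(program.export_graphviz()).render('tree', format='png')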
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def get_input_files(oqparam, hazard=False):
"""
:param oqparam: an OqParam instance
:param hazard: if True, consider only the hazard files
:returns: input path names in a specific order
"""
fnames = [] # files entering in the checksum
for key in oqparam.inputs:
fname = oqparam.inputs[key]
if hazard and key not in ('site_model', 'source_model_logic_tree',
'gsim_logic_tree', 'source'):
continue
# collect .hdf5 tables for the GSIMs, if any
elif key == 'gsim_logic_tree':
gsim_lt = get_gsim_lt(oqparam)
for gsims in gsim_lt.values.values():
for gsim in gsims:
table = getattr(gsim, 'GMPE_TABLE', None)
if table:
fnames.append(table)
fnames.append(fname)
elif key == 'source_model': # UCERF
f = oqparam.inputs['source_model']
fnames.append(f)
fname = nrml.read(f).sourceModel.UCERFSource['filename']
fnames.append(os.path.join(os.path.dirname(f), fname))
elif key == 'exposure': # fname is a list
for exp in asset.Exposure.read_headers(fname):
fnames.extend(exp.datafiles)
fnames.extend(fname)
elif isinstance(fname, dict):
fnames.extend(fname.values())
elif isinstance(fname, list):
for f in fname:
if f == oqparam.input_dir:
raise InvalidFile('%s: there is an empty path in %s' %
(oqparam.inputs['job_ini'], key))
fnames.extend(fname)
elif key == 'source_model_logic_tree':
for smpaths in logictree.collect_info(fname).smpaths.values():
fnames.extend(smpaths)
fnames.append(fname)
else:
fnames.append(fname)
return sorted(fnames)
def generate_video(source, outname, settings, options=None):
"""Video processor.
:param source: path to a video
:param outname: path to the generated video
:param settings: settings dict
:param options: array of options passed to ffmpeg
"""
logger = logging.getLogger(__name__)
# Don't transcode if the source is in the required format and
# has fitting dimensions; copy it instead.
converter = settings['video_converter']
w_src, h_src = video_size(source, converter=converter)
w_dst, h_dst = settings['video_size']
logger.debug('Video size: %i, %i -> %i, %i', w_src, h_src, w_dst, h_dst)
base, src_ext = splitext(source)
base, dst_ext = splitext(outname)
if dst_ext == src_ext and w_src <= w_dst and h_src <= h_dst:
logger.debug('Video is smaller than the max size, copying it instead')
shutil.copy(source, outname)
return
# http://stackoverflow.com/questions/8218363/maintaining-ffmpeg-aspect-ratio
# + I made a drawing on paper to figure this out
if h_dst * w_src < h_src * w_dst:
# biggest fitting dimension is height
resize_opt = ['-vf', "scale=trunc(oh*a/2)*2:%i" % h_dst]
else:
# biggest fitting dimension is width
resize_opt = ['-vf', "scale=%i:trunc(ow/a/2)*2" % w_dst]
# do not resize if input dimensions are smaller than output dimensions
if w_src <= w_dst and h_src <= h_dst:
resize_opt = []
# Encoding options improved, thanks to
# http://ffmpeg.org/trac/ffmpeg/wiki/vpxEncodingGuide
cmd = [converter, '-i', source, '-y'] # -y to overwrite output files
if options is not None:
cmd += options
cmd += resize_opt + [outname]
logger.debug('Processing video: %s', ' '.join(cmd))
check_subprocess(cmd, source, outname)
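# A quick numeric check of the fit test above. A 1440x1080 (4:3) source into a
# 1280x720 box gives h_dst*w_src = 720*1440 = 1036800 < h_src*w_dst =
# 1080*1280 = 1382400, so height is the limiting dimension: ffmpeg scales to
# height 720 and derives the width from the aspect ratio, with
# trunc(oh*a/2)*2 keeping it even for the encoder (here 960x720).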
def avl_rotate_double(root, direction):
"""
Double rotation, either 0 (left) or 1 (right).
Figure:
            c,1 (right)
            ----------->
      a              a            c
     /     b,0      /    a,1     / \
    b     --->     b    -->     b   a
     \            /
      c          c
"""
other_side = 1 - direction
root[other_side] = avl_rotate_single(root[other_side], other_side)
return avl_rotate_single(root, direction)
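# avl_rotate_single is assumed by the function above. A common formulation,
# with nodes indexed by direction (node[0] = left child, node[1] = right
# child), is the classic single rotation -- a sketch, not necessarily the
# library's own implementation:
def avl_rotate_single_demo(root, direction):
    other_side = 1 - direction
    save = root[other_side]
    root[other_side] = save[direction]  # adopt the inner subtree
    save[direction] = root              # old root becomes the child
    return save                         # new subtree root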
def object_from_json(self, object_type, object_json, parent=None):
"""
Given a blob of JSON representing a Zenpy object, recursively deserialize it and
any nested objects it contains. This method also adds the deserialized object
to the relevant cache if applicable.
"""
if not isinstance(object_json, dict):
return object_json
obj = self.instantiate_object(object_type, parent)
for key, value in object_json.items():
if key not in self.skip_attrs:
key, value = self._deserialize(key, obj, value)
if isinstance(value, dict):
value = ProxyDict(value, dirty_callback=getattr(
obj, '_dirty_callback', None))
elif isinstance(value, list):
value = ProxyList(value, dirty_callback=getattr(
obj, '_dirty_callback', None))
setattr(obj, key, value)
if hasattr(obj, '_clean_dirty'):
obj._clean_dirty()
self.api.cache.add(obj)
return obj
def create_graph_from_data(self, data):
"""Use CGNN to create a graph from scratch. All the possible structures
are tested, which leads to a super exponential complexity. It would be
preferable to start from a graph skeleton for large graphs.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
Returns:
networkx.DiGraph: Solution given by CGNN.
"""
warnings.warn("An exhaustive search of the causal structure of CGNN without"
" skeleton is super-exponential in the number of variables.")
# Building all possible candidates:
nb_vars = len(list(data.columns))
names = list(data.columns)  # keep the column names; `data` is rebound to an ndarray below
data = scale(data.values).astype('float32')
candidates = [np.reshape(np.array(i), (nb_vars, nb_vars)) for i in itertools.product([0, 1], repeat=nb_vars*nb_vars)
if (np.trace(np.reshape(np.array(i), (nb_vars, nb_vars))) == 0
and nx.is_directed_acyclic_graph(nx.DiGraph(np.reshape(np.array(i), (nb_vars, nb_vars)))))]
warnings.warn("A total of {} graphs will be evaluated.".format(len(candidates)))
scores = [parallel_graph_evaluation(data, i, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu,
nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs,
test_epochs=self.test_epochs, verbose=self.verbose) for i in candidates]
final_candidate = candidates[scores.index(min(scores))]
output = np.zeros(final_candidate.shape)
# Retrieve the confidence score on each edge.
for (i, j), x in np.ndenumerate(final_candidate):
if x > 0:
cand = final_candidate.copy()  # copy so the winning candidate is not mutated
cand[i, j] = 0
# list.index compares ndarrays ambiguously; search with np.array_equal instead
alt_idx = next(idx for idx, c in enumerate(candidates)
if np.array_equal(c, cand))
output[i, j] = min(scores) - scores[alt_idx]
return nx.relabel_nodes(nx.DiGraph(output),
{idx: name for idx, name in enumerate(names)})
def expect_column_values_to_be_between(self,
column,
min_value=None,
max_value=None,
allow_cross_type_comparisons=None,
parse_strings_as_datetimes=None,
output_strftime_format=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect column entries to be between a minimum value and a maximum value (inclusive).
expect_column_values_to_be_between is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.
Args:
column (str): \
The column name.
min_value (comparable type or None): The minimum value for a column entry.
max_value (comparable type or None): The maximum value for a column entry.
Keyword Args:
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
parse_strings_as_datetimes (boolean or None) : If True, parse min_value, max_value, and all non-null column\
values to datetimes before making comparisons.
output_strftime_format (str or None): \
A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly percent of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has no maximum.
See Also:
expect_column_value_lengths_to_be_between
"""
raise NotImplementedError
def plot_gross_leverage(returns, positions, ax=None, **kwargs):
"""
Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
gl = timeseries.gross_lev(positions)
gl.plot(lw=0.5, color='limegreen', legend=False, ax=ax, **kwargs)
ax.axhline(gl.mean(), color='g', linestyle='--', lw=3)
ax.set_title('Gross leverage')
ax.set_ylabel('Gross leverage')
ax.set_xlabel('')
return ax
def ip_address(value,
allow_empty = False,
**kwargs):
"""Validate that ``value`` is a valid IP address.
.. note::
First, the validator will check if the address is a valid IPv6 address.
If that doesn't work, the validator will check if the address is a valid
IPv4 address.
If neither works, the validator will raise an error (as always).
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises InvalidIPAddressError: if ``value`` is not a valid IP address or empty with
``allow_empty`` set to ``True``
"""
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if is_py2 and value and isinstance(value, unicode):
value = value.encode('utf-8')
try:
value = ipv6(value, force_run = True) # pylint: disable=E1123
ipv6_failed = False
except ValueError:
ipv6_failed = True
if ipv6_failed:
try:
value = ipv4(value, force_run = True) # pylint: disable=E1123
except ValueError:
raise errors.InvalidIPAddressError('value (%s) is not a valid IPv6 or '
'IPv4 address' % value)
return value
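# Hedged usage sketch (the errors module comes from the surrounding package):
#
#   ip_address('10.0.0.1')            # -> '10.0.0.1'
#   ip_address('::1')                 # -> '::1' (IPv6 branch matches first)
#   ip_address('', allow_empty=True)  # -> None
#   ip_address('not-an-ip')           # raises InvalidIPAddressError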
def profile_settings_validations(self):
"""Create 2 default validations rules for each output variable.
* One validation rule to check that the output variable is not null.
* One validation rule to ensure the output value is of the correct type.
"""
ij = self.load_install_json(self.args.ij)
validations = {'rules': [], 'outputs': []}
job_id = randint(1000, 9999)
output_variables = ij.get('playbook', {}).get('outputVariables') or []
if self.args.permutation_id is not None:
output_variables = self._output_permutations[self.args.permutation_id]
# for o in ij.get('playbook', {}).get('outputVariables') or []:
for o in output_variables:
variable = '#App:{}:{}!{}'.format(job_id, o.get('name'), o.get('type'))
validations['outputs'].append(variable)
# null check
od = OrderedDict()
if o.get('type').endswith('Array'):
od['data'] = [None, []]
od['data_type'] = 'redis'
od['operator'] = 'ni'
else:
od['data'] = None
od['data_type'] = 'redis'
od['operator'] = 'ne'
od['variable'] = variable
validations['rules'].append(od)
# type check
od = OrderedDict()
if o.get('type').endswith('Array'):
od['data'] = 'array'
od['data_type'] = 'redis'
od['operator'] = 'it'
elif o.get('type').endswith('Binary'):
od['data'] = 'binary'
od['data_type'] = 'redis'
od['operator'] = 'it'
elif o.get('type').endswith('Entity') or o.get('type') == 'KeyValue':
od['data'] = 'entity'
od['data_type'] = 'redis'
od['operator'] = 'it'
else:
od['data'] = 'string'
od['data_type'] = 'redis'
od['operator'] = 'it'
od['variable'] = variable
validations['rules'].append(od)
return validations
def cbpdnmd_xstep(k):
"""Do the X step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
if mp_cri.Cd == 1:
b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
sl.rfftn(YU1, None, mp_cri.axisN)
Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
else:
b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
axis=mp_cri.axisC) + \
sl.rfftn(YU1, None, mp_cri.axisN)
Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
def get_portchannel_info_by_intf_output_lacp_actor_brcd_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
actor_brcd_state = ET.SubElement(lacp, "actor-brcd-state")
actor_brcd_state.text = kwargs.pop('actor_brcd_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rfecv(model, X, y, ax=None, step=1, groups=None, cv=None,
scoring=None, **kwargs):
"""
Performs recursive feature elimination with cross-validation to determine
an optimal number of features for a model. Visualizes the feature subsets
with respect to the cross-validation score.
This helper function is a quick wrapper to utilize the RFECV visualizer
for one-off analysis.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and provides information about the
relative importance of features with either a ``coef_`` or
``feature_importances_`` attribute.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
step : int or float, optional (default=1)
If greater than or equal to 1, then step corresponds to the (integer)
number of features to remove at each iteration. If within (0.0, 1.0),
then step corresponds to the percentage (rounded down) of features to
remove at each iteration.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation.
- An integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers. These arguments are
also passed to the `poof()` method, e.g. can pass a path to save the
figure to.
Returns
-------
ax : matplotlib axes
Returns the axes that the rfecv were drawn on.
"""
# Initialize the visualizer
oz = RFECV(model, ax=ax, step=step, groups=groups, cv=cv, scoring=scoring)
# Fit and poof the visualizer
oz.fit(X, y)
oz.poof(**kwargs)
return oz.ax
def main(args):
"""
Main function - launches the program
"""
if args:
if not args.outputRepository:
HOME_DIR = os.path.expanduser('~')
# Utility's base directory
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DOWNLOAD_DIR = HOME_DIR + '/landsat'
ZIP_DIR = DOWNLOAD_DIR + '/zip'
else:
ZIP_DIR = args.outputRepository
if args.subs == 'search':
try:
if args.start:
args.start = reformat_date(parse(args.start))
if args.end:
args.end = reformat_date(parse(args.end))
except TypeError:
exit("You date format is incorrect. Please try again!", 1)
s = Search()
clipper = Clipper()
if args.search_subs == 'shapefile':
clipper.shapefile(args.path)
elif args.search_subs == 'query':
clipper.query(args.name)
result = s.search(args.limit, args.start, args.end, clipper)
try:
if result['status'] == 'SUCCESS':
if result['total'] > 200:
exit('Too many results. Please narrow your search or limit your query with -l options')
else:
if args.outputRepository:
with open(ZIP_DIR+'/result.geojson', 'w') as outfile:
json.dump(result['results'], outfile)
print ("The geojsonFIle have been created here: %s" %
ZIP_DIR)
else:
print ("the IDs which matched with request are : ")
for i in result['ID']:
print (i)
if args.download:
gs = GsHelper(ZIP_DIR)
if args.password and args.user:
print('Starting the download:')
for item in result['downloads']:
login = args.user
mdp = args.password
gs.single_download(login, mdp, item['download'], item['id'], ZIP_DIR)
print("%s has been downloaded ... continuing" % item['id'])
print("%s images were downloaded"
% result['total'])
exit("The downloaded images are located here: %s" %
ZIP_DIR)
else:
exit("Please give a login and a password for Theia downloading")
else:
exit("")
elif result['status'] == 'error':
exit(result['message'])
except KeyError:
exit('Too Many API queries. You can only query DevSeed\'s '
'API 5 times per minute', 1)
elif args.subs == 'download':
gs = GsHelper(ZIP_DIR)
print('Starting the download:')
if args.password and args.user:
for scene in args.scenes:
login = args.user
mdp = args.password
download = 'http://spirit.cnes.fr/resto/Landsat/' + scene + '/$download'
testD = gs.checkifDownloadExist(login, mdp, download, scene)
if testD:
gs.single_download(login, mdp, download, scene, ZIP_DIR)
else:
exit("Scene ID was not found or wrong user/password given!")
exit("The downloaded images are located here: %s" % gs.zip_dir)
else:
exit("Please give a login and a password for Theia downloading")
def atlas_init_peer_info( peer_table, peer_hostport, blacklisted=False, whitelisted=False ):
"""
Initialize peer info table entry
"""
peer_table[peer_hostport] = {
"time": [],
"zonefile_inv": "",
"blacklisted": blacklisted,
"whitelisted": whitelisted
}
def isOpeningTag(self):
"""
Detect whether this tag is opening or not.
Returns:
bool: True if it is opening.
"""
return (self.isTag() and
not self.isComment() and
not self.isEndTag() and
not self.isNonPairTag())
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if factors are defined for all the cliques or not.
* The check for the running intersection property is not done explicitly over
here, as it is done in the add_edges method.
* Checks if cardinality information for all the variables is available or not. If
not, it raises an error.
* Check if cardinality of random variable remains same across all the
factors.
Returns
-------
check: boolean
True if all the checks are passed
"""
for clique in self.nodes():
factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
if not any(factors):
raise ValueError('Factors for all the cliques or clusters not defined.')
cardinalities = self.get_cardinality()
if len(set((x for clique in self.nodes() for x in clique))) != len(cardinalities):
raise ValueError('Factors for all the variables not defined.')
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if (cardinalities[variable] != cardinality):
raise ValueError(
'Cardinality of variable {var} not matching among factors'.format(var=variable))
return True
def refresh_information(self, accept=MEDIA_TYPE_TAXII_V20):
"""Update the properties of this API Root.
This invokes the ``Get API Root Information`` endpoint.
"""
response = self.__raw = self._conn.get(self.url,
headers={"Accept": accept})
self._populate_fields(**response)
self._loaded_information = True
def _salt_send_domain_event(opaque, conn, domain, event, event_data):
'''
Helper function send a salt event for a libvirt domain.
:param opaque: the opaque data that is passed to the callback.
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param domain: name of the domain related to the event
:param event: name of the event
:param event_data: additional event data dict to send
'''
data = {
'domain': {
'name': domain.name(),
'id': domain.ID(),
'uuid': domain.UUIDString()
},
'event': event
}
data.update(event_data)
_salt_send_event(opaque, conn, data)
def get_fallback_language(self, language_code=None, site_id=None):
"""
Find out what the fallback language is for a given language choice.
.. deprecated:: 1.5
Use :func:`get_fallback_languages` instead.
"""
choices = self.get_active_choices(language_code, site_id=site_id)
if choices and len(choices) > 1:
# Still take the last, like previous code.
# With multiple fallback languages that means taking the base language.
# Hence, upgrade the code to use get_fallback_languages() instead.
return choices[-1]
else:
return None
def tag(self, querystring, tags, afterwards=None, remove_rest=False):
"""
add tags to messages matching `querystring`.
This appends a tag operation to the write queue and raises
:exc:`~errors.DatabaseROError` if in read only mode.
:param querystring: notmuch search string
:type querystring: str
:param tags: a list of tags to be added
:type tags: list of str
:param afterwards: callback that gets called after successful
application of this tagging operation
:type afterwards: callable
:param remove_rest: remove tags from matching messages before tagging
:type remove_rest: bool
:exception: :exc:`~errors.DatabaseROError`
.. note::
This only adds the requested operation to the write queue.
You need to call :meth:`DBManager.flush` to actually write out.
"""
if self.ro:
raise DatabaseROError()
if remove_rest:
self.writequeue.append(('set', afterwards, querystring, tags))
else:
self.writequeue.append(('tag', afterwards, querystring, tags))
def pluralize(word, count=None, format=u'{word}'):
"""
Converts the inputted word to its plural form. This method works
best if you have the inflect module installed, as the request is then
passed along to inflect.plural. If you do not have that module, a simpler
and less impressive pluralization technique will be used.
:sa https://pypi.python.org/pypi/inflect
:param word | <str>
:return <str>
"""
if count == 1:
return word
elif count is not None:
return format.format(word=word, count=count)
word = nativestring(word)
if inflect_engine:
return format.format(word=inflect_engine.plural(word))
all_upper = EXPR_UPPERCASE.match(word) is not None
# go through the different plural expressions, searching for the
# proper replacement
for expr, plural in PLURAL_RULES:
results = expr.match(word)
if results:
result_dict = results.groupdict()
single = result_dict.get('single', '')
# check if its capitalized
if all_upper:
return format.format(word=single + plural.upper())
else:
return format.format(word=single + plural)
# by default, just include 's' at the end
if all_upper:
return format.format(word=word + 'S')
return format.format(word=word + 's')
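# Hedged usage sketch; exact outputs depend on whether the optional inflect
# engine is installed and on the project's PLURAL_RULES:
#
#   pluralize('book')                 # -> 'books'
#   pluralize('book', count=1)        # -> 'book' (a count of one short-circuits)
#   pluralize('book', count=3,
#             format=u'{count} {word}')  # -> '3 book' (the count branch only
#                                        #    formats; it does not pluralize)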
def moduleInfo( module ):
"""
Generates HTML information to display for the about info for a module.
:param module | <module>
"""
data = module.__dict__
html = []
html.append( '<h2>%s</h2>' % data.get('__name__', 'Unknown') )
html.append( '<hr/>' )
ver = data.get('__version__', '0')
html.append( '<small>version: %s</small>' % ver)
html.append( '<br/>' )
html.append( nativestring(data.get('__doc__', '')) )
html.append( '<br/><br/><b>Authors</b><ul>' )
for author in data.get('__authors__', []):
html.append( '<li>%s</li>' % author )
html.append( '</ul>' )
html.append( '<br/><br/><b>Depends on:</b><ul>' )
for depends in data.get('__depends__', []):
html.append( '<li>%s</li>' % depends )
html.append( '</ul>' )
html.append( '' )
html.append( '<br/><br/><b>Credits</b><ul>' )
for credit in data.get('__credits__', []):
html.append('<li>%s: %s</li>' % credit)
html.append( '</ul>' )
opts = (data.get('__maintainer__', ''), data.get('__email__', ''))
html.append('<br/><br/><small>maintained by: %s email: %s</small>' % opts)
opts = (data.get('__copyright__', ''), data.get('__license__', ''))
html.append('<br/><small>%s | license: %s</small>' % opts)
return '\n'.join(html)
def forwards(self, orm):
"Write your forwards methods here."
fields = orm['avocado.DataField'].objects.filter(app_name='variants',
model_name='evs', field_name__in=('all_maf', 'aa_maf', 'ea_maf'))
for f in fields:
f.field_name = f.field_name.replace('maf', 'af')
f.save()
def intersects(self, other):
"""
Returns True if there exists a segmentlist in self that
intersects the corresponding segmentlist in other; returns
False otherwise.
See also:
.intersects_all(), .all_intersects(), .all_intersects_all()
"""
return any(key in self and self[key].intersects(value) for key, value in other.iteritems())
def remove_manager(self, manager):
"""
Remove a single manager from the scope.
:param manager: single username to be removed from the scope's list of managers
:type manager: basestring
:raises APIError: when unable to update the scope manager
"""
select_action = 'remove_manager'
self._update_scope_project_team(select_action=select_action, user=manager, user_type='manager')
def setup(self):
"""When subclassing remember to call SubtitleChangeCommand::setup() to perform generic
checks."""
if not isinstance(self.filePath, str):
raise TypeError("File path is not a string!")
if self.controller is None:
raise ValueError("Command controller hasn't been specified!") | 0.008696 |
def get_ssh_key(self, ssh_key_id):
"""
Return a SSHKey object by its ID.
"""
return SSHKey.get_object(api_token=self.token, ssh_key_id=ssh_key_id)
def _repr_html_(self):
"""
Jupyter Notebook magic repr function.
"""
rows, c = '', ''
s = '<tr><td><strong>{k}</strong></td><td style="{stl}">{v}</td></tr>'
for k, v in self.__dict__.items():
if k == '_colour':
k = 'colour'
c = utils.text_colour_for_hex(v)
style = 'color:{}; background-color:{}'.format(c, v)
else:
style = 'color:black; background-color:white'
if k == 'component':
try:
v = v._repr_html_()
except AttributeError:
v = v.__repr__()
rows += s.format(k=k, v=v, stl=style)
html = '<table>{}</table>'.format(rows)
return html
def cancel_bbuild(self, build_execution_configuration_id, **kwargs):
"""
Cancel the build execution defined with given executionConfigurationId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
else:
(data) = self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
return data
def ssh_check_mech(self, desired_mech):
"""
Check if the given OID is the Kerberos V5 OID (server mode).
:param str desired_mech: The desired GSS-API mechanism of the client
:return: ``True`` if the given OID is supported, otherwise ``False``
"""
from pyasn1.codec.der import decoder
mech, __ = decoder.decode(desired_mech)
return str(mech) == self._krb5_mech
def resetPassword(self, email=True):
"""
Resets a user's password for an account. The password will be randomly
generated and emailed by the system.
Input:
email - boolean indicating that the new password will be emailed to the
user's profile email address. The default is True.
"""
url = self.root + "/reset"
params = {
"f" : "json",
"email" : email
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
def publication_history(self):
"""List of tuples of authored publications in the form
(title, abbreviation, type, issn), where issn is only given
for journals. abbreviation and issn may be None.
"""
pub_hist = self.xml.findall('author-profile/journal-history/')
hist = []
for pub in pub_hist:
try:
issn = pub.find("issn").text
except AttributeError:
issn = None
try:
abbr = pub.find("sourcetitle-abbrev").text
except AttributeError:
abbr = None
hist.append((pub.find("sourcetitle").text, abbr, pub.get("type"), issn))
return hist
def heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = 0 # cfg.KRCNN.INFERENCE_MIN_SIZE
num_keypoints = maps.shape[3]
xy_preds = np.zeros((len(rois), 3, num_keypoints), dtype=np.float32)
end_scores = np.zeros((len(rois), num_keypoints), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height), interpolation=cv2.INTER_CUBIC
)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
# roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
pos = roi_map.reshape(num_keypoints, -1).argmax(axis=1)
x_int = pos % w
y_int = (pos - x_int) // w
# assert (roi_map_probs[k, y_int, x_int] ==
# roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, :] = x + offset_x[i]
xy_preds[i, 1, :] = y + offset_y[i]
xy_preds[i, 2, :] = 1
end_scores[i, :] = roi_map[np.arange(num_keypoints), y_int, x_int]
return np.transpose(xy_preds, [0, 2, 1]), end_scores
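# The Heckbert-style conversion used above, in isolation: a discrete heatmap
# index d maps to the continuous coordinate c = d + 0.5 before being scaled
# back into the RoI frame. A self-contained sketch for a single keypoint:
def heatmap_idx_to_image_xy_demo(x_int, y_int, roi, map_w, map_h):
    # roi = (x0, y0, x1, y1) in image coordinates
    w = max(roi[2] - roi[0], 1)
    h = max(roi[3] - roi[1], 1)
    x = (x_int + 0.5) * (w / map_w) + roi[0]
    y = (y_int + 0.5) * (h / map_h) + roi[1]
    return x, y

# heatmap_idx_to_image_xy_demo(28, 14, (100, 50, 200, 150), 56, 56)
# -> (~150.9, ~75.9)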
def get_host_advanced(name=None, ipv4addr=None, mac=None, **api_opts):
'''
Get all host information
CLI Example:
.. code-block:: bash
salt-call infoblox.get_host_advanced hostname.domain.ca
'''
infoblox = _get_infoblox(**api_opts)
host = infoblox.get_host_advanced(name=name, mac=mac, ipv4addr=ipv4addr)
return host
def all_pairs(seq1, seq2=None):
"""Yields all pairs drawn from ``seq1`` and ``seq2``.
If ``seq2`` is ``None``, ``seq2 = seq1``.
>>> stop_at.ed(all_pairs(xrange(100000), xrange(100000)), 8)
((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7))
"""
if seq2 is None: seq2 = seq1
for item1 in seq1:
for item2 in seq2:
yield (item1, item2)
def find(self, nameFilter=None, typeFilter=None, bindingModeFilter=None, boundFilter=None):
"""
Gets the list of services that the Watson IoT Platform can connect to.
The list can include a mixture of services that are either bound or unbound.
Parameters:
- nameFilter(string) - Filter the results by the specified name
- typeFilter(string) - Filter the results by the specified type, Available values : cloudant, eventstreams
- bindingModeFilter(string) - Filter the results by the specified binding mode, Available values : automatic, manual
- boundFilter(boolean) - Filter the results by the bound flag
Throws APIException on failure.
"""
queryParms = {}
if nameFilter:
queryParms["name"] = nameFilter
if typeFilter:
queryParms["type"] = typeFilter
if bindingModeFilter:
queryParms["bindingMode"] = bindingModeFilter
if boundFilter:
queryParms["bound"] = boundFilter
return IterableServiceBindingsList(self._apiClient, filters=queryParms)
def get_run_events(cls, crawler, run_id, start, end, level=None):
"""Events from a particular run"""
key = make_key(crawler, "events", run_id, level)
return cls.event_list(key, start, end)
def call(self, items, additional_fields, shape):
"""
Returns all items in an account that correspond to a list of ID's, in stable order.
:param items: a list of (id, changekey) tuples or Item objects
:param additional_fields: the extra fields that should be returned with the item, as FieldPath objects
:param shape: The shape of returned objects
:return: XML elements for the items, in stable order
"""
return self._pool_requests(payload_func=self.get_payload, **dict(
items=items,
additional_fields=additional_fields,
shape=shape,
))
def get_client(self, service, region, public=True, cached=True,
client_class=None):
"""
Returns the client object for the specified service and region.
By default the public endpoint is used. If you wish to work with a
services internal endpoints, specify `public=False`.
By default, if a client has already been created for the given service,
region, and public values, that will be returned. To force a new client
to be created, pass 'cached=False'.
"""
if not self.authenticated:
raise exc.NotAuthenticated("You must authenticate before trying "
"to create clients.")
clt = ep = None
mapped_service = self.service_mapping.get(service) or service
svc = self.services.get(mapped_service)
if svc:
ep = svc.endpoints.get(region)
if ep:
clt = ep._get_client(public=public, cached=cached,
client_class=client_class)
if not clt:
raise exc.NoSuchClient("There is no client available for the "
"service '%s' in the region '%s'." % (service, region))
return clt | 0.004983 |
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None, chain_spacing=0.05, vline=0):
""" Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0).
"""
if not gridspec:
print_(
'\nYour installation of matplotlib is not recent enough to support summary_plot; this function is disabled until matplotlib is updated.')
return
# Quantiles to be calculated
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quartiles:
qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
# Range for x-axis
plotrange = None
# Number of chains
chains = None
# Gridspec
gs = None
# Subplots
interval_plot = None
rhat_plot = None
try:
# First try MultiTrace type
traces = trace_obj.traces
if rhat and len(traces) > 1:
from .diagnostics import gelman_rubin
R = gelman_rubin(trace_obj)
if vars is not None:
R = {v: R[v] for v in vars}
else:
rhat = False
except AttributeError:
# Single NpTrace
traces = [trace_obj]
# Can't calculate Gelman-Rubin with a single trace
rhat = False
if vars is None:
vars = traces[0].varnames
# Empty list for y-axis labels
labels = []
chains = len(traces)
if gs is None:
# Initialize plot
if rhat and chains > 1:
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
else:
gs = gridspec.GridSpec(1, 1)
# Subplot for confidence intervals
interval_plot = subplot(gs[0])
for j, tr in enumerate(traces):
# Get quantiles
trace_quantiles = quantiles(tr, qlist)
hpd_intervals = hpd(tr, alpha)
# Counter for current variable
var = 1
for varname in vars:
var_quantiles = trace_quantiles[varname]
quants = var_quantiles.values()
var_hpd = hpd_intervals[varname].T
# Substitute HPD interval for quantile
quants[0] = var_hpd[0].T
quants[-1] = var_hpd[1].T
# Ensure x-axis contains range of current interval
if plotrange:
plotrange = [min(
plotrange[0],
np.min(quants)),
max(plotrange[1],
np.max(quants))]
else:
plotrange = [np.min(quants), np.max(quants)]
# Number of elements in current variable
value = tr[varname][0]
k = np.size(value)
# Append variable name(s) to list
if not j:
if k > 1:
names = var_str(varname, shape(value))
labels += names
else:
labels.append(varname)
# labels.append('\n'.join(varname.split('_')))
# Add spacing for each chain, if more than one
e = [0] + [(chain_spacing * ((i + 2) / 2)) *
(-1) ** i for i in range(chains - 1)]
# Deal with multivariate nodes
if k > 1:
for i, q in enumerate(np.transpose(quants).squeeze()):
# Y coordinate with jitter
y = -(var + i) + e[j]
if quartiles:
# Plot median
plot(q[2], y, 'bo', markersize=4)
# Plot quartile interval
errorbar(
x=(q[1],
q[3]),
y=(y,
y),
linewidth=2,
color="blue")
else:
# Plot median
plot(q[1], y, 'bo', markersize=4)
# Plot outer interval
errorbar(
x=(q[0],
q[-1]),
y=(y,
y),
linewidth=1,
color="blue")
else:
# Y coordinate with jitter
y = -var + e[j]
if quartiles:
# Plot median
plot(quants[2], y, 'bo', markersize=4)
# Plot quartile interval
errorbar(
x=(quants[1],
quants[3]),
y=(y,
y),
linewidth=2,
color="blue")
else:
# Plot median
plot(quants[1], y, 'bo', markersize=4)
# Plot outer interval
errorbar(
x=(quants[0],
quants[-1]),
y=(y,
y),
linewidth=1,
color="blue")
# Increment index
var += k
labels = ylabels or labels
# Update margins
left_margin = np.max([len(x) for x in labels]) * 0.015
gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
# Define range of y-axis
ylim(-var + 0.5, -0.5)
datarange = plotrange[1] - plotrange[0]
xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
# Add variable labels
yticks([-(l + 1) for l in range(len(labels))], labels)
# Add title
if main is not False:
plot_title = main or str(int((
1 - alpha) * 100)) + "% Credible Intervals"
title(plot_title)
# Add x-axis label
if xtitle is not None:
xlabel(xtitle)
# Constrain to specified range
if xrange is not None:
xlim(*xrange)
# Remove ticklines on y-axes
for ticks in interval_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in interval_plot.spines.iteritems():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
# Reference line
axvline(vline, color='k', linestyle='--')
# Generate Gelman-Rubin plot
if rhat and chains > 1:
# If there are multiple chains, calculate R-hat
rhat_plot = subplot(gs[1])
if main is not False:
title("R-hat")
# Set x range
xlim(0.9, 2.1)
# X axis labels
xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
yticks([-(l + 1) for l in range(len(labels))], "")
i = 1
for varname in vars:
value = traces[0][varname][0]
k = np.size(value)
if k > 1:
plot([min(r, 2) for r in R[varname]], [-(j + i)
for j in range(k)], 'bo', markersize=4)
else:
plot(min(R[varname], 2), -i, 'bo', markersize=4)
i += k
# Define range of y-axis
ylim(-i + 0.5, -0.5)
# Remove ticklines on y-axes
for ticks in rhat_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in rhat_plot.spines.iteritems():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
return gs
def get(self, log_set):
"""
Get a specific log or log set
:param log_set: The log set or log to get. Ex: `.get(log_set='app')` or
`.get(log_set='app/log')`
:type log_set: str
:returns: The response of your log set or log
:rtype: dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
response = requests.get(self.base_url + log_set.rstrip('/'))
if not response.ok:
raise ServerException(
'{}: {}'.format(response.status_code, response.text))
return response.json()
def setup(app) -> Dict[str, Any]:
"""
Sets up Sphinx extension.
"""
app.add_config_value("uqbar_api_directory_name", "api", "env")
app.add_config_value("uqbar_api_document_empty_modules", False, "env")
app.add_config_value("uqbar_api_document_private_members", False, "env")
app.add_config_value("uqbar_api_document_private_modules", False, "env")
app.add_config_value("uqbar_api_member_documenter_classes", None, "env")
app.add_config_value("uqbar_api_module_documenter_class", None, "env")
app.add_config_value("uqbar_api_root_documenter_class", None, "env")
app.add_config_value("uqbar_api_source_paths", None, "env")
app.add_config_value("uqbar_api_title", "API", "html")
app.connect("builder-inited", on_builder_inited)
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
def _compose_mro(cls, types): # noqa
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(_type):
return ( # :off
_type not in bases and
hasattr(_type, '__mro__') and
issubclass(cls, _type)
) # :on
types = [n for n in types if is_related(n)]
# Remove entries which are strict bases of other entries (they will end up
# in the MRO anyway).
def is_strict_base(_typ):
for other in types:
if _typ != other and _typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro)
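# Why the composed MRO matters, illustrated with the stdlib dispatcher that
# uses the same algorithm (functools.singledispatch): registering on a
# numbers ABC still catches bool, because the ABC is spliced into bool's
# resolution order.
from functools import singledispatch
from numbers import Integral

@singledispatch
def describe(x):
    return 'object'

@describe.register(Integral)
def _(x):
    return 'integral'

# describe(True) -> 'integral'; describe('hi') -> 'object'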
def arktimestamp(arkt, forfilename=False):
"""Returns a human-readable timestamp given an Ark timestamp 'arct'.
An Ark timestamp is the number of seconds since Genesis block,
2017:03:21 15:55:44."""
t = arkt + time.mktime((2017, 3, 21, 15, 55, 44, 0, 0, 0))
return '%d %s' % (arkt, timestamp(t))
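# A worked check of the genesis offset, assuming the interpreter's local
# timezone matches the one mktime uses above:
#
#   arktimestamp(0)      # -> "0 <timestamp of 2017-03-21 15:55:44>"
#   arktimestamp(86400)  # -> one day after the Genesis block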
def to_bitarray(data, width=8):
''' Convert data (list of integers, bytearray or integer) to bitarray '''
if isinstance(data, list) or isinstance(data, bytearray):
data = combine_hex(data)
return [digit == '1' for digit in bin(data)[2:].zfill(width)]
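# Quick check of the conversion (width defaults to 8):
#
#   to_bitarray(0xA5)       # 0b10100101
#   # -> [True, False, True, False, False, True, False, True]
#   to_bitarray(5, width=4)
#   # -> [False, True, False, True]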
def get_driver(browser='firefox', args=None):
"""
:param browser:
:param args:
:rtype: RemoteDriver
:return:
"""
    if browser not in BROWSER_MAPPING:
        raise RuntimeError("unknown browser %s. allowed: %s" % (browser, ", ".join(BROWSER_MAPPING.keys())))
driver_cls = BROWSER_MAPPING.get(browser)
safe_args = {}
if args is not None:
        expected_arguments = inspect.getfullargspec(driver_cls.__init__).args
expected_arguments.remove("self")
for arg in expected_arguments:
if arg in args:
safe_args[arg] = args[arg]
return driver_cls(**safe_args) | 0.003115 |
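A runnable sketch with a dummy driver class standing in for the selenium classes the real BROWSER_MAPPING holds:

import inspect  # also used by get_driver above

class DummyDriver:  # stand-in; the real mapping holds RemoteDriver subclasses
    def __init__(self, timeout=10):
        self.timeout = timeout

BROWSER_MAPPING = {'firefox': DummyDriver}  # illustrative mapping

d = get_driver('firefox', args={'timeout': 30, 'ignored': 1})
print(d.timeout)  # 30 -- keys not accepted by __init__ are silently dropped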
def make_image(self, conf, images, chain=None, parent_chain=None, made=None, ignore_deps=False, ignore_parent=False, pushing=False):
"""Make us an image"""
made = {} if made is None else made
chain = [] if chain is None else chain
parent_chain = [] if parent_chain is None else parent_chain
if conf.name in made:
return
if conf.name in chain and not ignore_deps:
raise BadCommand("Recursive dependency images", chain=chain + [conf.name])
if conf.name in parent_chain and not ignore_parent:
raise BadCommand("Recursive FROM statements", chain=parent_chain + [conf.name])
if conf.name not in images:
raise NoSuchImage(looking_for=conf.name, available=images.keys())
if not ignore_deps:
for dependency, image in conf.dependency_images():
self.make_image(images[dependency], images, chain=chain + [conf.name], made=made, pushing=pushing)
if not ignore_parent:
for dep in conf.commands.dependent_images:
if not isinstance(dep, six.string_types):
self.make_image(dep, images, chain, parent_chain + [conf.name], made=made, pushing=pushing)
# Should have all our dependencies now
log.info("Making image for '%s' (%s)", conf.name, conf.image_name)
cached = self.build_image(conf, pushing=pushing)
made[conf.name] = True
return cached | 0.004746 |
def get_lang_array(self):
"""gets supported langs as an array"""
r = self.yandex_translate_request("getLangs", "")
self.handle_errors(r)
return r.json()["dirs"] | 0.010309 |
def add_ip_scope(name, description, auth, url, startip=None, endip=None, network_address=None):
"""
Function takes input of four strings Start Ip, endIp, name, and description to add new Ip Scope
to terminal access in the HPE IMC base platform
:param name: str Name of the owner of this IP scope ex. 'admin'
:param description: str description of the Ip scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param startip: str Start of IP address scope ex. '10.101.0.1'
:param endip: str End of IP address scope ex. '10.101.0.254'
:param network_address: ipv4 network address + subnet bits of target scope
:return: 200 if successfull
:rtype:
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> delete_ip_scope('10.50.0.0/24', auth.creds, auth.url)
<Response [204]>
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> assert type(new_scope) is int
>>> assert new_scope == 200
>>> existing_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> assert type(existing_scope) is int
>>> assert existing_scope == 409
"""
if network_address is not None:
nw_address = ipaddress.IPv4Network(network_address)
startip = nw_address[1]
endip = nw_address[-2]
f_url = url + "/imcrs/res/access/assignedIpScope"
payload = ('''{ "startIp": "%s", "endIp": "%s","name": "%s","description": "%s" }'''
% (str(startip), str(endip), str(name), str(description)))
    try:
        response = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)
        if response.status_code == 200:
            # print("IP Scope Successfully Created")
            return response.status_code
        elif response.status_code == 409:
            # print("IP Scope Already Exists")
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " add_ip_scope: An Error has occurred" | 0.00345
def _set_properties(self, api_response):
"""Update properties from resource in body of ``api_response``
:type api_response: dict
:param api_response: response returned from an API call
"""
cleaned = api_response.copy()
self._scrub_local_properties(cleaned)
statistics = cleaned.get("statistics", {})
if "creationTime" in statistics:
statistics["creationTime"] = float(statistics["creationTime"])
if "startTime" in statistics:
statistics["startTime"] = float(statistics["startTime"])
if "endTime" in statistics:
statistics["endTime"] = float(statistics["endTime"])
self._properties.clear()
self._properties.update(cleaned)
self._copy_configuration_properties(cleaned.get("configuration", {}))
# For Future interface
self._set_future_result() | 0.002222 |
def applyslicer(array, slicer, pmask, cval = 0):
r"""
Apply a slicer returned by the iterator to a new array of the same
dimensionality as the one used to initialize the iterator.
Notes
-----
If ``array`` has more dimensions than ``slicer`` and ``pmask``, the first ones
are sliced.
Parameters
----------
array : array_like
A n-dimensional array.
    slicer : list
        List of `slice()` instances as returned by `next()`.
    pmask : ndarray
        The array mask as returned by `next()`.
    cval : number
Value to fill undefined positions.
    Examples
    --------
>>> import numpy
>>> from medpy.iterators import CentredPatchIterator
>>> arr = numpy.arange(0, 25).reshape((5,5))
>>> for patch, pmask, _, slicer in CentredPatchIterator(arr, 3):
>>> new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask)
    >>> print(numpy.all(new_patch == patch))
True
...
"""
l = len(slicer)
patch = numpy.zeros(list(pmask.shape[:l]) + list(array.shape[l:]), array.dtype)
    if cval != 0: patch.fill(cval)
sliced = array[slicer]
patch[pmask] = sliced.reshape([numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:]))
return patch | 0.010381 |
def SaveState( self, config_parser ):
"""Retrieve window state to be restored on the next run..."""
if not config_parser.has_section( 'window' ):
config_parser.add_section( 'window' )
if self.IsMaximized():
config_parser.set( 'window', 'maximized', str(True))
else:
config_parser.set( 'window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set( 'window', 'width', str(size[0]) )
config_parser.set( 'window', 'height', str(size[1]) )
config_parser.set( 'window', 'x', str(position[0]) )
config_parser.set( 'window', 'y', str(position[1]) )
for control in self.ProfileListControls:
control.SaveState( config_parser )
return config_parser | 0.024852 |
def _postprocess(self, x, out=None):
"""Return the post-processed version of ``x``.
C2C: use ``tmp_f`` (C2C operation)
R2C: use ``tmp_f`` (C2C operation)
HALFC: use ``tmp_f`` (C2C operation)
The result is stored in ``out`` if given, otherwise in
a temporary or a new array.
"""
if out is None:
if self.domain.field == ComplexNumbers():
out = self._tmp_r if self._tmp_r is not None else self._tmp_f
else:
out = self._tmp_f
return dft_postprocess_data(
out, real_grid=self.domain.grid, recip_grid=self.range.grid,
shift=self.shifts, axes=self.axes, sign=self.sign,
interp=self.domain.interp, op='multiply', out=out) | 0.002567 |
def all_subs(bounds):
"""given a list of tuples specifying the bounds of an array, all_subs()
returns a list of all the tuples of subscripts for that array."""
idx_list = []
for i in range(len(bounds)):
this_dim = bounds[i]
lo,hi = this_dim[0],this_dim[1] # bounds for this dimension
this_dim_idxs = range(lo,hi+1) # indexes for this dimension
idx_list.append(this_dim_idxs)
return idx2subs(idx_list) | 0.008677 |
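A worked example, assuming `idx2subs` takes the per-dimension index lists and returns their Cartesian product (consistent with the docstring):

from itertools import product

def idx2subs(idx_list):  # assumed: Cartesian product of the per-dimension index lists
    return list(product(*idx_list))

print(all_subs([(1, 2), (0, 1)]))
# [(1, 0), (1, 1), (2, 0), (2, 1)]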
def _fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2019.2.0
Performs the action as called from ``fcontext_add_policy`` or ``fcontext_delete_policy``.
Returns the result of the call to semanage.
'''
if action not in ['add', 'delete']:
raise SaltInvocationError('Actions supported are "add" and "delete", not "{0}".'.format(action))
cmd = 'semanage fcontext --{0}'.format(action)
# "semanage --ftype a" isn't valid on Centos 6,
# don't pass --ftype since "a" is the default filetype.
if filetype is not None and filetype != 'a':
_validate_filetype(filetype)
cmd += ' --ftype {0}'.format(filetype)
if sel_type is not None:
cmd += ' --type {0}'.format(sel_type)
if sel_user is not None:
cmd += ' --seuser {0}'.format(sel_user)
if sel_level is not None:
cmd += ' --range {0}'.format(sel_level)
cmd += ' ' + re.escape(name)
return __salt__['cmd.run_all'](cmd) | 0.003828 |
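For orientation, the shell command assembled for a typical call, sketched as comments rather than a live run (semanage needs root, and __salt__ is Salt's execution-module registry):

# _fcontext_add_or_delete_policy('add', '/srv/www(/.*)?',
#                                sel_type='httpd_sys_content_t')
# builds and executes:
#   semanage fcontext --add --type httpd_sys_content_t /srv/www\(/\.\*\)\?
# (the backslashes come from re.escape() applied to the name)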
def to_nodename(string, invalid=None, raise_exc=False):
"""Makes a Quilt Node name (perhaps an ugly one) out of any string.
This should match whatever the current definition of a node name is, as
defined in is_nodename().
    This isn't a reversible change; the original name can't always be
    recovered, so the original must be stored separately (`FileNode`
    metadata)
If `invalid` is given, it should be an iterable of names that the returned
string cannot match -- for example, other node names.
If `raise_exc` is True, an exception is raised when the converted string
is present in `invalid`. Otherwise, the converted string will have a
number appended to its name.
Example:
# replace special chars -> remove prefix underscores -> rename keywords
# '!if' -> '_if' -> 'if' -> 'if_'
>>> to_nodename('!if') -> 'if_'
>>> to_nodename('if', ['if_']) -> 'if__2'
>>> to_nodename('9#blah') -> 'n9_blah'
>>> to_nodename('9:blah', ['n9_blah', 'n9_blah_2']) -> 'n9_blah_3'
:param string: string to convert to a nodename
:param invalid: Container of names to avoid. Efficiency: Use set or dict
:param raise_exc: Raise an exception on name conflicts if truthy.
:returns: valid node name
"""
string = to_identifier(string)
#TODO: Remove this stanza once keywords are permissible in nodenames
if keyword.iskeyword(string):
string += '_' # there are no keywords ending in "_"
# Done if no deduplication needed
if invalid is None:
return string
# Deduplicate
if string in invalid and raise_exc:
raise QuiltException("Conflicting node name after string conversion: {!r}".format(string))
result = string
counter = 1
while result in invalid:
# first conflicted name will be "somenode_2"
# The result is "somenode", "somenode_2", "somenode_3"..
counter += 1
result = "{}_{}".format(string, counter)
return result | 0.001495 |
def column_to_bq_schema(self):
"""Convert a column to a bigquery schema object.
"""
kwargs = {}
if len(self.fields) > 0:
fields = [field.column_to_bq_schema() for field in self.fields]
kwargs = {"fields": fields}
return google.cloud.bigquery.SchemaField(self.name, self.dtype,
self.mode, **kwargs) | 0.004866 |
def _ParseAnalysisPluginOptions(self, options):
"""Parses the analysis plugin options.
Args:
options (argparse.Namespace): command line arguments.
"""
# Get a list of all available plugins.
analysis_plugin_info = self._analysis_manager.GetAllPluginInformation()
# Use set-comprehension to create a set of the analysis plugin names.
analysis_plugin_names = {
name.lower() for name, _, _ in analysis_plugin_info}
analysis_plugins = self.ParseStringOption(options, 'analysis_plugins')
if not analysis_plugins:
return
# Use set-comprehension to create a set of the requested plugin names.
requested_plugin_names = {
name.strip().lower() for name in analysis_plugins.split(',')}
# Check to see if we are trying to load plugins that do not exist.
difference = requested_plugin_names.difference(analysis_plugin_names)
if difference:
raise errors.BadConfigOption(
'Non-existent analysis plugins specified: {0:s}'.format(
' '.join(difference)))
self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins)
for analysis_plugin in self._analysis_plugins:
helpers_manager.ArgumentHelperManager.ParseOptions(
options, analysis_plugin) | 0.003145 |
def display_the_graphic_connection(self):
"""
The following permits to attribute the function "display_the_graphic" to the slider.
Because, to make a connection, we can not have parameters for the function, but "display_the_graphic" has some.
"""
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information) | 0.013193 |
def has_path(nodes, A, B):
r"""Test if nodes from a breadth_first_order search lead from A to
B.
Parameters
----------
    nodes : array_like
        Nodes from a breadth_first_order search
A : array_like
The set of educt states
B : array_like
The set of product states
Returns
-------
has_path : boolean
True if there exists a path, else False
"""
x1 = np.intersect1d(nodes, A).size > 0
x2 = np.intersect1d(nodes, B).size > 0
return x1 and x2 | 0.001927 |
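A runnable sketch using SciPy's breadth-first search to produce the `nodes` argument, with A and B as index sets into the same four-node graph:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order

# directed chain 0 -> 1 -> 2, plus an isolated node 3
graph = csr_matrix(np.array([[0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 0],
                             [0, 0, 0, 0]]))
nodes = breadth_first_order(graph, i_start=0, return_predecessors=False)
print(has_path(nodes, A=[0], B=[2]))  # True: BFS from 0 touches both sets
print(has_path(nodes, A=[0], B=[3]))  # False: node 3 is unreachable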
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
"""
Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
        raise ValueError('to_coo requires MultiIndex with nlevels >= 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns | 0.000861 |
def _tokenize_field_path(path):
"""Lex a field path into tokens (including dots).
Args:
path (str): field path to be lexed.
    Yields:
        str: tokens, including dots
"""
pos = 0
get_token = TOKENS_REGEX.match
match = get_token(path)
while match is not None:
type_ = match.lastgroup
value = match.group(type_)
yield value
pos = match.end()
match = get_token(path, pos)
if pos != len(path):
raise ValueError("Path {} not consumed, residue: {}".format(path, path[pos:])) | 0.003617 |
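A sketch of the lexer in action; the real TOKENS_REGEX isn't shown above, so a plausible pattern covering simple names, backtick-quoted names, and dots is assumed:

import re

# Assumed pattern: identifiers, backtick-quoted segments, and dots as separators.
TOKENS_REGEX = re.compile(
    r"(?P<simple>[_a-zA-Z][_a-zA-Z0-9]*)|(?P<quoted>`[^`]*`)|(?P<dot>\.)"
)

print(list(_tokenize_field_path("data.`odd name`.value")))
# ['data', '.', '`odd name`', '.', 'value']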
def _stdin_raw_nonblock(self):
"""Use the raw Win32 handle of sys.stdin to do non-blocking reads"""
# WARNING: This is experimental, and produces inconsistent results.
# It's possible for the handle not to be appropriate for use
# with WaitForSingleObject, among other things.
handle = msvcrt.get_osfhandle(sys.stdin.fileno())
result = WaitForSingleObject(handle, 100)
if result == WAIT_FAILED:
raise ctypes.WinError()
elif result == WAIT_TIMEOUT:
print(".", end='')
return None
else:
data = ctypes.create_string_buffer(256)
bytesRead = DWORD(0)
print('?', end='')
if not ReadFile(handle, data, 256,
ctypes.byref(bytesRead), None):
raise ctypes.WinError()
# This ensures the non-blocking works with an actual console
# Not checking the error, so the processing will still work with
# other handle types
FlushConsoleInputBuffer(handle)
data = data.value
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
print(repr(data) + " ", end='')
return data | 0.002333 |
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
items = [
('oauth_token', self.key),
('oauth_token_secret', self.secret),
]
if self.callback_confirmed is not None:
items.append(('oauth_callback_confirmed', self.callback_confirmed))
return urlencode(items) | 0.005693 |
def add_block(self, name):
""" Adds a new block to the AST.
`name`
Block name.
* Raises a ``ValueError`` exception if `name` is invalid or
an existing block name matches value provided for `name`.
"""
if not self.RE_NAME.match(name):
raise ValueError(u"Invalid block name '{0}'"
.format(common.from_utf8(name)))
if name in self._block_map:
raise ValueError(u"Block '{0}' already exists"
.format(common.from_utf8(name)))
# add new block and index mapping
self._block_map[name] = len(self._ast[2]) # must come first
option_list = []
block = [name, option_list]
self._ast[2].append(block) | 0.002491 |
def guess_encoding(request):
""" Try to guess the encoding of a request without going through the slow chardet process"""
ctype = request.headers.get('content-type')
if not ctype:
# we don't have a content-type, somehow, so...
LOGGER.warning("%s: no content-type; headers are %s",
request.url, request.headers)
return 'utf-8'
# explicit declaration
match = re.search(r'charset=([^ ;]*)(;| |$)', ctype)
if match:
return match[1]
# html default
if ctype.startswith('text/html'):
return 'iso-8859-1'
# everything else's default
return 'utf-8' | 0.003101 |
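A few illustrative calls against a stub request object; only `.headers` and `.url` are read, and LOGGER is assumed to be a module-level logger:

import logging
import re  # used by guess_encoding above

LOGGER = logging.getLogger(__name__)

class FakeRequest:  # minimal stand-in exposing what guess_encoding reads
    def __init__(self, ctype):
        self.headers = {'content-type': ctype} if ctype else {}
        self.url = 'http://example.com/'

print(guess_encoding(FakeRequest('text/html; charset=UTF-8')))  # 'UTF-8'
print(guess_encoding(FakeRequest('text/html')))                 # 'iso-8859-1'
print(guess_encoding(FakeRequest('application/json')))          # 'utf-8'
print(guess_encoding(FakeRequest(None)))                        # 'utf-8', logs a warning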
def astensor(array: TensorLike) -> BKTensor:
"""Converts a numpy array to the backend's tensor object
"""
array = np.asarray(array, dtype=CTYPE)
return array | 0.00578 |
def register_service(self, **kwargs):
"""
register this service with consul
kwargs passed to Consul.agent.service.register
"""
kwargs.setdefault('name', self.app.name)
self.session.agent.service.register(**kwargs) | 0.007663 |
def setup_concept_scheme(rdf, defaultcs):
"""Make sure all concepts have an inScheme property, using the given
default concept scheme if necessary."""
for conc in rdf.subjects(RDF.type, SKOS.Concept):
# check concept scheme
cs = rdf.value(conc, SKOS.inScheme, None, any=True)
if cs is None: # need to set inScheme
rdf.add((conc, SKOS.inScheme, defaultcs)) | 0.002475 |
def get_available_networks(self, **kwargs):
"""
Retrieves the list of Ethernet networks, Fiber Channel networks, and network sets that are available to a
server profile, along with their respective ports.
Args:
enclosureGroupUri (str): The URI of the enclosure group associated with the resource.
            functionType (str): The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.
                Values:
                    Ethernet
                        Specifies that the connection is to an Ethernet network or a network set.
                    FibreChannel
                        Specifies that the connection is to a Fibre Channel network.
            serverHardwareTypeUri (str): The URI of the server hardware type associated with the resource.
            serverHardwareUri (str): The URI of the server hardware associated with the resource.
            view (str): Returns a specific subset of the attributes of the resource or collection, by
                specifying the name of a predefined view. The default view is expand (show all attributes
                of the resource and all elements of collections of resources).
profileUri (str): If the URI of the server profile is provided the list of available networks will
include only networks that share a scope with the server profile.
scopeUris (str): An expression to restrict the resources returned according to the scopes
to which they are assigned
Returns:
list: Available networks.
"""
uri = self._helper.build_uri_with_query_string(kwargs, '/available-networks')
return self._helper.do_get(uri) | 0.008641 |
def line_transformation_suggestor(line_transformation, line_filter=None):
"""
Returns a suggestor (a function that takes a list of lines and yields
patches) where suggestions are the result of line-by-line transformations.
    @param line_transformation Function that, given a line, returns another
                               line with which to replace the given one. If
                               the output line is different from the input
                               line, the user will be prompted about whether
                               to make the change. If the output is None,
                               this means "I don't have a suggestion, but the
                               user should still be asked if they want to
                               edit the line."
    @param line_filter         Given a line, returns True or False. If False,
                               a line is ignored (as if line_transformation
                               returned the line itself for that line).
"""
def suggestor(lines):
for line_number, line in enumerate(lines):
if line_filter and not line_filter(line):
continue
candidate = line_transformation(line)
if candidate is None:
yield Patch(line_number)
else:
yield Patch(line_number, new_lines=[candidate])
return suggestor | 0.000678 |
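A small sketch wiring a transformation through the suggestor; the Patch shape is an assumption, since the real class isn't shown in this snippet:

from collections import namedtuple

Patch = namedtuple('Patch', ['line_number', 'new_lines'])  # assumed shape
Patch.__new__.__defaults__ = (None,)  # allow Patch(line_number) for "no suggestion"

suggestor = line_transformation_suggestor(
    lambda line: line.replace('\t', '    '),  # expand tabs to four spaces
    line_filter=lambda line: '\t' in line,    # only consider lines containing tabs
)
for patch in suggestor(['def f():\n', '\treturn 1\n']):
    print(patch.line_number, patch.new_lines)
# prints: 1 ['    return 1\n']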
def Validate(self):
"""Check the Method is well constructed."""
ValidateMultiple(self.probe, "Method has invalid probes")
Validate(self.target, "Method has invalid target")
Validate(self.hint, "Method has invalid hint") | 0.004255 |
def get(self, sid):
"""
Constructs a AlphaSenderContext
:param sid: The sid
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext
"""
return AlphaSenderContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | 0.013158 |
def preProcess(self, variables, domains, constraints, vconstraints):
"""
Preprocess variable domains
This method is called before starting to look for solutions,
and is used to prune domains with specific constraint logic
when possible. For instance, any constraints with a single
variable may be applied on all possible values and removed,
since they may act on individual values even without further
knowledge about other assignments.
@param variables: Variables affected by that constraint, in the
same order provided by the user
@type variables: sequence
@param domains: Dictionary mapping variables to their domains
@type domains: dict
@param constraints: List of pairs of (constraint, variables)
@type constraints: list
@param vconstraints: Dictionary mapping variables to a list of
constraints affecting the given variables.
@type vconstraints: dict
"""
if len(variables) == 1:
variable = variables[0]
domain = domains[variable]
for value in domain[:]:
if not self(variables, domains, {variable: value}):
domain.remove(value)
constraints.remove((self, variables))
vconstraints[variable].remove((self, variables)) | 0.001408 |
def frames(self, most_recent=False):
"""Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Parameters
----------
most_recent: bool
If true, the OpenCV buffer is emptied for the webcam before reading the most recent frame.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
"""
        if most_recent:
            for i in range(4):
                self._cap.grab()
for i in range(1):
if self._adjust_exposure:
try:
command = 'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'.format(self._device_id)
FNULL = open(os.devnull, 'w')
subprocess.call(shlex.split(command), stdout=FNULL, stderr=subprocess.STDOUT)
                except Exception:
pass
ret, frame = self._cap.read()
rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return ColorImage(rgb_data, frame=self._frame), None, None | 0.005551 |
def list_users_in_group_category(self, group_category_id, search_term=None, unassigned=None):
"""
List users in group category.
Returns a list of users in the group category.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_category_id
"""ID"""
path["group_category_id"] = group_category_id
# OPTIONAL - search_term
"""The partial name or full ID of the users to match and return in the results
list. Must be at least 3 characters."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - unassigned
"""Set this value to true if you wish only to search unassigned users in the
group category."""
if unassigned is not None:
params["unassigned"] = unassigned
self.logger.debug("GET /api/v1/group_categories/{group_category_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}/users".format(**path), data=data, params=params, all_pages=True) | 0.005733 |
def _read_vtc(vtc_file):
"""Read the VTC file.
Parameters
----------
vtc_file : str
path to vtc file
Returns
-------
mpg_file : list of str
list of avi files
start_time : list of datetime
list of start time of the avi files
end_time : list of datetime
list of end time of the avi files
"""
with vtc_file.open('rb') as f:
filebytes = f.read()
hdr = {}
hdr['file_guid'] = hexlify(filebytes[:16])
    # not sure about the 4 bytes in between
i = 20
mpg_file = []
start_time = []
end_time = []
while i < len(filebytes):
mpg_file.append(_make_str(unpack('c' * 261, filebytes[i:i + 261])))
i += 261
Location = filebytes[i:i + 16]
correct = b'\xff\xfe\xf8^\xfc\xdc\xe5D\x8f\xae\x19\xf5\xd6"\xb6\xd4'
assert Location == correct
i += 16
start_time.append(_filetime_to_dt(unpack('<q',
filebytes[i:(i + 8)])[0]))
i += 8
end_time.append(_filetime_to_dt(unpack('<q',
filebytes[i:(i + 8)])[0]))
i += 8
return mpg_file, start_time, end_time | 0.00082 |
def remove_usb_controller(self, name):
"""Removes a USB controller from the machine.
in name of type str
raises :class:`VBoxErrorObjectNotFound`
            A USB controller with the given name doesn't exist.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
self._call("removeUSBController",
in_p=[name]) | 0.008869 |
def ParsePythonFlags(self, start_line=0):
"""Parse python/swig style flags."""
modname = None # name of current module
modlist = []
flag = None
for line_num in range(start_line, len(self.output)): # collect flags
line = self.output[line_num].rstrip()
if not line: # blank
continue
mobj = self.module_py_re.match(line)
if mobj: # start of a new module
modname = mobj.group(1)
logging.debug('Module: %s' % line)
if flag:
modlist.append(flag)
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
continue
mobj = self.flag_py_re.match(line)
if mobj: # start of a new flag
if flag:
modlist.append(flag)
logging.debug('Flag: %s' % line)
flag = Flag(mobj.group(1), mobj.group(2))
continue
if not flag: # continuation of a flag
logging.error('Flag info, but no current flag "%s"' % line)
mobj = self.flag_default_py_re.match(line)
if mobj: # (default: '...')
flag.default = mobj.group(1)
logging.debug('Fdef: %s' % line)
continue
mobj = self.flag_tips_py_re.match(line)
if mobj: # (tips)
flag.tips = mobj.group(1)
logging.debug('Ftip: %s' % line)
continue
if flag and flag.help:
flag.help += line # multiflags tack on an extra line
else:
logging.info('Extra: %s' % line)
if flag:
modlist.append(flag) | 0.010375 |
def deleteTable(self, login, tableName):
"""
Parameters:
- login
- tableName
"""
self.send_deleteTable(login, tableName)
self.recv_deleteTable() | 0.005747 |
def get_response(self, deflate=True):
"""
        Returns the Logout Response deflated and base64 encoded
:param deflate: It makes the deflate process optional
:type: bool
:return: Logout Response maybe deflated and base64 encoded
:rtype: string
"""
if deflate:
response = OneLogin_Saml2_Utils.deflate_and_base64_encode(self.__logout_response)
else:
response = b64encode(self.__logout_response)
return response | 0.005988 |
def rc_channels_raw_encode(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi):
'''
The RAW values of the RC channels received. The standard PPM
modulation is as follows: 1000 microseconds: 0%, 2000
microseconds: 100%. Individual receivers/transmitters
might violate this specification.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t)
chan1_raw : RC channel 1 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan2_raw : RC channel 2 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan3_raw : RC channel 3 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan4_raw : RC channel 4 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan5_raw : RC channel 5 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan6_raw : RC channel 6 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan7_raw : RC channel 7 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
chan8_raw : RC channel 8 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t)
'''
return MAVLink_rc_channels_raw_message(time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi) | 0.006874 |
def dumps(obj, mesh_filename=None, *args, **kwargs): # pylint: disable=unused-argument
'''
obj: A dictionary mapping names to a 3-dimension array.
mesh_filename: If provided, this value is included in the <DataFileName>
attribute, which Meshlab doesn't seem to use.
TODO Maybe reconstruct this using xml.etree
'''
point_template = '<point x="%f" y="%f" z="%f" name="%s"/>\n'
file_template = """
<!DOCTYPE PickedPoints>
<PickedPoints>
<DocumentData>
<DateTime time="16:00:00" date="2014-12-31"/>
<User name="bodylabs"/>
<DataFileName name="%s"/>
</DocumentData>
%s
</PickedPoints>
"""
from blmath.numerics import isnumericarray
    if not isinstance(obj, dict) or not all(isnumericarray(point) for point in obj.values()):
raise ValueError('obj should be a dict of points')
    points = '\n'.join([point_template % (tuple(xyz) + (name,)) for name, xyz in obj.items()])
return file_template % (mesh_filename, points) | 0.004869 |
def visit_Assign(self, node):
"""
Create Assign node for final Cxx representation.
        It tries to handle multi-assignment like:
>> a = b = c = 2
If only one local variable is assigned, typing is added:
>> int a = 2;
        TODO: Handle the case of multi-assignment for some local variables.
Finally, process OpenMP clause like #pragma omp atomic
"""
if not all(isinstance(n, (ast.Name, ast.Subscript))
for n in node.targets):
raise PythranSyntaxError(
"Must assign to an identifier or a subscript",
node)
value = self.visit(node.value)
targets = [self.visit(t) for t in node.targets]
alltargets = "= ".join(targets)
islocal = (len(targets) == 1 and
isinstance(node.targets[0], ast.Name) and
node.targets[0].id in self.scope[node] and
node.targets[0].id not in self.openmp_deps)
if islocal:
# remove this decls from local decls
self.ldecls.difference_update(t.id for t in node.targets)
# add a local declaration
if self.types[node.targets[0]].iscombined():
alltargets = '{} {}'.format(self.typeof(node.targets[0]),
alltargets)
elif isinstance(self.types[node.targets[0]],
self.types.builder.Assignable):
alltargets = '{} {}'.format(
self.types.builder.Assignable(
self.types.builder.NamedType(
'decltype({})'.format(value))),
alltargets)
else:
assert isinstance(self.types[node.targets[0]],
self.types.builder.Lazy)
alltargets = '{} {}'.format(
self.types.builder.Lazy(
self.types.builder.NamedType(
'decltype({})'.format(value))),
alltargets)
stmt = Assign(alltargets, value)
return self.process_omp_attachements(node, stmt) | 0.00091 |
def _check_times(self, min_times, max_times, step):
'''
Make sure that the arguments are valid
:raises: KittyException if not valid
'''
kassert.is_int(min_times)
kassert.is_int(max_times)
kassert.is_int(step)
        if not ((min_times >= 0) and (max_times > 0) and (max_times >= min_times) and (step > 0)):
            raise KittyException('one of the checks failed: min_times(%d) >= 0, max_times(%d) > 0, max_times >= min_times, step(%d) > 0' % (min_times, max_times, step)) | 0.007752
def required(self, fn):
"""Request decorator. Forces authentication."""
@functools.wraps(fn)
def decorated(*args, **kwargs):
if (not self._check_auth()
# Don't try to force authentication if the request is part
# of the authentication process - otherwise we end up in a
# loop.
and request.blueprint != self.blueprint.name):
return redirect(url_for("%s.login" % self.blueprint.name,
next=request.url))
return fn(*args, **kwargs)
return decorated | 0.008104 |
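A wiring sketch (not runnable as-is): 'auth' stands for an instance of the class that defines required(), already initialized against a Flask app:

# @app.route('/dashboard')
# @auth.required
# def dashboard():
#     return 'members only'
#
# Unauthenticated requests are redirected to the blueprint's login view with
# ?next=<original URL>, so the visitor returns here after logging in.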
def is_anagram(s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
maps = {}
mapt = {}
for i in s:
maps[i] = maps.get(i, 0) + 1
for i in t:
mapt[i] = mapt.get(i, 0) + 1
return maps == mapt | 0.004065 |
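Quick checks of the counting approach:

print(is_anagram('listen', 'silent'))  # True  -- identical letter counts
print(is_anagram('rat', 'car'))        # False -- 't' vs 'c'
print(is_anagram('aab', 'ab'))         # False -- multiplicities matter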
def Tautoignition(CASRN, AvailableMethods=False, Method=None):
    r'''This function handles the retrieval or calculation of a chemical's
    autoignition temperature. Lookup is based on CASRNs. No predictive methods
    are currently implemented. Will automatically select a data source to use
    if no Method is provided; returns None if the data is not available.
    Preferred source is 'IEC 60079-20-1 (2010)' [1]_, with the secondary source
    'NFPA 497 (2008)' [2]_ having very similar data.
Examples
--------
>>> Tautoignition(CASRN='71-43-2')
771.15
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
Tautoignition : float
Autoignition point of the chemical, [K]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain Tautoignition with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
Tautoignition_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
Tautoignition for the desired chemical, and will return methods
instead of Tautoignition
Notes
-----
References
----------
.. [1] IEC. “IEC 60079-20-1:2010 Explosive atmospheres - Part 20-1:
Material characteristics for gas and vapour classification - Test
methods and data.” https://webstore.iec.ch/publication/635. See also
https://law.resource.org/pub/in/bis/S05/is.iec.60079.20.1.2010.pdf
.. [2] National Fire Protection Association. NFPA 497: Recommended
Practice for the Classification of Flammable Liquids, Gases, or Vapors
and of Hazardous. NFPA, 2008.
'''
def list_methods():
methods = []
if CASRN in IEC_2010.index and not np.isnan(IEC_2010.at[CASRN, 'Tautoignition']):
methods.append(IEC)
if CASRN in NFPA_2008.index and not np.isnan(NFPA_2008.at[CASRN, 'Tautoignition']):
methods.append(NFPA)
methods.append(NONE)
return methods
if AvailableMethods:
return list_methods()
if not Method:
Method = list_methods()[0]
if Method == IEC:
return float(IEC_2010.at[CASRN, 'Tautoignition'])
elif Method == NFPA:
return float(NFPA_2008.at[CASRN, 'Tautoignition'])
elif Method == NONE:
return None
else:
        raise Exception('Failure in function') | 0.001171
def _edit_task_config(env, task_config, confirm):
""" Launches text editor to edit provided task configuration file.
`env`
Runtime ``Environment`` instance.
`task_config`
Path to task configuration file.
`confirm`
If task config is invalid after edit, prompt to re-edit.
Return boolean.
* Raises ``InvalidTaskConfig`` if edited task config fails to parse
and `confirm` is ``False``.
"""
# get editor program
if common.IS_MACOSX:
def_editor = 'open'
else:
def_editor = 'vi'
editor = os.environ.get('EDITOR', def_editor)
def _edit_file(filename):
""" Launches editor for given filename.
"""
proc = subprocess.Popen('{0} {1}'.format(editor, filename),
shell=True)
proc.communicate()
if proc.returncode == 0:
try:
# parse temp configuration file
parser_ = parser.parse_config(filename, 'task')
registration.run_option_hooks(parser_,
disable_missing=False)
except (parser.ParseError, errors.InvalidTaskConfig) as exc:
reason = unicode(getattr(exc, 'reason', exc))
raise errors.InvalidTaskConfig(task_config, reason=reason)
return True
else:
return False
try:
# create temp copy of task config
fd, tmpname = tempfile.mkstemp(suffix='.cfg', prefix='focus_')
with open(task_config, 'r') as file_:
os.write(fd, file_.read())
os.close(fd)
while True:
try:
# launch editor
if not _edit_file(tmpname):
return False
# overwrite original with temp
with open(tmpname, 'r') as temp:
with open(task_config, 'w', 0) as config:
config.write(temp.read())
return True
except errors.InvalidTaskConfig as exc:
if not confirm:
raise # reraise
# prompt to re-edit
env.io.error(unicode(exc))
while True:
try:
resp = env.io.prompt('Would you like to retry? (y/n) ')
resp = resp.strip().lower()
except KeyboardInterrupt:
return True
if resp == 'y':
break
elif resp == 'n':
return True
except OSError:
return False
finally:
common.safe_remove_file(tmpname) | 0.000361 |
def page_down(self, n=1, interval=0, pre_dl=None, post_dl=None):
"""Pres page_down key n times.
**中文文档**
按 page_down 键n次。
"""
self.delay(pre_dl)
self.k.tap_key(self.k.page_down, n, interval)
self.delay(post_dl) | 0.007463 |
def get_d_moments(model,x):
'''
Gradients with respect to x of the moments (mean and sdev.) of the GP
:param model: GPy model.
:param x: location where the gradients are evaluated.
'''
input_dim = model.input_dim
x = reshape(x,input_dim)
_, v = model.predict(x)
dmdx, dvdx = model.predictive_gradients(x)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return (dmdx, dsdx) | 0.011905 |