code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
---|---|
def obj_res(data, fail_on=['type', 'obj', 'res']):
"""
Given some CLI input data,
Returns the following and their types:
obj - the role grantee
res - the resource that the role applies to
"""
errors = []
if not data.get('type', None) and 'type' in fail_on:
errors += ['You must provide a role type to use this command.']
# Find the grantee, and remove them from resource_list
obj = None
obj_type = None
for fd in ACTOR_FIELDS:
if data.get(fd, False):
if not obj:
obj = data[fd]
obj_type = fd
else:
errors += ['You can not give a role to a user '
'and team at the same time.']
break
if not obj and 'obj' in fail_on:
errors += ['You must specify either user or '
'team to use this command.']
# Out of the resource list, pick out available valid resource field
res = None
res_type = None
for fd in RESOURCE_FIELDS:
if data.get(fd, False):
if not res:
res = data[fd]
res_type = fd
if res_type == 'target_team':
res_type = 'team'
else:
errors += ['You can only give a role to one '
'type of resource at a time.']
break
if not res and 'res' in fail_on:
errors += ['You must specify a target resource '
'to use this command.']
if errors:
raise exc.UsageError("\n".join(errors))
return obj, obj_type, res, res_type | Given some CLI input data,
Returns the following and their types:
obj - the role grantee
res - the resource that the role applies to |
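A minimal usage sketch (hypothetical field names; in the real module ACTOR_FIELDS and RESOURCE_FIELDS define which keys count as grantees and resources, e.g. 'user'/'team' and 'project'):

# Assuming ACTOR_FIELDS includes 'user' and RESOURCE_FIELDS includes 'project':
data = {'type': 'admin', 'user': 42, 'project': 7}
obj, obj_type, res, res_type = obj_res(data)
# obj == 42, obj_type == 'user', res == 7, res_type == 'project'
# Supplying both a user and a team, or omitting the resource, raises exc.UsageError.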
def _sentence_context(match, language='latin', case_insensitive=True):
"""Take one incoming regex match object and return the sentence in which
the match occurs.
:rtype : str
:param match: regex.match
:param language: str
"""
language_punct = {'greek': r'\.|;',
'latin': r'\.|\?|!'}
assert language in language_punct.keys(), \
'Available punctuation schemes: {}'.format(language_punct.keys())
start = match.start()
end = match.end()
window = 1000
snippet_left = match.string[start - window:start + 1]
snippet_right = match.string[end:end + window]
re_match = match.string[match.start():match.end()]
comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1)
# Left
left_punct = []
for punct in comp_sent_boundary.finditer(snippet_left):
end = punct.end()
left_punct.append(end)
try:
last_period = left_punct.pop() + 1
except IndexError:
last_period = 0
# Right
right_punct = []
for punct in comp_sent_boundary.finditer(snippet_right):
end = punct.end()
right_punct.append(end)
try:
first_period = right_punct.pop(0)
except IndexError:
first_period = 0
sentence = snippet_left[last_period:-1] + '*' + re_match + '*' + snippet_right[0:first_period]
return sentence | Take one incoming regex match object and return the sentence in which
the match occurs.
:rtype : str
:param match: regex.match
:param language: str |
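A usage sketch, assuming the third-party regex module (which the function body relies on) is available; the returned sentence has the match wrapped in asterisks:

import regex

text = "Gallia est omnis divisa in partes tres. Quarum unam incolunt Belgae."
match = regex.search("partes", text)
print(_sentence_context(match, language='latin'))
# -> "Gallia est omnis divisa in *partes* tres."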
def _graphite_url(self, query, raw_data=False, graphite_url=None):
"""Build Graphite URL."""
query = escape.url_escape(query)
graphite_url = graphite_url or self.reactor.options.get('public_graphite_url')
url = "{base}/render/?target={query}&from=-{from_time}&until=-{until}".format(
base=graphite_url, query=query,
from_time=self.from_time.as_graphite(),
until=self.until.as_graphite(),
)
if raw_data:
url = "{}&format=raw".format(url)
return url | Build Graphite URL. |
def create_graph_rules(address_mapper):
"""Creates tasks used to parse Structs from BUILD files.
:param address_mapper: The AddressMapper instance used to parse Structs from BUILD files.
:param symbol_table: A SymbolTable instance to provide symbols for Address lookups.
"""
@rule(AddressMapper, [])
def address_mapper_singleton():
return address_mapper
return [
address_mapper_singleton,
# BUILD file parsing.
hydrate_struct,
parse_address_family,
# Spec handling: locate directories that contain build files, and request
# AddressFamilies for each of them.
addresses_from_address_families,
# Root rules representing parameters that might be provided via root subjects.
RootRule(Address),
RootRule(BuildFileAddress),
RootRule(BuildFileAddresses),
RootRule(Specs),
] | Creates tasks used to parse Structs from BUILD files.
:param address_mapper: The AddressMapper instance used to parse Structs from BUILD files.
:param symbol_table: A SymbolTable instance to provide symbols for Address lookups. |
def _repack_options(options):
'''
Repack the options data
'''
return dict(
[
(six.text_type(x), _normalize(y))
for x, y in six.iteritems(salt.utils.data.repack_dictlist(options))
]
) | Repack the options data |
def calculate_leapdays(init_date, final_date):
"""Currently unsupported, it only works for differences in years."""
leap_days = (final_date.year - 1) // 4 - (init_date.year - 1) // 4
leap_days -= (final_date.year - 1) // 100 - (init_date.year - 1) // 100
leap_days += (final_date.year - 1) // 400 - (init_date.year - 1) // 400
# TODO: Internal date correction (e.g. init_date is 1-March or later)
return datetime.timedelta(days=leap_days) | Note: this only works for differences in whole years; partial-year offsets are not yet handled (see the TODO below). |
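A worked example with plain datetime.date inputs: between 1 Jan 2000 and 1 Jan 2001 the only leap day is 29 Feb 2000, so the result is one day.

import datetime

init_date = datetime.date(2000, 1, 1)
final_date = datetime.date(2001, 1, 1)
# (2000//4 - 1999//4) - (2000//100 - 1999//100) + (2000//400 - 1999//400) = 1 - 1 + 1 = 1
print(calculate_leapdays(init_date, final_date))  # 1 day, 0:00:00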
def get_auth_header(self, user_payload):
"""
Returns the value for authorization header
Args:
user_payload(dict, required): A `dict` containing required information
to create authentication token
"""
auth_token = self.get_auth_token(user_payload)
return '{auth_header_prefix} {auth_token}'.format(
auth_header_prefix=self.auth_header_prefix, auth_token=auth_token
) | Returns the value for authorization header
Args:
user_payload(dict, required): A `dict` containing required information
to create authentication token |
def to_cloudformation(self):
"""Generates CloudFormation resources from a SAM API resource
:returns: a tuple containing the RestApi, Deployment, and Stage for an empty Api.
:rtype: tuple
"""
rest_api = self._construct_rest_api()
deployment = self._construct_deployment(rest_api)
swagger = None
if rest_api.Body is not None:
swagger = rest_api.Body
elif rest_api.BodyS3Location is not None:
swagger = rest_api.BodyS3Location
stage = self._construct_stage(deployment, swagger)
permissions = self._construct_authorizer_lambda_permission()
return rest_api, deployment, stage, permissions | Generates CloudFormation resources from a SAM API resource
:returns: a tuple containing the RestApi, Deployment, and Stage for an empty Api.
:rtype: tuple |
def filterMapNames(regexText, records=getIndex(), excludeRegex=False, closestMatch=True):
"""matches each record against regexText according to parameters
NOTE: the code could be written more simply, but this is loop-optimized to
scale better with a large number of map records"""
bestScr = 99999 # a big enough number to not be a valid file system path
regex = re.compile(regexText, flags=re.IGNORECASE)
ret = []
if excludeRegex: # match only records that do NOT contain regex
if regexText and closestMatch: # then maps with fewer characters are better matches
for m in list(records):
if re.search(regex, m.name): continue # map must NOT contain specified phrase
score = len(m.name) # the shortest matching name has the largest fraction of matching characters
if score == bestScr:
bestScr = score
ret.append(m)
elif score < bestScr: # new set of best maps
bestScr = score
ret = [m]
else: # all maps that match regex are included
for m in list(records):
if re.search(regex, m.name): continue # map must NOT contain specified phrase
ret.append(m) # any mapname containing regex matches
else: # only match records that contain regex
if regexText and closestMatch: # then maps with fewer characters are better matches
for m in records:
if not re.search(regex, m.name): continue # map must contain the specified phrase
score = len(m.name) # the shortest matching name has the largest fraction of matching characters
if score == bestScr:
bestScr = score
ret.append(m)
elif score < bestScr: # new group of best maps
bestScr = score
ret = [m]
else: # all maps that match regex are included
for m in records:
if not re.search(regex, m.name): continue # map must contain the specified phrase
ret.append(m) # any mapname containing regex matches
return ret | matches each record against regexText according to parameters
NOTE: the code could be written more simply, but this is loop-optimized to
scale better with a large number of map records |
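A self-contained sketch of the closest-match behaviour, using a stand-in record type (real records come from getIndex() and only need a .name attribute here):

import re
from collections import namedtuple

MapRecord = namedtuple("MapRecord", "name")  # stand-in for the real map record class
records = [MapRecord("AbyssalReef"), MapRecord("AbyssalReefLE"), MapRecord("Catalyst")]

# With closestMatch=True, only the shortest names that still match are kept,
# i.e. the names with the highest fraction of matching characters.
print([m.name for m in filterMapNames("abyssal", records=records)])  # ['AbyssalReef']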
def parse(cls, fptr, offset, length):
"""Parse JPX free box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
FreeBox
Instance of the current free box.
"""
# Must seek to end of box.
nbytes = offset + length - fptr.tell()
fptr.read(nbytes)
return cls(length=length, offset=offset) | Parse JPX free box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
FreeBox
Instance of the current free box. |
def config(config, fork_name="", origin_name=""):
"""Setting various configuration options"""
state = read(config.configfile)
any_set = False
if fork_name:
update(config.configfile, {"FORK_NAME": fork_name})
success_out("fork-name set to: {}".format(fork_name))
any_set = True
if origin_name:
update(config.configfile, {"ORIGIN_NAME": origin_name})
success_out("origin-name set to: {}".format(origin_name))
any_set = True
if not any_set:
info_out("Fork-name: {}".format(state["FORK_NAME"])) | Setting various configuration options |
def cmd(command, *args, **kwargs):
'''
run commands from __proxy__
:mod:`salt.proxy.onyx<salt.proxy.onyx>`
command
function from `salt.proxy.onyx` to run
args
positional args to pass to `command` function
kwargs
key word arguments to pass to `command` function
.. code-block:: bash
salt '*' onyx.cmd sendline 'show ver'
salt '*' onyx.cmd show_run
salt '*' onyx.cmd check_password username=admin
password='$5$lkjsdfoi$blahblahblah' encrypted=True
'''
proxy_prefix = __opts__['proxy']['proxytype']
proxy_cmd = '.'.join([proxy_prefix, command])
if proxy_cmd not in __proxy__:
return False
for k in list(kwargs):
if k.startswith('__pub_'):
kwargs.pop(k)
return __proxy__[proxy_cmd](*args, **kwargs) | run commands from __proxy__
:mod:`salt.proxy.onyx<salt.proxy.onyx>`
command
function from `salt.proxy.onyx` to run
args
positional args to pass to `command` function
kwargs
key word arguments to pass to `command` function
.. code-block:: bash
salt '*' onyx.cmd sendline 'show ver'
salt '*' onyx.cmd show_run
salt '*' onyx.cmd check_password username=admin
password='$5$lkjsdfoi$blahblahblah' encrypted=True |
def _read_set(ctx: ReaderContext) -> lset.Set:
"""Return a set from the input stream."""
start = ctx.reader.advance()
assert start == "{"
def set_if_valid(s: Collection) -> lset.Set:
if len(s) != len(set(s)):
raise SyntaxError("Duplicated values in set")
return lset.set(s)
return _read_coll(ctx, set_if_valid, "}", "set") | Return a set from the input stream. |
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags) | User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it. |
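The standard-library tempfile.mkstemp has the same contract; a minimal usage sketch:

import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".log", prefix="demo-")
try:
    os.write(fd, b"hello\n")   # fd is a raw descriptor from os.open, not a file object
finally:
    os.close(fd)
    os.remove(path)            # the caller must delete the file when done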
def username_user_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
user_password = ET.SubElement(username, "user-password")
user_password.text = kwargs.pop('user_password')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'"""
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
result = []
for p in self.all_:
if p.feature == feature:
result.append(p)
return result | Returns all contained properties associated with 'feature' |
def hflip(img):
"""Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Horizontally flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT) | Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Horizontally flipped image. |
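A usage sketch with Pillow; hflip is a thin wrapper around Image.transpose:

from PIL import Image

img = Image.new("RGB", (4, 2), "white")   # any PIL image, e.g. Image.open("photo.jpg")
flipped = hflip(img)                      # same as img.transpose(Image.FLIP_LEFT_RIGHT)
assert flipped.size == img.size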
def table_add(tab, data, col):
"""
Function to parse dictionary list **data** and add the data to table **tab** for column **col**
Parameters
----------
tab: Table class
Table to store values
data: list
Dictionary list from the SQL query
col: str
Column name (ie, dictionary key) for the column to add
"""
x = []
for i in range(len(data)):
# If the particular key is not present, use a place-holder value (used for photometry tables)
if col not in data[i]:
temp = ''
else:
temp = data[i][col]
# Fix up None elements
if temp is None: temp = ''
x.append(temp)
print('Adding column {}'.format(col))
tab.add_column(Column(x, name=col)) | Function to parse dictionary list **data** and add the data to table **tab** for column **col**
Parameters
----------
tab: Table class
Table to store values
data: list
Dictionary list from the SQL query
col: str
Column name (ie, dictionary key) for the column to add |
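A usage sketch assuming an astropy Table (consistent with the Column(...) call in the body); missing keys become empty strings:

from astropy.table import Table

tab = Table()
data = [{"ra": 10.5, "band": "J"}, {"ra": 11.2}]   # second row has no 'band' entry
table_add(tab, data, "ra")
table_add(tab, data, "band")                       # missing value filled with ''
print(tab)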
def lintersects(self, span):
"""
If this span intersects the left (starting) side of the given span.
"""
if isinstance(span, list):
return [sp for sp in span if self._lintersects(sp)]
return self._lintersects(span) | If this span intersects the left (starting) side of the given span. |
def reordi(iorder, ndim, array):
"""
Re-order the elements of an integer array according to
a given order vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordi_c.html
:param iorder: Order vector to be used to re-order array.
:type iorder: Array of ints
:param ndim: Dimension of array.
:type ndim: int
:param array: Array to be re-ordered.
:type array: Array of ints
:return: Re-ordered Array.
:rtype: Array of ints
"""
iorder = stypes.toIntVector(iorder)
ndim = ctypes.c_int(ndim)
array = stypes.toIntVector(array)
libspice.reordi_c(iorder, ndim, array)
return stypes.cVectorToPython(array) | Re-order the elements of an integer array according to
a given order vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordi_c.html
:param iorder: Order vector to be used to re-order array.
:type iorder: Array of ints
:param ndim: Dimension of array.
:type ndim: int
:param array: Array to be re-ordered.
:type array: Array of ints
:return: Re-ordered Array.
:rtype: Array of ints |
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch) | Removes a published branch from the remote repository. |
def share(track_id=None, url=None, users=None):
"""
Returns list of users track has been shared with.
Either track or url need to be provided.
"""
client = get_client()
if url:
track_id = client.get('/resolve', url=url).id
if not users:
return client.get('/tracks/%d/permissions' % track_id)
permissions = {'user_id': []}
for username in users:
# check cache for user
user = settings.users.get(username, None)
if user:
permissions['user_id'].append(user['id'])
else:
user = client.get('/resolve', url='http://soundcloud.com/%s' % username)
permissions['user_id'].append(user.id)
settings.users[username] = user.obj
settings.save()
return client.put('/tracks/%d/permissions' % track_id, permissions=permissions) | Returns list of users track has been shared with.
Either track or url need to be provided. |
def delete_host(zone, name, nameserver='127.0.0.1', timeout=5, port=53,
**kwargs):
'''
Delete the forward and reverse records for a host.
Returns true if any records are deleted.
CLI Example:
.. code-block:: bash
salt ns1 ddns.delete_host example.com host1
'''
fqdn = '{0}.{1}'.format(name, zone)
request = dns.message.make_query(fqdn, 'A')
answer = dns.query.udp(request, nameserver, timeout, port)
try:
ips = [i.address for i in answer.answer[0].items]
except IndexError:
ips = []
res = delete(zone, name, nameserver=nameserver, timeout=timeout, port=port,
**kwargs)
fqdn = fqdn + '.'
for ip in ips:
parts = ip.split('.')[::-1]
popped = []
# Iterate over possible reverse zones
while len(parts) > 1:
p = parts.pop(0)
popped.append(p)
zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
name = '.'.join(popped)
ptr = delete(zone, name, 'PTR', fqdn, nameserver=nameserver,
timeout=timeout, port=port, **kwargs)
if ptr:
res = True
return res | Delete the forward and reverse records for a host.
Returns true if any records are deleted.
CLI Example:
.. code-block:: bash
salt ns1 ddns.delete_host example.com host1 |
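A standalone sketch of how the loop above derives candidate reverse zones and PTR names from one address (illustrative IP):

ip = "203.0.113.7"
parts = ip.split(".")[::-1]        # ['7', '113', '0', '203']
popped = []
while len(parts) > 1:
    popped.append(parts.pop(0))
    zone = "{0}.{1}".format(".".join(parts), "in-addr.arpa.")
    name = ".".join(popped)
    print(zone, name)
# 113.0.203.in-addr.arpa. 7
# 0.203.in-addr.arpa. 7.113
# 203.in-addr.arpa. 7.113.0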
def add_multiifo_input_list_opt(self, opt, inputs):
""" Add an option that determines a list of inputs from multiple
detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2
.....
"""
# NOTE: Here we have to use the raw arguments functionality as the
# file and ifo are not space separated.
self.add_raw_arg(opt)
self.add_raw_arg(' ')
for infile in inputs:
self.add_raw_arg(infile.ifo)
self.add_raw_arg(':')
self.add_raw_arg(infile.name)
self.add_raw_arg(' ')
self._add_input(infile) | Add an option that determines a list of inputs from multiple
detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2
..... |
def watch_from_file(connection, file_name):
""" Start watching a new volume
:type connection: boto.ec2.connection.EC2Connection
:param connection: EC2 connection object
:type file_name: str
:param file_name: path to config file
:returns: None
"""
with open(file_name, 'r') as filehandle:
for line in filehandle:  # iterate lines directly (xreadlines is Python 2 only)
volume, interval, retention = line.rstrip().split(',')
watch(
connection,
get_volume_id(connection, volume),
interval, retention) | Start watching a new volume
:type connection: boto.ec2.connection.EC2Connection
:param connection: EC2 connection object
:type file_name: str
:param file_name: path to config file
:returns: None |
def plot_precision_recall_curve(y_true, y_probas,
title='Precision-Recall Curve',
curves=('micro', 'each_class'), ax=None,
figsize=None, cmap='nipy_spectral',
title_fontsize="large",
text_fontsize="medium"):
"""Generates the Precision Recall Curve from labels and probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
curves (array-like): A listing of which curves should be plotted on the
resulting plot. Defaults to `("micro", "each_class")`
i.e. "micro" for micro-averaged curve
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.plot_precision_recall_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_precision_recall_curve.png
:align: center
:alt: Precision Recall Curve
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
classes = np.unique(y_true)
probas = y_probas
if 'micro' not in curves and 'each_class' not in curves:
raise ValueError('Invalid argument for curves as it '
'only takes "micro" or "each_class"')
# Compute Precision-Recall curve and area for each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(len(classes)):
precision[i], recall[i], _ = precision_recall_curve(
y_true, probas[:, i], pos_label=classes[i])
y_true = label_binarize(y_true, classes=classes)
if len(classes) == 2:
y_true = np.hstack((1 - y_true, y_true))
for i in range(len(classes)):
average_precision[i] = average_precision_score(y_true[:, i],
probas[:, i])
# Compute micro-average ROC curve and ROC area
micro_key = 'micro'
i = 0
while micro_key in precision:
i += 1
micro_key += str(i)
precision[micro_key], recall[micro_key], _ = precision_recall_curve(
y_true.ravel(), probas.ravel())
average_precision[micro_key] = average_precision_score(y_true, probas,
average='micro')
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
if 'each_class' in curves:
for i in range(len(classes)):
color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
ax.plot(recall[i], precision[i], lw=2,
label='Precision-recall curve of class {0} '
'(area = {1:0.3f})'.format(classes[i],
average_precision[i]),
color=color)
if 'micro' in curves:
ax.plot(recall[micro_key], precision[micro_key],
label='micro-average Precision-recall curve '
'(area = {0:0.3f})'.format(average_precision[micro_key]),
color='navy', linestyle=':', linewidth=4)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='best', fontsize=text_fontsize)
return ax | Generates the Precision Recall Curve from labels and probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
curves (array-like): A listing of which curves should be plotted on the
resulting plot. Defaults to `("micro", "each_class")`
i.e. "micro" for micro-averaged curve
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.plot_precision_recall_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_precision_recall_curve.png
:align: center
:alt: Precision Recall Curve |
def assign(self, value, termenc):
"""
>>> scanner = DefaultScanner()
>>> scanner.assign("01234", "ascii")
>>> scanner._data
u'01234'
"""
if self._termenc != termenc:
self._decoder = codecs.getincrementaldecoder(termenc)(errors='replace')
self._termenc = termenc
self._data = self._decoder.decode(value) | >>> scanner = DefaultScanner()
>>> scanner.assign("01234", "ascii")
>>> scanner._data
u'01234' |
def active_tcp():
'''
Return a dict containing information on all of the running TCP connections (currently linux and solaris only)
.. versionchanged:: 2015.8.4
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.active_tcp
'''
if __grains__['kernel'] == 'Linux':
return salt.utils.network.active_tcp()
elif __grains__['kernel'] == 'SunOS':
# let's use netstat to mimic linux as closely as possible
ret = {}
for connection in _netstat_sunos():
if not connection['proto'].startswith('tcp'):
continue
if connection['state'] != 'ESTABLISHED':
continue
ret[len(ret)+1] = {
'local_addr': '.'.join(connection['local-address'].split('.')[:-1]),
'local_port': '.'.join(connection['local-address'].split('.')[-1:]),
'remote_addr': '.'.join(connection['remote-address'].split('.')[:-1]),
'remote_port': '.'.join(connection['remote-address'].split('.')[-1:])
}
return ret
elif __grains__['kernel'] == 'AIX':
# let's use netstat to mimic linux as closely as possible
ret = {}
for connection in _netstat_aix():
if not connection['proto'].startswith('tcp'):
continue
if connection['state'] != 'ESTABLISHED':
continue
ret[len(ret)+1] = {
'local_addr': '.'.join(connection['local-address'].split('.')[:-1]),
'local_port': '.'.join(connection['local-address'].split('.')[-1:]),
'remote_addr': '.'.join(connection['remote-address'].split('.')[:-1]),
'remote_port': '.'.join(connection['remote-address'].split('.')[-1:])
}
return ret
else:
return {} | Return a dict containing information on all of the running TCP connections (currently linux and solaris only)
.. versionchanged:: 2015.8.4
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.active_tcp |
def to_bool(s):
"""
Convert string `s` into a boolean. `s` can be 'true', 'True', 1, 'false',
'False', 0.
Examples:
>>> to_bool("true")
True
>>> to_bool("0")
False
>>> to_bool(True)
True
"""
if isinstance(s, bool):
return s
elif s.lower() in ['true', '1']:
return True
elif s.lower() in ['false', '0']:
return False
else:
raise ValueError("Can't cast '%s' to bool" % (s)) | Convert string `s` into a boolean. `s` can be 'true', 'True', 1, 'false',
'False', 0.
Examples:
>>> to_bool("true")
True
>>> to_bool("0")
False
>>> to_bool(True)
True |
def _update(self, layer=None):
"""
Update layers in model.
"""
meta = getattr(self, ModelBase._meta_attr)
if not layer:
layers = self.layers
else:
# convert non-sequence to tuple
layers = _listify(layer)
for layer in layers:
# relative path to layer files from model file
path = os.path.abspath(os.path.join(meta.modelpath, layer))
getattr(self, layer).load(path) | Update layers in model. |
def cmd_set(context):
"""
Set the new "current" value for a key.
If the existing current version and the new version have identical /value/ and /status/,
then nothing is written, to avoid stacking up redundant entries in the version table.
Args:
context: a populated EFVersionContext object
"""
# If key value is a special symbol, see if this env allows it
if context.value in EFConfig.SPECIAL_VERSIONS and context.env_short not in EFConfig.SPECIAL_VERSION_ENVS:
fail("special version: {} not allowed in env: {}".format(context.value, context.env_short))
# If key value is a special symbol, the record cannot be marked "stable"
if context.value in EFConfig.SPECIAL_VERSIONS and context.stable:
fail("special versions such as: {} cannot be marked 'stable'".format(context.value))
# Resolve any references
if context.value == "=prod":
context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "prod", context.service_name))
elif context.value == "=staging":
context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "staging", context.service_name))
elif context.value == "=latest":
if not EFConfig.VERSION_KEYS[context.key]["allow_latest"]:
fail("=latest cannot be used with key: {}".format(context.key))
func_name = "_getlatest_" + context.key.replace("-", "_")
if func_name in globals() and isfunction(globals()[func_name]):
context.value = globals()[func_name](context)
else:
raise RuntimeError("{} version for {}/{} is '=latest' but can't look up because method not found: {}".format(
context.key, context.env, context.service_name, func_name))
# precheck to confirm coherent world state before attempting set - whatever that means for the current key type
try:
precheck(context)
except Exception as e:
fail("Precheck failed: {}".format(e.message))
s3_key = "{}/{}/{}".format(context.service_name, context.env, context.key)
s3_version_status = EFConfig.S3_VERSION_STATUS_STABLE if context.stable else EFConfig.S3_VERSION_STATUS_UNDEFINED
# If the set would put a value and status that are the same as the existing 'current' value/status, don't do it
context.limit = 1
current_version = get_versions(context)
# If there is no 'current version' it's ok, just means the set will write the first entry
if len(current_version) == 1 and current_version[0].status == s3_version_status and \
current_version[0].value == context.value:
print("Version not written because current version and new version have identical value and status: {} {}"
.format(current_version[0].value, current_version[0].status))
return
if not context.commit:
print("=== DRY RUN ===\nUse --commit to set value\n=== DRY RUN ===")
print("would set key: {} with value: {} {} {} {} {}".format(
s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
else:
context.aws_client("s3").put_object(
ACL='bucket-owner-full-control',
Body=context.value,
Bucket=EFConfig.S3_VERSION_BUCKET,
ContentEncoding=EFConfig.S3_VERSION_CONTENT_ENCODING,
Key=s3_key,
Metadata={
EFConfig.S3_VERSION_BUILDNUMBER_KEY: context.build_number,
EFConfig.S3_VERSION_COMMITHASH_KEY: context.commit_hash,
EFConfig.S3_VERSION_LOCATION_KEY: context.location,
EFConfig.S3_VERSION_MODIFIEDBY_KEY: context.aws_client("sts").get_caller_identity()["Arn"],
EFConfig.S3_VERSION_STATUS_KEY: s3_version_status
},
StorageClass='STANDARD'
)
print("set key: {} with value: {} {} {} {} {}".format(
s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status)) | Set the new "current" value for a key.
If the existing current version and the new version have identical /value/ and /status/,
then nothing is written, to avoid stacking up redundant entries in the version table.
Args:
context: a populated EFVersionContext object |
def get_offset_range(self, row_offset, column_offset):
"""
Gets an object which represents a range that's offset from the specified range.
The dimension of the returned range will match this range.
If the resulting range is forced outside the bounds of the worksheet grid,
an exception will be thrown.
:param int row_offset: The number of rows (positive, negative, or 0)
by which the range is to be offset.
:param int column_offset: The number of columns (positive, negative, or 0)
by which the range is to be offset.
:return: Range
"""
return self._get_range('offset_range', rowOffset=row_offset, columnOffset=column_offset) | Gets an object which represents a range that's offset from the specified range.
The dimension of the returned range will match this range.
If the resulting range is forced outside the bounds of the worksheet grid,
an exception will be thrown.
:param int row_offset: The number of rows (positive, negative, or 0)
by which the range is to be offset.
:param int column_offset: The number of columns (positive, negative, or 0)
by which the range is to be offset.
:return: Range |
def Async(f, n=None, timeout=None):
"""Concise usage for pool.submit.
Basic Usage Async & threads ::
from torequests.main import Async, threads
import time
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
@threads()
def use_decorator(i):
time.sleep(i)
result = 'use_decorator: %s' % i
print(result)
return result
new_use_submit = Async(use_submit)
tasks = [new_use_submit(i) for i in (2, 1, 0)
] + [use_decorator(i) for i in (2, 1, 0)]
print([type(i) for i in tasks])
results = [i.x for i in tasks]
print(results)
# use_submit: 0
# use_decorator: 0
# [<class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>]
# use_submit: 1
# use_decorator: 1
# use_submit: 2
# use_decorator: 2
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
"""
return threads(n=n, timeout=timeout)(f) | Concise usage for pool.submit.
Basic Usage Async & threads ::
from torequests.main import Async, threads
import time
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
@threads()
def use_decorator(i):
time.sleep(i)
result = 'use_decorator: %s' % i
print(result)
return result
new_use_submit = Async(use_submit)
tasks = [new_use_submit(i) for i in (2, 1, 0)
] + [use_decorator(i) for i in (2, 1, 0)]
print([type(i) for i in tasks])
results = [i.x for i in tasks]
print(results)
# use_submit: 0
# use_decorator: 0
# [<class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>]
# use_submit: 1
# use_decorator: 1
# use_submit: 2
# use_decorator: 2
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0'] |
def bus_inspector(self, bus, message):
"""
Inspect the bus for screensaver messages of interest
"""
# We only care about stuff on this interface. We did filter
# for it above, but even so we still hear from ourselves
# (hamster messages).
if message.get_interface() != self.screensaver_uri:
return True
member = message.get_member()
if member in ("SessionIdleChanged", "ActiveChanged"):
logger.debug("%s -> %s" % (member, message.get_args_list()))
idle_state = message.get_args_list()[0]
if idle_state:
self.idle_from = dt.datetime.now()
# from gnome screensaver 2.24 to 2.28 they have switched
# configuration keys and signal types.
# luckily we can determine key by signal type
if member == "SessionIdleChanged":
delay_key = "/apps/gnome-screensaver/idle_delay"
else:
delay_key = "/desktop/gnome/session/idle_delay"
client = gconf.Client.get_default()
self.timeout_minutes = client.get_int(delay_key)
else:
self.screen_locked = False
self.idle_from = None
if member == "ActiveChanged":
# ActiveChanged comes before SessionIdleChanged signal
# as a workaround for pre 2.26, we will wait a second - maybe
# SessionIdleChanged signal kicks in
def dispatch_active_changed(idle_state):
if not self.idle_was_there:
self.emit('idle-changed', idle_state)
self.idle_was_there = False
gobject.timeout_add_seconds(1, dispatch_active_changed, idle_state)
else:
# dispatch idle status change to interested parties
self.idle_was_there = True
self.emit('idle-changed', idle_state)
elif member == "Lock":
# in case of lock, lock signal will be sent first, followed by
# ActiveChanged and SessionIdle signals
logger.debug("Screen Lock Requested")
self.screen_locked = True
return | Inspect the bus for screensaver messages of interest |
def _session(self):
"""The current session used by the client.
The Session object allows you to persist certain parameters across
requests. It also persists cookies across all requests made from
the Session instance, and will use urllib3's connection pooling.
So if you're making several requests to the same host, the underlying
TCP connection will be reused, which can result in a significant
performance increase.
"""
if self._http_session is None:
self._http_session = requests.Session()
self._http_session.headers.update(self._get_headers())
self._http_session.verify = self._verify_https_request()
if all(self._credentials):
username, password = self._credentials
self._http_session.auth = requests_ntlm.HttpNtlmAuth(
username=username, password=password)
return self._http_session | The current session used by the client.
The Session object allows you to persist certain parameters across
requests. It also persists cookies across all requests made from
the Session instance, and will use urllib3's connection pooling.
So if you're making several requests to the same host, the underlying
TCP connection will be reused, which can result in a significant
performance increase. |
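A minimal standalone sketch of the same pattern (a lazily built requests.Session with optional NTLM auth; requests_ntlm is assumed to be installed):

import requests
import requests_ntlm

session = requests.Session()
session.headers.update({"Accept": "application/json"})
session.verify = True                                                # or a CA bundle path / False
session.auth = requests_ntlm.HttpNtlmAuth("DOMAIN\\user", "secret")  # only when credentials are set
# Every call through this session now reuses the pooled TCP connection:
# response = session.get("https://host/api/resource")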
def tag_remove(self, *tags):
""" Return a view with the specified tags removed """
return View({**self.spec, 'tag': list(set(self.tags) - set(tags))}) | Return a view with the specified tags removed |
def google_poem(self, message, topic):
"""make a poem about __: show a google poem about __"""
r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20")
xmldoc = minidom.parseString(r.text)
item_list = xmldoc.getElementsByTagName("suggestion")
context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]}
self.say(rendered_template("gpoem.html", context), message, html=True) | make a poem about __: show a google poem about __ |
def find_module(self, fullname, path=None):
"""
Tell if the module to load can be loaded by
the load_module function, ie: if it is a ``pygal.maps.*``
module.
"""
if fullname.startswith('pygal.maps.') and hasattr(
maps, fullname.split('.')[2]):
return self
return None | Tell if the module to load can be loaded by
the load_module function, ie: if it is a ``pygal.maps.*``
module. |
def savefig(writekey, dpi=None, ext=None):
"""Save current figure to file.
The `filename` is generated as follows:
filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs
"""
if dpi is None:
# we need this because, in notebooks, the internal figures are also influenced by 'savefig.dpi'
if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:
if settings._low_resolution_warning:
logg.warn(
'You are using a low resolution (dpi<150) for saving figures.\n'
'Consider running `set_figure_params(dpi_save=...)`, which will '
'adjust `matplotlib.rcParams[\'savefig.dpi\']`')
settings._low_resolution_warning = False
else:
dpi = rcParams['savefig.dpi']
if not os.path.exists(settings.figdir): os.makedirs(settings.figdir)
if settings.figdir[-1] != '/': settings.figdir += '/'
if ext is None: ext = settings.file_format_figs
filename = settings.figdir + writekey + settings.plot_suffix + '.' + ext
# output the following msg at warning level; it's really important for the user
logg.msg('saving figure to file', filename, v=1)
pl.savefig(filename, dpi=dpi, bbox_inches='tight') | Save current figure to file.
The `filename` is generated as follows:
filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs |
def set_mode(self, mode):
"""
Set *Vim* mode to ``mode``.
Supported modes:
* ``normal``
* ``insert``
* ``command``
* ``visual``
* ``visual-block``
This method behaves as a setter-only property.
Example:
>>> import headlessvim
>>> with headlessvim.open() as vim:
... vim.set_mode('insert')
... vim.mode = 'normal' # also accessible as property
...
:param string mode: *Vim* mode to set
:raises ValueError: if ``mode`` is not supported
"""
keys = '\033\033'
if mode == 'normal':
pass
elif mode == 'insert':
keys += 'i'
elif mode == 'command':
keys += ':'
elif mode == 'visual':
keys += 'v'
elif mode == 'visual-block':
keys += 'V'
else:
raise ValueError('mode {0} is not supported'.format(mode))
self.send_keys(keys) | Set *Vim* mode to ``mode``.
Supported modes:
* ``normal``
* ``insert``
* ``command``
* ``visual``
* ``visual-block``
This method behaves as a setter-only property.
Example:
>>> import headlessvim
>>> with headlessvim.open() as vim:
... vim.set_mode('insert')
... vim.mode = 'normal' # also accessible as property
...
:param string mode: *Vim* mode to set
:raises ValueError: if ``mode`` is not supported |
def insert(self, tag, identifier, parent, data):
"""
Insert the given meta data into the database
:param tag: The tag (equates to meta_data_id)
:param identifier: The identifier (a combination of the meta_data_id and the plate value)
:param parent: The parent plate identifier
:param data: The data (plate value)
:return: None
"""
# First try to add it into the tree
if self.global_plate_definitions.contains(identifier):
raise KeyError("Identifier {} already exists in tree".format(identifier))
self.global_plate_definitions.create_node(tag=tag, identifier=identifier, parent=parent, data=data)
# Now try to add it into the database
with switch_db(MetaDataModel, 'hyperstream'):
meta_data = MetaDataModel(tag=tag, parent=parent, data=data)
meta_data.save()
logging.info("Meta data {} inserted".format(identifier)) | Insert the given meta data into the database
:param tag: The tag (equates to meta_data_id)
:param identifier: The identifier (a combination of the meta_data_id and the plate value)
:param parent: The parent plate identifier
:param data: The data (plate value)
:return: None |
def _get_args_for_reloading():
"""Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading.
"""
rv = [sys.executable]
py_script = sys.argv[0]
if os.name == 'nt' and not os.path.exists(py_script) and \
os.path.exists(py_script + '.exe'):
py_script += '.exe'
rv.append(py_script)
rv.extend(sys.argv[1:])
return rv | Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading. |
def _validate_row_label(dataset, label=None, default_label='__id'):
"""
Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label column name.
"""
## If no label is provided, set it to be a default and add a row number to
# dataset. Check that this new name does not conflict with an existing
# name.
if not label:
## Try a bunch of variations of the default label to find one that's not
# already a column name.
label_name_base = default_label
label = default_label
i = 1
while label in dataset.column_names():
label = label_name_base + '.{}'.format(i)
i += 1
dataset = dataset.add_row_number(column_name=label)
## Validate the label name and types.
if not isinstance(label, str):
raise TypeError("The row label column name '{}' must be a string.".format(label))
if not label in dataset.column_names():
raise ToolkitError("Row label column '{}' not found in the dataset.".format(label))
if not dataset[label].dtype in (str, int):
raise TypeError("Row labels must be integers or strings.")
## Return the modified dataset and label
return dataset, label | Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label column name. |
def get_view_attr(view, key, default=None, cls_name=None):
"""
Get the attributes that was saved for the view
:param view: object (class or instance method)
:param key: string - the key
:param default: mixed - the default value
:param cls_name: str - To pass the class name associated to the view
in the case of decorators that may not give the real class name
:return: mixed
"""
ns = view_namespace(view, cls_name)
if ns:
if ns not in _views_attr:
return default
return _views_attr[ns].get(key, default)
return default | Get the attributes that was saved for the view
:param view: object (class or instance method)
:param key: string - the key
:param default: mixed - the default value
:param cls_name: str - To pass the class name associated to the view
in the case of decorators that may not give the real class name
:return: mixed |
def add_columns(self, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], col_attrs: Dict[str, np.ndarray], *, row_attrs: Dict[str, np.ndarray] = None, fill_values: Dict[str, np.ndarray] = None) -> None:
"""
Add columns of data and attribute values to the dataset.
Args:
layers (dict or numpy.ndarray or LayerManager):
Either:
1) A N-by-M matrix of float32s (N rows, M columns) in this case columns are added at the default layer
2) A dict {layer_name : matrix} specified so that the matrix (N, M) will be added to layer `layer_name`
3) A LayerManager object (such as what is returned by view.layers)
col_attrs (dict):
Column attributes, where keys are attribute names and values are numpy arrays (float or string) of length M
row_attrs (dict):
Optional row attributes, where keys are attribute names and values are numpy arrays (float or string) of length M
fill_values: dictionary of values to use if a column attribute is missing, or "auto" to fill with zeros or empty strings
Returns:
Nothing.
Notes
-----
- This will modify the underlying HDF5 file, which will interfere with any concurrent readers.
- Column attributes in the file that are NOT provided, will be deleted (unless fill value provided).
- Arrays containing NaN should not be provided
"""
if self._file.mode != "r+":
raise IOError("Cannot add columns when connected in read-only mode")
# If this is an empty loom file, just assign the provided row and column attributes, and set the shape
is_new = self.shape == (0, 0)
if is_new:
if row_attrs is None:
raise ValueError("row_attrs must be provided when adding to an empty (new) Loom file")
for k, v in row_attrs.items():
self.ra[k] = v
self.shape = (self.ra[k].shape[0], self.shape[1])
if len(self.ca) == 0:
for k, v in col_attrs.items():
self.ca[k] = np.zeros(0, v.dtype)
layers_dict: Dict[str, np.ndarray] = {}
if isinstance(layers, np.ndarray):
layers_dict = {"": layers}
elif isinstance(layers, loompy.LayerManager):
layers_dict = {k: v[:, :] for k, v in layers.items()}
elif isinstance(layers, dict):
layers_dict = layers
else:
raise ValueError("Invalid type for layers argument")
n_cols = 0
for layer, matrix in layers_dict.items():
if not is_new and layer not in self.layers.keys():
raise ValueError(f"Layer {layer} does not exist in the target loom file")
if matrix.shape[0] != self.shape[0]:
raise ValueError(f"Layer {layer} has {matrix.shape[0]} rows but file has {self.shape[0]}")
if n_cols == 0:
n_cols = matrix.shape[1]
elif matrix.shape[1] != n_cols:
raise ValueError(f"Layer {layer} has {matrix.shape[1]} columns but the first layer had {n_cols}")
did_remove = False
todel = [] # type: List[str]
for key, vals in col_attrs.items():
if key not in self.col_attrs:
if fill_values is not None:
if fill_values == "auto":
fill_with = np.zeros(1, dtype=col_attrs[key].dtype)[0]
else:
fill_with = fill_values[key]
self.ca[key] = np.array([fill_with] * self.shape[1])
else:
did_remove = True
todel.append(key)
if len(vals) != n_cols:
raise ValueError(f"Each column attribute must have exactly {n_cols} values, but {key} had {len(vals)}")
for key in todel:
del col_attrs[key]
if did_remove:
logging.debug("Some column attributes were removed: " + ",".join(todel))
todel = []
did_remove = False
for key in self.col_attrs.keys():
if key not in col_attrs:
if fill_values is not None:
if fill_values == "auto":
fill_with = np.zeros(1, dtype=self.col_attrs[key].dtype)[0]
else:
fill_with = fill_values[key]
col_attrs[key] = np.array([fill_with] * n_cols)
else:
did_remove = True
todel.append(key)
for key in todel:
del self.ca[key] # delete_attr(key, axis=1)
if did_remove:
logging.debug("Some column attributes were removed: " + ",".join(todel))
if is_new:
for k, v in layers_dict.items():
self.layers[k] = v
for k, v in col_attrs.items():
self.ca[k] = v
else:
n_cols = n_cols + self.shape[1]
old_n_cols = self.shape[1]
# Must set new shape here, otherwise the attribute manager will complain
self.shape = (self.shape[0], n_cols)
todel = []
for key, vals in col_attrs.items():
if vals.shape[1:] != self.col_attrs[key].shape[1:]:
logging.debug(f"Removing attribute {key} because shape {vals.shape} did not match existing shape {self.col_attrs[key].shape} beyond first dimension")
todel.append(key)
else:
self.ca[key] = np.concatenate([self.ca[key], vals])
for key in todel:
del self.ca[key]
# Add the columns layerwise
for key in self.layers.keys():
self.layers[key]._resize(n_cols, axis=1)
self.layers[key][:, old_n_cols:n_cols] = layers_dict[key]
self._file.flush() | Add columns of data and attribute values to the dataset.
Args:
layers (dict or numpy.ndarray or LayerManager):
Either:
1) A N-by-M matrix of float32s (N rows, M columns) in this case columns are added at the default layer
2) A dict {layer_name : matrix} specified so that the matrix (N, M) will be added to layer `layer_name`
3) A LayerManager object (such as what is returned by view.layers)
col_attrs (dict):
Column attributes, where keys are attribute names and values are numpy arrays (float or string) of length M
row_attrs (dict):
Optional row attributes, where keys are attribute names and values are numpy arrays (float or string) of length M
fill_values: dictionary of values to use if a column attribute is missing, or "auto" to fill with zeros or empty strings
Returns:
Nothing.
Notes
-----
- This will modify the underlying HDF5 file, which will interfere with any concurrent readers.
- Column attributes in the file that are NOT provided, will be deleted (unless fill value provided).
- Arrays containing NaN should not be provided |
def unload(module):
'''
Unload specified fault manager module
module: string
module to unload
CLI Example:
.. code-block:: bash
salt '*' fmadm.unload software-response
'''
ret = {}
fmadm = _check_fmadm()
cmd = '{cmd} unload {module}'.format(
cmd=fmadm,
module=module
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
if retcode != 0:
result['Error'] = res['stderr']
else:
result = True
return result | Unload specified fault manager module
module: string
module to unload
CLI Example:
.. code-block:: bash
salt '*' fmadm.unload software-response |
def _already_resized_on_flickr(self,fn,pid,_megapixels):
"""Checks if image file (fn) with photo_id (pid) has already
been resized on flickr. If so, returns True"""
logger.debug("%s - resize requested"%(fn))
# Get width/height from flickr
width_flickr,height_flickr=self._getphoto_originalsize(pid)
# Now compute what image will be if we resize it
new_width,new_height=pusher_utils.resize_compute_width_height(\
fn,_megapixels)
if width_flickr==new_width and height_flickr==new_height:
return True
# Also return true if image couldn't be resized
elif not new_width:
return True
return False | Checks if image file (fn) with photo_id (pid) has already
been resized on flickr. If so, returns True |
def from_pubsec_file(cls: Type[SigningKeyType], path: str) -> SigningKeyType:
"""
Return SigningKey instance from a Duniter PubSec v1 file
:param path: Path to the PubSec file
"""
with open(path, 'r') as fh:
pubsec_content = fh.read()
# line patterns
regex_pubkey = compile("pub: ([1-9A-HJ-NP-Za-km-z]{43,44})", MULTILINE)
regex_signkey = compile("sec: ([1-9A-HJ-NP-Za-km-z]{88,90})", MULTILINE)
# check public key field
match = search(regex_pubkey, pubsec_content)
if not match:
raise Exception('Error: Bad format PubSec v1 file, missing public key')
# check signkey field
match = search(regex_signkey, pubsec_content)
if not match:
raise Exception('Error: Bad format PubSec v1 file, missing sec key')
# capture signkey
signkey_hex = match.groups()[0]
# extract seed from signkey
seed = bytes(Base58Encoder.decode(signkey_hex)[0:32])
return cls(seed) | Return SigningKey instance from Duniter WIF file
:param path: Path to WIF file |
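A sketch of the PubSec v1 content the regexes above expect; the key material here is a made-up placeholder, not a real Base58 key:

from re import MULTILINE, compile, search

pubsec_content = (
    "Type: PubSec\n"
    "Version: 1\n"
    "pub: " + "D" * 44 + "\n"   # placeholder public key (Base58, 43-44 chars)
    "sec: " + "D" * 88 + "\n"   # placeholder secret key (Base58, 88-90 chars)
)
match = search(compile("sec: ([1-9A-HJ-NP-Za-km-z]{88,90})", MULTILINE), pubsec_content)
print(match.groups()[0][:8] + "...")   # first characters of the captured signing key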
def lml(self):
"""
Log of the marginal likelihood.
Returns
-------
lml : float
Log of the marginal likelihood.
Notes
-----
The log of the marginal likelihood is given by ::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| - (Qᵀ𝐲)ᵀs⁻¹D⁻¹(Qᵀ𝐲)
+ (Qᵀ𝐲)ᵀs⁻¹D⁻¹(QᵀX𝜷)/2 - (QᵀX𝜷)ᵀs⁻¹D⁻¹(QᵀX𝜷).
By using the optimal 𝜷, the log of the marginal likelihood can be rewritten
as::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| + (Qᵀ𝐲)ᵀs⁻¹D⁻¹Qᵀ(X𝜷-𝐲).
In the extreme case where 𝜷 is such that 𝐲 = X𝜷, the maximum is attained as
s→0.
For optimals 𝜷 and s, the log of the marginal likelihood can be further
simplified to ::
2⋅log(p(𝐲; 𝜷, s)) = -n⋅log(2π) - n⋅log s - log|D| - n.
"""
reml = (self._logdetXX() - self._logdetH()) / 2
if self._optimal["scale"]:
lml = self._lml_optimal_scale()
else:
lml = self._lml_arbitrary_scale()
return lml + reml | Log of the marginal likelihood.
Returns
-------
lml : float
Log of the marginal likelihood.
Notes
-----
The log of the marginal likelihood is given by ::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| - (Qᵀ𝐲)ᵀs⁻¹D⁻¹(Qᵀ𝐲)
+ (Qᵀ𝐲)ᵀs⁻¹D⁻¹(QᵀX𝜷)/2 - (QᵀX𝜷)ᵀs⁻¹D⁻¹(QᵀX𝜷).
By using the optimal 𝜷, the log of the marginal likelihood can be rewritten
as::
2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| + (Qᵀ𝐲)ᵀs⁻¹D⁻¹Qᵀ(X𝜷-𝐲).
In the extreme case where 𝜷 is such that 𝐲 = X𝜷, the maximum is attained as
s→0.
For optimals 𝜷 and s, the log of the marginal likelihood can be further
simplified to ::
2⋅log(p(𝐲; 𝜷, s)) = -n⋅log(2π) - n⋅log s - log|D| - n. |
def _send_request(self, enforce_json, method, raise_for_status,
url, **kwargs):
"""Send HTTP request.
Args:
enforce_json (bool): Require properly-formatted JSON or raise :exc:`~pancloud.exceptions.PanCloudError`. Defaults to ``False``.
method (str): HTTP method.
raise_for_status (bool): If ``True``, raises :exc:`~pancloud.exceptions.HTTPError` if status_code not in 2XX. Defaults to ``False``.
url (str): Request URL.
**kwargs (dict): Re-packed key-word arguments.
Returns:
requests.Response: Requests Response() object
"""
r = self.session.request(method, url, **kwargs)
if raise_for_status:
r.raise_for_status()
if enforce_json:
if 'application/json' in self.session.headers.get(
'Accept', ''
):
try:
r.json()
except ValueError as e:
raise PanCloudError(
"Invalid JSON: {}".format(e)
)
return r | Send HTTP request.
Args:
enforce_json (bool): Require properly-formatted JSON or raise :exc:`~pancloud.exceptions.PanCloudError`. Defaults to ``False``.
method (str): HTTP method.
raise_for_status (bool): If ``True``, raises :exc:`~pancloud.exceptions.HTTPError` if status_code not in 2XX. Defaults to ``False``.
url (str): Request URL.
**kwargs (dict): Re-packed key-word arguments.
Returns:
requests.Response: Requests Response() object |
def __set_private_key(self, pk):
""" Internal method that sets the specified private key
:param pk: private key to set
:return: None
"""
self.__private_key = pk
self.__public_key = pk.public_key() | Internal method that sets the specified private key
:param pk: private key to set
:return: None |
def setActiveState(self, active):
""" Use this to enable or disable (grey out) a parameter. """
st = DISABLED
if active: st = NORMAL
self.entry.configure(state=st)
self.inputLabel.configure(state=st)
self.promptLabel.configure(state=st) | Use this to enable or disable (grey out) a parameter. |
def read_local_conf(local_conf):
"""Search for conf.py in any rel_source directory in CWD and if found read it and return.
:param str local_conf: Path to conf.py to read.
:return: Loaded conf.py.
:rtype: dict
"""
log = logging.getLogger(__name__)
# Attempt to read.
log.info('Reading config from %s...', local_conf)
try:
config = read_config(os.path.dirname(local_conf), '<local>')
except HandledError:
log.warning('Unable to read file, continuing with only CLI args.')
return dict()
# Filter and return.
return {k[4:]: v for k, v in config.items() if k.startswith('scv_') and not k[4:].startswith('_')} | Search for conf.py in any rel_source directory in CWD and if found read it and return.
:param str local_conf: Path to conf.py to read.
:return: Loaded conf.py.
:rtype: dict |
def get_element_types(obj, **kwargs):
"""Get element types as a set."""
max_iterable_length = kwargs.get('max_iterable_length', 10000)
consume_generator = kwargs.get('consume_generator', False)
if not isiterable(obj):
return None
if isgenerator(obj) and not consume_generator:
return None
t = get_types(obj, **kwargs)
if not t['too_big']:
if t['types']:
return "Element types: {}".format(', '.join([extract_type(t) for t in t['types']]))
else:
return None
else:
return "Element types: {}".format(', '.join([extract_type(t) for t in t['types']])) + " (based on first {} elements.)".format(max_iterable_length) | Get element types as a set. |
def disconnect(self, forced=False):
"""
Given the pipeline topology disconnects ``Pipers`` in the order output
-> input. This also disconnects inputs. See ``Dagger.connect``,
``Piper.connect`` and ``Piper.disconnect``. If "forced" is ``True``
``NuMap`` instances will be emptied.
Arguments:
- forced(``bool``) [default: ``False``] If set ``True`` all tasks from
all ``NuMaps`` instances used in the ``Dagger`` will be removed even
if they did not belong to this ``Dagger``.
"""
reversed_postorder = reversed(self.postorder())
self.log.debug('%s trying to disconnect in the order %s' % \
(repr(self), repr(reversed_postorder)))
for piper in reversed_postorder:
if piper.connected:
# we don't want to trigger an exception
piper.disconnect(forced)
self.log.debug('%s successfully disconnected' % repr(self)) | Given the pipeline topology disconnects ``Pipers`` in the order output
-> input. This also disconnects inputs. See ``Dagger.connect``,
``Piper.connect`` and ``Piper.disconnect``. If "forced" is ``True``
``NuMap`` instances will be emptied.
Arguments:
- forced(``bool``) [default: ``False``] If set ``True`` all tasks from
all ``NuMaps`` instances used in the ``Dagger`` will be removed even
if they did not belong to this ``Dagger``. |
def data(self, **query):
"""Query for Data object annotation."""
objects = self.cache['objects']
data = self.api.data.get(**query)['objects']
data_objects = []
for d in data:
_id = d['id']
if _id in objects:
# Update existing object
objects[_id].update(d)
else:
# Insert new object
objects[_id] = GenData(d, self)
data_objects.append(objects[_id])
# Hydrate reference fields
for d in data_objects:
while True:
ref_annotation = {}
remove_annotation = []
for path, ann in d.annotation.items():
if ann['type'].startswith('data:'):
# Referenced data object found
# Copy annotation
_id = ann['value']
if _id not in objects:
try:
d_tmp = self.api.data(_id).get()
except slumber.exceptions.HttpClientError as ex:
if ex.response.status_code == 404:
continue
else:
raise ex
objects[_id] = GenData(d_tmp, self)
annotation = objects[_id].annotation
ref_annotation.update({path + '.' + k: v for k, v in annotation.items()})
remove_annotation.append(path)
if ref_annotation:
d.annotation.update(ref_annotation)
for path in remove_annotation:
del d.annotation[path]
else:
break
return data_objects | Query for Data object annotation. |
def sshpull(host, maildir, localmaildir, noop=False, verbose=False, filterfile=None):
"""Pull a remote maildir to the local one.
"""
store = _SSHStore(host, maildir)
_pull(store, localmaildir, noop, verbose, filterfile) | Pull a remote maildir to the local one. |
def add_zoom_buttons(viewer, canvas=None, color='black'):
"""Add zoom buttons to a canvas.
Parameters
----------
viewer : an ImageView subclass instance
The viewer to which the zoom buttons should be added.
canvas : a DrawingCanvas instance
The canvas to which the buttons should be added. If not supplied
defaults to the private canvas of the viewer.
color : str
A color name, hex triplet. The default is 'black'.
"""
def zoom(box, canvas, event, pt, viewer, n):
zl = viewer.get_zoom()
zl += n
if zl == 0.0:
zl += n
viewer.zoom_to(zl + n)
def add_buttons(viewer, canvas, tag):
objs = []
wd, ht = viewer.get_window_size()
SquareBox = canvas.get_draw_class('squarebox')
Text = canvas.get_draw_class('text')
Compound = canvas.get_draw_class('compoundobject')
x1, y1 = wd - 20, ht // 2 + 20
zoomin = SquareBox(x1, y1, 15, color='yellow', fill=True,
fillcolor='gray', fillalpha=0.5, coord='window')
zoomin.editable = False
zoomin.pickable = True
zoomin.add_callback('pick-down', zoom, viewer, 1)
objs.append(zoomin)
x2, y2 = wd - 20, ht // 2 - 20
zoomout = SquareBox(x2, y2, 15, color='yellow', fill=True,
fillcolor='gray', fillalpha=0.5, coord='window')
zoomout.editable = False
zoomout.pickable = True
zoomout.add_callback('pick-down', zoom, viewer, -1)
objs.append(zoomout)
objs.append(Text(x1 - 4, y1 + 6, text='+', fontsize=18, color=color,
coord='window'))
objs.append(Text(x2 - 4, y2 + 6, text='--', fontsize=18, color=color,
coord='window'))
obj = Compound(*objs)
obj.opaque = False
canvas.add(obj, tag=tag)
def zoom_resize(viewer, width, height, canvas, tag):
try:
canvas.get_object_by_tag(tag)
except KeyError:
return False
canvas.delete_object_by_tag(tag)
add_buttons(viewer, canvas, tag)
tag = '_$zoom_buttons'
if canvas is None:
canvas = viewer.get_private_canvas()
canvas.ui_set_active(True)
canvas.register_for_cursor_drawing(viewer)
canvas.set_draw_mode('pick')
viewer.add_callback('configure', zoom_resize, canvas, tag)
add_buttons(viewer, canvas, tag) | Add zoom buttons to a canvas.
Parameters
----------
viewer : an ImageView subclass instance
The viewer to which the zoom buttons should be added.
canvas : a DrawingCanvas instance
The canvas to which the buttons should be added. If not supplied
defaults to the private canvas of the viewer.
color : str
A color name, hex triplet. The default is 'black'. |
def to_json(obj):
"""
Convert obj to json. Used mostly to convert the classes in json_span.py until we switch to nested
dicts (or something better)
:param obj: the object to serialize to json
:return: json string
"""
try:
return json.dumps(obj, default=lambda obj: {k.lower(): v for k, v in obj.__dict__.items()},
sort_keys=False, separators=(',', ':')).encode()
except Exception as e:
logger.info("to_json: ", e, obj) | Convert obj to json. Used mostly to convert the classes in json_span.py until we switch to nested
dicts (or something better)
:param obj: the object to serialize to json
:return: json string |
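A minimal usage sketch for the to_json helper above, with a hypothetical span-like object (the class name and fields are illustrative only, not part of the original module):
class _FakeSpan:
    def __init__(self):
        self.Name = "db.query"
        self.Duration = 12
# Attribute names are lowercased by the default= hook before serialization.
print(to_json(_FakeSpan()))  # b'{"name":"db.query","duration":12}'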
def simple_attention(memory, att_size, mask, keep_prob=1.0, scope="simple_attention"):
"""Simple attention without any conditions.
Computes weighted sum of memory elements.
"""
with tf.variable_scope(scope):
BS, ML, MH = tf.unstack(tf.shape(memory))
memory_do = tf.nn.dropout(memory, keep_prob=keep_prob, noise_shape=[BS, 1, MH])
logits = tf.layers.dense(tf.layers.dense(memory_do, att_size, activation=tf.nn.tanh), 1, use_bias=False)
logits = softmax_mask(tf.squeeze(logits, [2]), mask)
att_weights = tf.expand_dims(tf.nn.softmax(logits), axis=2)
res = tf.reduce_sum(att_weights * memory, axis=1)
return res | Simple attention without any conditions.
Computes weighted sum of memory elements. |
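A rough NumPy sketch of the same masked weighted-sum attention, with explicit weight matrices standing in for the two dense layers and dropout omitted (all names here are hypothetical, not part of the original module):
import numpy as np

def simple_attention_np(memory, w1, b1, w2, mask):
    # memory: (B, L, H), mask: (B, L) boolean; w1/b1/w2 replace the dense layers
    hidden = np.tanh(memory @ w1 + b1)                     # (B, L, A)
    logits = (hidden @ w2).squeeze(-1)                     # (B, L)
    logits = np.where(mask, logits, -1e30)                 # softmax_mask analogue
    weights = np.exp(logits - logits.max(-1, keepdims=True))
    weights /= weights.sum(-1, keepdims=True)              # softmax over sequence
    return (weights[..., None] * memory).sum(axis=1)       # (B, H) weighted sum

rng = np.random.default_rng(0)
out = simple_attention_np(rng.normal(size=(2, 4, 3)),
                          rng.normal(size=(3, 5)), np.zeros(5),
                          rng.normal(size=(5, 1)),
                          np.array([[True, True, True, True],
                                    [True, True, False, False]]))
print(out.shape)  # (2, 3)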
def default_styles():
"""Generate default ODF styles."""
styles = {}
def _add_style(name, **kwargs):
styles[name] = _create_style(name, **kwargs)
_add_style('heading-1',
family='paragraph',
fontsize='24pt',
fontweight='bold',
)
_add_style('heading-2',
family='paragraph',
fontsize='22pt',
fontweight='bold',
)
_add_style('heading-3',
family='paragraph',
fontsize='20pt',
fontweight='bold',
)
_add_style('heading-4',
family='paragraph',
fontsize='18pt',
fontweight='bold',
)
_add_style('heading-5',
family='paragraph',
fontsize='16pt',
fontweight='bold',
)
_add_style('heading-6',
family='paragraph',
fontsize='14pt',
fontweight='bold',
)
_add_style('normal-paragraph',
family='paragraph',
fontsize='12pt',
marginbottom='0.25cm',
)
_add_style('code',
family='paragraph',
fontsize='10pt',
fontweight='bold',
fontfamily='Courier New',
color='#555555',
)
_add_style('quote',
family='paragraph',
fontsize='12pt',
fontstyle='italic',
)
_add_style('list-paragraph',
family='paragraph',
fontsize='12pt',
marginbottom='.1cm',
)
_add_style('sublist-paragraph',
family='paragraph',
fontsize='12pt',
marginbottom='.1cm',
)
_add_style('numbered-list-paragraph',
family='paragraph',
fontsize='12pt',
marginbottom='.1cm',
)
_add_style('normal-text',
family='text',
fontsize='12pt',
)
_add_style('italic',
family='text',
fontstyle='italic',
fontsize='12pt',
)
_add_style('bold',
family='text',
fontweight='bold',
fontsize='12pt',
)
_add_style('url',
family='text',
fontsize='12pt',
fontweight='bold',
fontfamily='Courier',
)
_add_style('inline-code',
family='text',
fontsize='10pt',
fontweight='bold',
fontfamily='Courier New',
color='#555555',
)
styles['_numbered_list'] = _numbered_style()
return styles | Generate default ODF styles. |
def build_parser(self, context):
"""
Create the final argument parser.
This method creates the non-early (full) argparse argument parser.
Unlike the early counterpart it is expected to have knowledge of
the full command tree.
This method relies on ``context.cmd_tree`` and produces
``context.parser``. Other ingredients can interact with the parser
up until :meth:`parse()` is called.
"""
context.parser, context.max_level = self._create_parser(context) | Create the final argument parser.
This method creates the non-early (full) argparse argument parser.
Unlike the early counterpart it is expected to have knowledge of
the full command tree.
This method relies on ``context.cmd_tree`` and produces
``context.parser``. Other ingredients can interact with the parser
up until :meth:`parse()` is called. |
def resolve_response_data(head_key, data_key, data):
"""
Resolves the responses you get from billomat
If you have done a get_one_element request then you will get a dictionary
If you have done a get_all_elements request then you will get a list with all elements in it
:param head_key: the head key e.g: CLIENTS
:param data_key: the data key e.g: CLIENT
:param data: the responses you got
:return: dict or list
"""
new_data = []
if isinstance(data, list):
for data_row in data:
if head_key in data_row and data_key in data_row[head_key]:
if isinstance(data_row[head_key][data_key], list):
new_data += data_row[head_key][data_key]
else:
new_data.append(data_row[head_key][data_key])
elif data_key in data_row:
return data_row[data_key]
else:
if head_key in data and data_key in data[head_key]:
new_data += data[head_key][data_key]
elif data_key in data:
return data[data_key]
return new_data | Resolves the responses you get from billomat
If you have done a get_one_element request then you will get a dictionary
If you have done a get_all_elements request then you will get a list with all elements in it
:param head_key: the head key e.g: CLIENTS
:param data_key: the data key e.g: CLIENT
:param data: the responses you got
:return: dict or list |
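A small illustration of the two response shapes described above, using hypothetical CLIENTS/CLIENT payloads and the resolve_response_data function defined here:
single = {'CLIENT': {'id': 1, 'name': 'Acme'}}             # get_one_element shape
paged = [
    {'CLIENTS': {'CLIENT': [{'id': 1}, {'id': 2}]}},        # get_all_elements pages
    {'CLIENTS': {'CLIENT': {'id': 3}}},
]
print(resolve_response_data('CLIENTS', 'CLIENT', single))   # {'id': 1, 'name': 'Acme'}
print(resolve_response_data('CLIENTS', 'CLIENT', paged))    # [{'id': 1}, {'id': 2}, {'id': 3}]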
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
keep_backup=True,
update_or_append_line=update_or_append_line):
'''Remove the comment of a commented-out line and make the line "active".
If no such commented-out line exists, the new line is appended.
'''
uncommented = update_or_append_line(filename, prefix=comment+prefix,
new_line=new_line,
keep_backup=keep_backup, append=False)
if not uncommented:
update_or_append_line(filename, prefix, new_line,
keep_backup=keep_backup, append=True) | Remove the comment of a commented-out line and make the line "active".
If no such commented-out line exists, the new line is appended.
def autocorrect(query, possibilities, delta=0.75):
"""Attempts to figure out what possibility the query is
This autocorrect function is rather simple right now with plans for later
improvement. Right now, it just attempts to finish spelling a word as much
as possible, and then determines which possibility is closest to said word.
Args:
query (unicode): query to attempt to complete
possibilities (list): list of unicodes of possible answers for query
delta (float): Minimum delta similarity between query and
any given possibility for possibility to be considered.
Delta used by difflib.get_close_matches().
Returns:
unicode: best guess of correct answer
Raises:
AssertionError: raised if no matches found
Example:
.. code-block:: Python
>>> autocorrect('bowtei', ['bowtie2', 'bot'])
'bowtie2'
"""
# TODO: Make this way more robust and awesome using probability, n-grams?
possibilities = [possibility.lower() for possibility in possibilities]
# Don't waste time for exact matches
if query in possibilities:
return query
# Complete query as much as possible
options = [word for word in possibilities if word.startswith(query)]
if len(options) > 0:
possibilities = options
query = max_substring(options)
# Identify possible matches and return best match
matches = get_close_matches(query, possibilities, cutoff=delta)
# Raise error if no matches
try:
assert len(matches) > 0
except AssertionError:
raise AssertionError('No matches for "{0}" found'.format(query))
return matches[0] | Attempts to figure out what possibility the query is
This autocorrect function is rather simple right now with plans for later
improvement. Right now, it just attempts to finish spelling a word as much
as possible, and then determines which possibility is closest to said word.
Args:
query (unicode): query to attempt to complete
possibilities (list): list of unicodes of possible answers for query
delta (float): Minimum delta similarity between query and
any given possibility for possibility to be considered.
Delta used by difflib.get_close_matches().
Returns:
unicode: best guess of correct answer
Raises:
AssertionError: raised if no matches found
Example:
.. code-block:: Python
>>> autocorrect('bowtei', ['bowtie2', 'bot'])
'bowtie2' |
def _filter_by_moys_slow(self, moys):
"""Filter the Data Collection with a slow method that always works."""
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d.moy in moys:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
return _filt_values, _filt_datetimes | Filter the Data Collection with a slow method that always works. |
def run_linters(files):
"""
Run through file list, and try to find a linter
that matches the given file type.
If it finds a linter, it will run it, and store the
resulting data in a dictionary (keyed to file_type).
:param files:
:return: {file_extension: lint_data}
"""
data = {}
for file_type, file_list in list(files.items()):
linter = LintFactory.get_linter(file_type)
if linter is not None:
data[file_type] = linter.run(file_list)
return data | Run through file list, and try to find a linter
that matches the given file type.
If it finds a linter, it will run it, and store the
resulting data in a dictionary (keyed to file_type).
:param files:
:return: {file_extension: lint_data} |
def query_relations(self,
environment_id,
collection_id,
entities=None,
context=None,
sort=None,
filter=None,
count=None,
evidence_count=None,
**kwargs):
"""
Knowledge Graph relationship query.
See the [Knowledge Graph
documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg)
for more details.
:param str environment_id: The ID of the environment.
:param str collection_id: The ID of the collection.
:param list[QueryRelationsEntity] entities: An array of entities to find
relationships for.
:param QueryEntitiesContext context: Entity text to provide context for the
queried entity and rank based on that association. For example, if you wanted to
query the city of London in England your query would look for `London` with the
context of `England`.
:param str sort: The sorting method for the relationships, can be `score` or
`frequency`. `frequency` is the number of unique times each entity is identified.
The default is `score`. This parameter cannot be used in the same query as the
**bias** parameter.
:param QueryRelationsFilter filter:
:param int count: The number of results to return. The default is `10`. The
maximum is `1000`.
:param int evidence_count: The number of evidence items to return for each result.
The default is `0`. The maximum number of evidence items per query is 10,000.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if environment_id is None:
raise ValueError('environment_id must be provided')
if collection_id is None:
raise ValueError('collection_id must be provided')
if entities is not None:
entities = [
self._convert_model(x, QueryRelationsEntity) for x in entities
]
if context is not None:
context = self._convert_model(context, QueryEntitiesContext)
if filter is not None:
filter = self._convert_model(filter, QueryRelationsFilter)
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('discovery', 'V1', 'query_relations')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'entities': entities,
'context': context,
'sort': sort,
'filter': filter,
'count': count,
'evidence_count': evidence_count
}
url = '/v1/environments/{0}/collections/{1}/query_relations'.format(
*self._encode_path_vars(environment_id, collection_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | Knowledge Graph relationship query.
See the [Knowledge Graph
documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg)
for more details.
:param str environment_id: The ID of the environment.
:param str collection_id: The ID of the collection.
:param list[QueryRelationsEntity] entities: An array of entities to find
relationships for.
:param QueryEntitiesContext context: Entity text to provide context for the
queried entity and rank based on that association. For example, if you wanted to
query the city of London in England your query would look for `London` with the
context of `England`.
:param str sort: The sorting method for the relationships, can be `score` or
`frequency`. `frequency` is the number of unique times each entity is identified.
The default is `score`. This parameter cannot be used in the same query as the
**bias** parameter.
:param QueryRelationsFilter filter:
:param int count: The number of results to return. The default is `10`. The
maximum is `1000`.
:param int evidence_count: The number of evidence items to return for each result.
The default is `0`. The maximum number of evidence items per query is 10,000.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse |
def patch(self, *args, **kwargs):
"""Patch only drafts.
Status required: ``'draft'``.
Meta information inside `_deposit` are preserved.
"""
return super(Deposit, self).patch(*args, **kwargs) | Patch only drafts.
Status required: ``'draft'``.
Meta information inside `_deposit` are preserved. |
def _CheckCollation(cursor):
"""Checks MySQL collation and warns if misconfigured."""
# Do not fail for wrong collation, because changing it is harder than changing
# the character set. Some providers only allow changing character set and then
# use the default collation. Also, misconfigured collation is not expected to
# have major negative impacts, since it only affects string sort order for
# some locales.
cur_collation_connection = _ReadVariable("collation_connection", cursor)
if cur_collation_connection != COLLATION:
logging.warning("Require MySQL collation_connection of %s, got %s.",
COLLATION, cur_collation_connection)
cur_collation_database = _ReadVariable("collation_database", cursor)
if cur_collation_database != COLLATION:
logging.warning(
"Require MySQL collation_database of %s, got %s."
" To create your database, use: %s", COLLATION, cur_collation_database,
CREATE_DATABASE_QUERY) | Checks MySQL collation and warns if misconfigured. |
def built_datetime(self):
"""Return the built time as a datetime object"""
from datetime import datetime
try:
return datetime.fromtimestamp(self.state.build_done)
except TypeError:
# build_done is null
return None | Return the built time as a datetime object |
def add_minutes(self, datetimestr, n):
"""Returns a time that n minutes after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of minutes, value can be negative
**Chinese documentation**
Returns the time N minutes after the given date.
"""
a_datetime = self.parse_datetime(datetimestr)
return a_datetime + timedelta(seconds=60 * n) | Returns a time that is n minutes after the given time.
:param datetimestr: a datetime object or a datetime str
:param n: number of minutes, value can be negative
**Chinese documentation**
Returns the time N minutes after the given date.
def merge_ids(self, token, channel, ids, delete=False):
"""
Call the restful endpoint to merge two RAMON objects into one.
Arguments:
token (str): The token to inspect
channel (str): The channel to inspect
ids (int[]): the list of the IDs to merge
delete (bool : False): Whether to delete after merging.
Returns:
json: The ID as returned by ndstore
"""
url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids]))
req = self.remote_utils.get_url(url)
if req.status_code != 200:
raise RemoteDataUploadError('Could not merge ids {}'.format(
','.join([str(i) for i in ids])))
if delete:
self.delete_ramon(token, channel, ids[1:])
return True | Call the restful endpoint to merge two RAMON objects into one.
Arguments:
token (str): The token to inspect
channel (str): The channel to inspect
ids (int[]): the list of the IDs to merge
delete (bool : False): Whether to delete after merging.
Returns:
json: The ID as returned by ndstore |
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value | ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322. |
def _add_secondary_if_exists(secondary, out, get_retriever):
"""Add secondary files only if present locally or remotely.
"""
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out | Add secondary files only if present locally or remotely. |
def Chen_Friedel(m, x, rhol, rhog, mul, mug, sigma, D, roughness=0, L=1):
r'''Calculates two-phase pressure drop with the Chen modification of the
Friedel correlation, as given in [1]_ and also shown in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{Friedel}\Omega
For Bo < 2.5:
.. math::
\Omega = \frac{0.0333Re_{lo}^{0.45}}{Re_g^{0.09}(1 + 0.4\exp(-Bo))}
For Bo >= 2.5:
.. math::
\Omega = \frac{We^{0.2}}{2.5 + 0.06Bo}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
sigma : float
Surface tension, [N/m]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Applicable ONLY to mini/microchannels; yields drastically too low
pressure drops for larger channels. For more details, see the `Friedel`
correlation.
It is not explicitly stated in [1]_ how to calculate the liquid mixture
density for use in calculation of Weber number; the homogeneous model is
assumed as it is used in the Friedel model.
The bond number used here is 1/4 the normal value, i.e.:
.. math::
Bo = \frac{g(\rho_l-\rho_g)D^2}{4\sigma}
Examples
--------
>>> Chen_Friedel(m=.0005, x=0.9, rhol=950., rhog=1.4, mul=1E-3, mug=1E-5,
... sigma=0.02, D=0.003, roughness=0, L=1)
6249.247540588871
References
----------
.. [1] Chen, Ing Youn, Kai-Shing Yang, Yu-Juei Chang, and Chi-Chung Wang.
"Two-Phase Pressure Drop of Air–water and R-410A in Small Horizontal
Tubes." International Journal of Multiphase Flow 27, no. 7 (July 2001):
1293-99. doi:10.1016/S0301-9322(01)00004-0.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11–12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Choi, Kwang-Il, A. S. Pamitran, Chun-Young Oh, and Jong-Taek Oh.
"Two-Phase Pressure Drop of R-410A in Horizontal Smooth Minichannels."
International Journal of Refrigeration 31, no. 1 (January 2008): 119-29.
doi:10.1016/j.ijrefrig.2007.06.006.
'''
# Liquid-only properties, for calculation of E, dP_lo
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
fd_lo = friction_factor(Re=Re_lo, eD=roughness/D)
dP_lo = fd_lo*L/D*(0.5*rhol*v_lo**2)
# Gas-only properties, for calculation of E
v_go = m/rhog/(pi/4*D**2)
Re_go = Reynolds(V=v_go, rho=rhog, mu=mug, D=D)
fd_go = friction_factor(Re=Re_go, eD=roughness/D)
F = x**0.78*(1-x)**0.224
H = (rhol/rhog)**0.91*(mug/mul)**0.19*(1 - mug/mul)**0.7
E = (1-x)**2 + x**2*(rhol*fd_go/(rhog*fd_lo))
# Homogeneous properties, for Froude/Weber numbers
rho_h = 1./(x/rhog + (1-x)/rhol)
Q_h = m/rho_h
v_h = Q_h/(pi/4*D**2)
Fr = Froude(V=v_h, L=D, squared=True) # checked with (m/(pi/4*D**2))**2/g/D/rho_h**2
We = Weber(V=v_h, L=D, rho=rho_h, sigma=sigma) # checked with (m/(pi/4*D**2))**2*D/sigma/rho_h
phi_lo2 = E + 3.24*F*H/(Fr**0.0454*We**0.035)
dP = phi_lo2*dP_lo
# Chen modification; Weber number is the same as above
# Weber is same
Bo = Bond(rhol=rhol, rhog=rhog, sigma=sigma, L=D)/4 # Custom definition
if Bo < 2.5:
# Actual gas flow, needed for this case only.
v_g = m*x/rhog/(pi/4*D**2)
Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D)
Omega = 0.0333*Re_lo**0.45/(Re_g**0.09*(1 + 0.5*exp(-Bo)))
else:
Omega = We**0.2/(2.5 + 0.06*Bo)
return dP*Omega | r'''Calculates two-phase pressure drop with the Chen modification of the
Friedel correlation, as given in [1]_ and also shown in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{Friedel}\Omega
For Bo < 2.5:
.. math::
\Omega = \frac{0.0333Re_{lo}^{0.45}}{Re_g^{0.09}(1 + 0.4\exp(-Bo))}
For Bo >= 2.5:
.. math::
\Omega = \frac{We^{0.2}}{2.5 + 0.06Bo}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
sigma : float
Surface tension, [N/m]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Applicable ONLY to mini/microchannels; yields drastically too low
pressure drops for larger channels. For more details, see the `Friedel`
correlation.
It is not explicitly stated in [1]_ how to calculate the liquid mixture
density for use in calculation of Weber number; the homogeneous model is
assumed as it is used in the Friedel model.
The bond number used here is 1/4 the normal value, i.e.:
.. math::
Bo = \frac{g(\rho_l-\rho_g)D^2}{4\sigma}
Examples
--------
>>> Chen_Friedel(m=.0005, x=0.9, rhol=950., rhog=1.4, mul=1E-3, mug=1E-5,
... sigma=0.02, D=0.003, roughness=0, L=1)
6249.247540588871
References
----------
.. [1] Chen, Ing Youn, Kai-Shing Yang, Yu-Juei Chang, and Chi-Chung Wang.
"Two-Phase Pressure Drop of Air–water and R-410A in Small Horizontal
Tubes." International Journal of Multiphase Flow 27, no. 7 (July 2001):
1293-99. doi:10.1016/S0301-9322(01)00004-0.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11–12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Choi, Kwang-Il, A. S. Pamitran, Chun-Young Oh, and Jong-Taek Oh.
"Two-Phase Pressure Drop of R-410A in Horizontal Smooth Minichannels."
International Journal of Refrigeration 31, no. 1 (January 2008): 119-29.
doi:10.1016/j.ijrefrig.2007.06.006. |
def verify_files(files, user):
'''
Verify that the named files exist and are owned by the named user
'''
if salt.utils.platform.is_windows():
return True
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
except KeyError:
err = ('Failed to prepare the Salt environment for user '
'{0}. The user is not available.\n').format(user)
sys.stderr.write(err)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
for fn_ in files:
dirname = os.path.dirname(fn_)
try:
if dirname:
try:
os.makedirs(dirname)
except OSError as err:
if err.errno != errno.EEXIST:
raise
if not os.path.isfile(fn_):
with salt.utils.files.fopen(fn_, 'w'):
pass
except IOError as err:
if os.path.isfile(dirname):
msg = 'Failed to create path {0}, is {1} a file?'.format(fn_, dirname)
raise SaltSystemExit(msg=msg)
if err.errno != errno.EACCES:
raise
msg = 'No permissions to access "{0}", are you running as the correct user?'.format(fn_)
raise SaltSystemExit(msg=msg)
except OSError as err:
msg = 'Failed to create path "{0}" - {1}'.format(fn_, err)
raise SaltSystemExit(msg=msg)
stats = os.stat(fn_)
if uid != stats.st_uid:
try:
os.chown(fn_, uid, -1)
except OSError:
pass
return True | Verify that the named files exist and are owned by the named user |
def session(self):
""" Returns the current db session """
if not self.__session:
self.__session = dal.get_default_session()
return self.__session | Returns the current db session |
def contains_remove(self, item):
# type (Any, Any) -> Any
'''Takes a collection and an item and returns a new collection
of the same type with that item removed. The notion of "contains"
is defined by the object itself; the following must be ``True``:
.. code-block:: python
item not in contains_remove(obj, item)
This function is used by some lenses (particularly ContainsLens)
to remove items from containers when necessary.
The corresponding method call for this hook is
``obj._lens_contains_remove(item)``.
There is no default implementation.
'''
try:
self._lens_contains_remove
except AttributeError:
message = 'Don\'t know how to remove an item from {}'
raise NotImplementedError(message.format(type(self)))
else:
return self._lens_contains_remove(item) | Takes a collection and an item and returns a new collection
of the same type with that item removed. The notion of "contains"
is defined by the object itself; the following must be ``True``:
.. code-block:: python
item not in contains_remove(obj, item)
This function is used by some lenses (particularly ContainsLens)
to remove items from containers when necessary.
The corresponding method call for this hook is
``obj._lens_contains_remove(item)``.
There is no default implementation. |
def load_instackenv(self):
"""Load the instackenv.json file and wait till the ironic nodes are ready.
TODO(Gonéri): should be split, write_instackenv() to generate the
instackenv.json and instackenv_import() for the rest.
"""
self.add_environment_file(user='stack', filename='stackrc')
self.run('openstack baremetal import --json instackenv.json', user='stack')
ironic_node_nbr = 0
count_cmd = 'jq -M "{filter}|length" /home/stack/instackenv.json'
# Nodes are either in the .nodes list or at the root of the document
for f in ['.nodes', '.']:
try:
ironic_node_nbr = int(
self.run(count_cmd.format(filter=f), user='stack')[0])
except ValueError:
pass
if ironic_node_nbr > 0:
break
self._wait_for_ironic_nodes(ironic_node_nbr)
# register association with the newly created ironic nodes and the
# existing barematal nodes in the factory
self.baremetal_factory.set_ironic_uuid(self.list_nodes())
self.run('openstack baremetal configure boot', user='stack') | Load the instackenv.json file and wait till the ironic nodes are ready.
TODO(Gonéri): should be split, write_instackenv() to generate the
instackenv.json and instackenv_import() for the rest. |
def get_dataset(self, key, info):
"""Load a dataset."""
if self._polarization != key.polarization:
return
logger.debug('Reading %s.', key.name)
if key.name in ['longitude', 'latitude']:
logger.debug('Constructing coordinate arrays.')
if self.lons is None or self.lats is None:
self.lons, self.lats, self.alts = self.get_lonlatalts()
if key.name == 'latitude':
data = self.lats
else:
data = self.lons
data.attrs.update(info)
else:
calibration = key.calibration or 'gamma'
if calibration == 'sigma_nought':
calibration = 'sigmaNought'
elif calibration == 'beta_nought':
calibration = 'betaNought'
data = self.read_band()
# chunks = data.chunks # This seems to be slower for some reason
chunks = CHUNK_SIZE
logger.debug('Reading noise data.')
noise = self.noise.get_noise_correction(data.shape, chunks=chunks).fillna(0)
logger.debug('Reading calibration data.')
cal = self.calibration.get_calibration(calibration, data.shape, chunks=chunks)
cal_constant = self.calibration.get_calibration_constant()
logger.debug('Calibrating.')
data = data.where(data > 0)
data = data.astype(np.float64)
dn = data * data
data = ((dn - noise).clip(min=0) + cal_constant)
data = (np.sqrt(data) / cal).clip(min=0)
data.attrs.update(info)
del noise, cal
data.attrs['units'] = calibration
return data | Load a dataset. |
def get_summary(self):
"""
Return the function summary
Returns:
(str, str, str, list(str), list(str), list(str), list(str), list(str));
contract_name, name, visibility, modifiers, vars read, vars written, internal_calls, external_calls_as_expressions
"""
return (self.contract.name, self.full_name, self.visibility,
[str(x) for x in self.modifiers],
[str(x) for x in self.state_variables_read + self.solidity_variables_read],
[str(x) for x in self.state_variables_written],
[str(x) for x in self.internal_calls],
[str(x) for x in self.external_calls_as_expressions]) | Return the function summary
Returns:
(str, str, str, list(str), list(str), list(str), list(str), list(str));
contract_name, name, visibility, modifiers, vars read, vars written, internal_calls, external_calls_as_expressions |
def branches(self):
"""
Returns a data frame of all branches in origin. The DataFrame will have the columns:
* repository
* branch
* local
:returns: DataFrame
"""
# first pull the local branches
local_branches = self.repo.branches
data = [[x.name, True] for x in list(local_branches)]
# then the remotes
remote_branches = self.repo.git.branch(all=True).split('\n')
if sys.version_info.major == 2:
remote_branches = set([x.split('/')[-1] for x in remote_branches if 'remotes' in x])
else:
remote_branches = {x.split('/')[-1] for x in remote_branches if 'remotes' in x}
data += [[x, False] for x in remote_branches]
df = DataFrame(data, columns=['branch', 'local'])
df['repository'] = self._repo_name()
return df | Returns a data frame of all branches in origin. The DataFrame will have the columns:
* repository
* branch
* local
:returns: DataFrame |
def return_values_ssa(self):
"""
list(Return Values in SSA form): List of the return values in ssa form
"""
from slither.core.cfg.node import NodeType
from slither.slithir.operations import Return
from slither.slithir.variables import Constant
if self._return_values_ssa is None:
return_values_ssa = list()
returns = [n for n in self.nodes if n.type == NodeType.RETURN]
[return_values_ssa.extend(ir.values) for node in returns for ir in node.irs_ssa if isinstance(ir, Return)]
self._return_values_ssa = list(set([x for x in return_values_ssa if not isinstance(x, Constant)]))
return self._return_values_ssa | list(Return Values in SSA form): List of the return values in ssa form |
def compute_geometric_median(X, eps=1e-5):
"""
Estimate the geometric median of points in 2D.
Code from https://stackoverflow.com/a/30305181
Parameters
----------
X : (N,2) ndarray
Points in 2D. Second axis must be given in xy-form.
eps : float, optional
Distance threshold when to return the median.
Returns
-------
(2,) ndarray
Geometric median as xy-coordinate.
"""
y = np.mean(X, 0)
while True:
D = scipy.spatial.distance.cdist(X, [y])
nonzeros = (D != 0)[:, 0]
Dinv = 1 / D[nonzeros]
Dinvs = np.sum(Dinv)
W = Dinv / Dinvs
T = np.sum(W * X[nonzeros], 0)
num_zeros = len(X) - np.sum(nonzeros)
if num_zeros == 0:
y1 = T
elif num_zeros == len(X):
return y
else:
R = (T - y) * Dinvs
r = np.linalg.norm(R)
rinv = 0 if r == 0 else num_zeros/r
y1 = max(0, 1-rinv)*T + min(1, rinv)*y
if scipy.spatial.distance.euclidean(y, y1) < eps:
return y1
y = y1 | Estimate the geometric median of points in 2D.
Code from https://stackoverflow.com/a/30305181
Parameters
----------
X : (N,2) ndarray
Points in 2D. Second axis must be given in xy-form.
eps : float, optional
Distance threshold when to return the median.
Returns
-------
(2,) ndarray
Geometric median as xy-coordinate. |
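A quick check of compute_geometric_median on toy data (values are illustrative); unlike the mean, the result stays near the cluster despite the outlier:
import numpy as np

pts = np.array([[1.0, 2.0], [1.1, 2.1], [0.9, 1.9], [10.0, 10.0]])
print(pts.mean(axis=0))                # pulled toward the outlier, ~[3.25, 4.0]
print(compute_geometric_median(pts))   # stays near the cluster, ~[1.0, 2.0]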
def normalizeGroupValue(value):
"""
Normalizes group value.
* **value** must be a ``list``.
* **value** items must normalize as glyph names with
:func:`normalizeGlyphName`.
* Returned value will be a ``tuple`` of unencoded ``unicode`` strings.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Group value must be a list, not %s."
% type(value).__name__)
value = [normalizeGlyphName(v) for v in value]
return tuple([unicode(v) for v in value]) | Normalizes group value.
* **value** must be a ``list``.
* **value** items must normalize as glyph names with
:func:`normalizeGlyphName`.
* Returned value will be a ``tuple`` of unencoded ``unicode`` strings. |
def recommendations(self, **kwargs):
"""
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API. |
def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful API
"""
# avoid infinite recursion
if record.name.startswith('requests'):
return
data, header = self._prepPayload(record)
try:
self.session.post(self._getEndpoint(),
data=data,
headers={'content-type': header})
except Exception:
self.handleError(record) | Override emit() method in handler parent for sending log to RESTful API |
def _nested_cwl_record(xs, want_attrs, input_files):
"""Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch.
"""
if isinstance(xs, (list, tuple)):
return [_nested_cwl_record(x, want_attrs, input_files) for x in xs]
else:
assert isinstance(xs, dict), pprint.pformat(xs)
return _collapse_to_cwl_record_single(xs, want_attrs, input_files) | Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch. |
def parse(self, response):
'''
Parse the response of the links provided in ``start_urls`` and generate requests for the individual article links.
:param Response response: the response object passed in by ``Scrapy``
'''
content_raw = response.body.decode()
self.logger.debug('Raw response body: {}'.format(content_raw))
content = json.loads(content_raw, encoding='UTF-8')
self.logger.debug(content)
# Article publication date
date = datetime.datetime.strptime(content['date'], '%Y%m%d')
strftime = date.strftime("%Y-%m-%d")
self.logger.info('Date: {}'.format(strftime))
# Process the top-stories list and mark the matching story with a `top` flag
if 'top_stories' in content:
self.logger.info('Processing top stories')
for item in content['top_stories']:
for story in content['stories']:
if item['id'] == story['id']:
story['top'] = 1
break
self.logger.debug(item)
# Process today's articles and issue a request for each one
post_num = len(content['stories'])
self.logger.info("Processing today's articles, {:>2} in total".format(post_num))
for item in content['stories']:
self.logger.info(item)
post_num = 0 if post_num < 0 else post_num
pub_time = date + datetime.timedelta(minutes=post_num)
post_num -= 1
url = 'http://news-at.zhihu.com/api/4/news/{}'.format(item['id'])
request = scrapy.Request(url, callback=self.parse_post)
post_dict = {
'spider': ZhihuDailySpider.name,
'date': pub_time.strftime("%Y-%m-%d %H:%M:%S"),
'meta': {
'spider.zhihu_daily.id': str(item.get('id', ''))
}
}
if item.get('top'):
post_dict['meta']['spider.zhihu_daily.top'] = \
str(item.get('top', 0))
request.meta['post'] = post_dict
self.item_list.append(post_dict)
yield request | Parse the response of the links provided in ``start_urls`` and generate requests for the individual article links.
:param Response response: the response object passed in by ``Scrapy``
def __set_window_title(self):
"""
Sets the Component window title.
"""
if self.has_editor_tab():
windowTitle = "{0} - {1}".format(self.__default_window_title, self.get_current_editor().file)
else:
windowTitle = "{0}".format(self.__default_window_title)
LOGGER.debug("> Setting 'Script Editor' window title to '{0}'.".format(windowTitle))
self.setWindowTitle(windowTitle) | Sets the Component window title. |
def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):
"""
Build tasks for the computation of Born effective charges from a ground-state task.
Args:
scf_task: ScfTask object.
ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
manager: :class:`TaskManager` object.
"""
new = cls(manager=manager)
new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
return new | Build tasks for the computation of Born effective charges from a ground-state task.
Args:
scf_task: ScfTask object.
ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
manager: :class:`TaskManager` object. |
def estimate_clock_model(params):
"""
implementing treetime clock
"""
if assure_tree(params, tmp_dir='clock_model_tmp'):
return 1
dates = utils.parse_dates(params.dates)
if len(dates)==0:
return 1
outdir = get_outdir(params, '_clock')
###########################################################################
### READ IN VCF
###########################################################################
#sets ref and fixed_pi to None if not VCF
aln, ref, fixed_pi = read_if_vcf(params)
is_vcf = True if ref is not None else False
###########################################################################
### ESTIMATE ROOT (if requested) AND DETERMINE TEMPORAL SIGNAL
###########################################################################
if params.aln is None and params.sequence_length is None:
print("one of arguments '--aln' and '--sequence-length' is required.", file=sys.stderr)
return 1
basename = get_basename(params, outdir)
myTree = TreeTime(dates=dates, tree=params.tree, aln=aln, gtr='JC69',
verbose=params.verbose, seq_len=params.sequence_length,
ref=ref)
myTree.tip_slack=params.tip_slack
if myTree.tree is None:
print("ERROR: tree loading failed. exiting...")
return 1
if params.clock_filter:
n_bad = [n.name for n in myTree.tree.get_terminals() if n.bad_branch]
myTree.clock_filter(n_iqd=params.clock_filter, reroot=params.reroot or 'least-squares')
n_bad_after = [n.name for n in myTree.tree.get_terminals() if n.bad_branch]
if len(n_bad_after)>len(n_bad):
print("The following leaves don't follow a loose clock and "
"will be ignored in rate estimation:\n\t"
+"\n\t".join(set(n_bad_after).difference(n_bad)))
if not params.keep_root:
# reroot to optimal root, this assigns clock_model to myTree
if params.covariation: # this requires branch length estimates
myTree.run(root="least-squares", max_iter=0,
use_covariation=params.covariation)
res = myTree.reroot(params.reroot,
force_positive=not params.allow_negative_rate)
myTree.get_clock_model(covariation=params.covariation)
if res==ttconf.ERROR:
print("ERROR: unknown root or rooting mechanism!\n"
"\tvalid choices are 'least-squares', 'ML', and 'ML-rough'")
return 1
else:
myTree.get_clock_model(covariation=params.covariation)
d2d = utils.DateConversion.from_regression(myTree.clock_model)
print('\n',d2d)
print('The R^2 value indicates the fraction of variation in'
'\nroot-to-tip distance explained by the sampling times.'
'\nHigher values correspond to more clock-like behavior (max 1.0).')
print('\nThe rate is the slope of the best fit of the date to'
'\nthe root-to-tip distance and provides an estimate of'
'\nthe substitution rate. The rate needs to be positive!'
'\nNegative rates suggest an inappropriate root.\n')
print('\nThe estimated rate and tree correspond to a root date:')
if params.covariation:
reg = myTree.clock_model
dp = np.array([reg['intercept']/reg['slope']**2,-1./reg['slope']])
droot = np.sqrt(reg['cov'][:2,:2].dot(dp).dot(dp))
print('\n--- root-date:\t %3.2f +/- %1.2f (one std-dev)\n\n'%(-d2d.intercept/d2d.clock_rate, droot))
else:
print('\n--- root-date:\t %3.2f\n\n'%(-d2d.intercept/d2d.clock_rate))
if not params.keep_root:
# write rerooted tree to file
outtree_name = basename+'rerooted.newick'
Phylo.write(myTree.tree, outtree_name, 'newick')
print("--- re-rooted tree written to \n\t%s\n"%outtree_name)
table_fname = basename+'rtt.csv'
with open(table_fname, 'w') as ofile:
ofile.write("#name, date, root-to-tip distance\n")
ofile.write("#Dates of nodes that didn't have a specified date are inferred from the root-to-tip regression.\n")
for n in myTree.tree.get_terminals():
if hasattr(n, "raw_date_constraint") and (n.raw_date_constraint is not None):
if np.isscalar(n.raw_date_constraint):
tmp_str = str(n.raw_date_constraint)
elif len(n.raw_date_constraint):
tmp_str = str(n.raw_date_constraint[0])+'-'+str(n.raw_date_constraint[1])
else:
tmp_str = ''
ofile.write("%s, %s, %f\n"%(n.name, tmp_str, n.dist2root))
else:
ofile.write("%s, %f, %f\n"%(n.name, d2d.numdate_from_dist2root(n.dist2root), n.dist2root))
for n in myTree.tree.get_nonterminals(order='preorder'):
ofile.write("%s, %f, %f\n"%(n.name, d2d.numdate_from_dist2root(n.dist2root), n.dist2root))
print("--- wrote dates and root-to-tip distances to \n\t%s\n"%table_fname)
###########################################################################
### PLOT AND SAVE RESULT
###########################################################################
plot_rtt(myTree, outdir+params.plot_rtt)
return 0 | implementing treetime clock |
def approx(x, y, xout, method='linear', rule=1, f=0, yleft=None,
yright=None, ties='mean'):
"""Linearly interpolate points.
Return a list of points which (linearly) interpolate given data points,
or a function performing the linear (or constant) interpolation.
Parameters
----------
x : array-like, shape=(n_samples,)
Numeric vector giving the coordinates of the points
to be interpolated.
y : array-like, shape=(n_samples,)
Numeric vector giving the coordinates of the points
to be interpolated.
xout : int, float or iterable
A scalar or iterable of numeric values specifying where
interpolation is to take place.
method : str, optional (default='linear')
Specifies the interpolation method to be used.
Choices are "linear" or "constant".
rule : int, optional (default=1)
An integer describing how interpolation is to take place
outside the interval ``[min(x), max(x)]``. If ``rule`` is 1 then
np.nans are returned for such points and if it is 2, the value at the
closest data extreme is used.
f : int, optional (default=0)
For ``method`` = "constant" a number between 0 and 1 inclusive,
indicating a compromise between left- and right-continuous step
functions. If y0 and y1 are the values to the left and right of the
point then the value is y0 if f == 0, y1 if f == 1, and y0*(1-f)+y1*f
for intermediate values. In this way the result is right-continuous
for f == 0 and left-continuous for f == 1, even for non-finite
``y`` values.
yleft : float, optional (default=None)
The value to be returned when input ``x`` values are less than
``min(x)``. The default is defined by the value of rule given below.
yright : float, optional (default=None)
The value to be returned when input ``x`` values are greater than
``max(x)``. The default is defined by the value of rule given below.
ties : str, optional (default='mean')
Handling of tied ``x`` values. Choices are "mean" or "ordered".
"""
if method not in VALID_APPROX:
raise ValueError('method must be one of %r' % VALID_APPROX)
# make sure xout is an array
xout = c(xout).astype(np.float64) # ensure double
# check method
method_key = method
# not a callable, actually, but serves the purpose..
method = get_callable(method_key, VALID_APPROX)
# copy/regularize vectors
x, y = _regularize(x, y, ties)
nx = x.shape[0]
# if len 1? (we've already handled where the size is 0, since we check that
# in the _regularize function when we call c1d)
if nx == 1:
if method_key == 'linear':
raise ValueError('need at least two points to '
'linearly interpolate')
# get yleft, yright
if yleft is None:
yleft = y[0] if rule != 1 else np.nan
if yright is None:
yright = y[-1] if rule != 1 else np.nan
# call the C subroutine
yout = C_Approx(x, y, xout, method, f, yleft, yright) # MemoryView
return xout, np.asarray(yout) | Linearly interpolate points.
Return a list of points which (linearly) interpolate given data points,
or a function performing the linear (or constant) interpolation.
Parameters
----------
x : array-like, shape=(n_samples,)
Numeric vector giving the coordinates of the points
to be interpolated.
y : array-like, shape=(n_samples,)
Numeric vector giving the coordinates of the points
to be interpolated.
xout : int, float or iterable
A scalar or iterable of numeric values specifying where
interpolation is to take place.
method : str, optional (default='linear')
Specifies the interpolation method to be used.
Choices are "linear" or "constant".
rule : int, optional (default=1)
An integer describing how interpolation is to take place
outside the interval ``[min(x), max(x)]``. If ``rule`` is 1 then
np.nans are returned for such points and if it is 2, the value at the
closest data extreme is used.
f : int, optional (default=0)
For ``method`` = "constant" a number between 0 and 1 inclusive,
indicating a compromise between left- and right-continuous step
functions. If y0 and y1 are the values to the left and right of the
point then the value is y0 if f == 0, y1 if f == 1, and y0*(1-f)+y1*f
for intermediate values. In this way the result is right-continuous
for f == 0 and left-continuous for f == 1, even for non-finite
``y`` values.
yleft : float, optional (default=None)
The value to be returned when input ``x`` values are less than
``min(x)``. The default is defined by the value of rule given below.
yright : float, optional (default=None)
The value to be returned when input ``x`` values are greater than
``max(x)``. The default is defined by the value of rule given below.
ties : str, optional (default='mean')
Handling of tied ``x`` values. Choices are "mean" or "ordered". |
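For intuition on the `rule` argument, here is a rough NumPy analogue of the out-of-range handling (np.interp stands in for the internal C_Approx routine; this is a sketch, not the library call itself):
import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 10.0, 20.0])
xout = np.array([-1.0, 0.5, 3.0])

# rule=1: points outside [min(x), max(x)] become NaN
print(np.interp(xout, x, y, left=np.nan, right=np.nan))    # [nan  5. nan]
# rule=2: points outside the range take the value at the nearest data extreme
print(np.interp(xout, x, y, left=y[0], right=y[-1]))       # [ 0.  5. 20.]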
def add_peer(self, peer_addr):
"Build a connection to the Hub at a given ``(host, port)`` address"
peer = connection.Peer(
self._ident, self._dispatcher, peer_addr, backend.Socket())
peer.start()
self._started_peers[peer_addr] = peer | Build a connection to the Hub at a given ``(host, port)`` address |
def main():
"""Create an organization, print out its attributes and delete it."""
org = Organization(name='junk org').create()
pprint(org.get_values()) # e.g. {'name': 'junk org', …}
org.delete() | Create an organization, print out its attributes and delete it. |
def subscribe_to_address_webhook(callback_url, subscription_address, event='tx-confirmation', confirmations=0, confidence=0.00, coin_symbol='btc', api_key=None):
'''
Subscribe to transaction webhooks on a given address.
Webhooks for transaction broadcast and each confirmation (up to 6).
Returns the blockcypher ID of the subscription
'''
assert is_valid_coin_symbol(coin_symbol)
assert is_valid_address_for_coinsymbol(subscription_address, coin_symbol)
assert api_key, 'api_key required'
url = make_url(coin_symbol, 'hooks')
params = {'token': api_key}
data = {
'event': event,
'url': callback_url,
'address': subscription_address,
}
if event == 'tx-confirmation' and confirmations:
data['confirmations'] = confirmations
elif event == 'tx-confidence' and confidence:
data['confidence'] = confidence
r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
response_dict = get_valid_json(r)
return response_dict['id'] | Subscribe to transaction webhooks on a given address.
Webhooks for transaction broadcast and each confirmation (up to 6).
Returns the blockcypher ID of the subscription |
def logs(ctx, services, num, follow):
"""Show logs of daemonized service."""
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
home = ctx.obj["HOME"]
services_path = os.path.join(home, SERVICES)
tail_threads = []
for service in services:
logpath = os.path.join(services_path, service, LOGS_DIR, STDOUTLOG)
if os.path.exists(logpath):
logger.debug("tailing %s", logpath)
# TODO: Print log lines from multiple services sorted by timestamp
t = threading.Thread(target=Tailer, kwargs={"name": service,
"nlines": num,
"filepath": logpath,
"follow": follow})
t.daemon = True
t.start()
tail_threads.append(t)
if tail_threads:
while tail_threads[0].isAlive():
tail_threads[0].join(0.1) | Show logs of daemonized service. |
def purge(self):
"""
Purges read/write buffers.
"""
try:
self._device.setblocking(0)
while(self._device.recv(1)):
pass
except socket.error as err:
pass
finally:
self._device.setblocking(1) | Purges read/write buffers. |
def update_type_lookups(self):
""" Update type and typestring lookup dicts.
Must be called once the ``types`` and ``python_type_strings``
attributes are set so that ``type_to_typestring`` and
``typestring_to_type`` are constructed.
.. versionadded:: 0.2
Notes
-----
Subclasses need to call this function explicitly.
"""
self.type_to_typestring = dict(zip(self.types,
self.python_type_strings))
self.typestring_to_type = dict(zip(self.python_type_strings,
self.types)) | Update type and typestring lookup dicts.
Must be called once the ``types`` and ``python_type_strings``
attributes are set so that ``type_to_typestring`` and
``typestring_to_type`` are constructed.
.. versionadded:: 0.2
Notes
-----
Subclasses need to call this function explicitly. |