text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def do_action_to_ancestors(analysis_request, transition_id):
"""Promotes the transitiion passed in to ancestors, if any
"""
parent_ar = analysis_request.getParentAnalysisRequest()
if parent_ar:
do_action_for(parent_ar, transition_id) | 0.003891 |
def pop_job(self, returning=True):
"""
Pop a job from the pending jobs list.
When returning == True, we prioritize the jobs whose functions are known to be returning (function.returning is
True). As an optimization, we are sorting the pending jobs list according to job.function.returning.
:param bool returning: Only pop a pending job if the corresponding function returns.
:return: A pending job if we can find one, or None if we cannot find any that satisfies the requirement.
:rtype: angr.analyses.cfg.cfg_fast.CFGJob
"""
if not self:
return None
if not returning:
return self._pop_job(next(reversed(self._jobs.keys())))
# Prioritize returning functions
for func_addr in reversed(self._jobs.keys()):
if func_addr not in self._returning_functions:
continue
return self._pop_job(func_addr)
return None | 0.006141 |
def get_object_from_date_based_view(request, *args, **kwargs): # noqa
"""
Get object from generic date_based.detail view
Parameters
----------
request : instance
An instance of HttpRequest
Returns
-------
instance
An instance of model object or None
"""
import time
import datetime
    from django.http import Http404
    from django.shortcuts import get_object_or_404
    from django.db.models.fields import DateTimeField
try:
from django.utils import timezone
datetime_now = timezone.now
except ImportError:
datetime_now = datetime.datetime.now
year, month, day = kwargs['year'], kwargs['month'], kwargs['day']
month_format = kwargs.get('month_format', '%b')
day_format = kwargs.get('day_format', '%d')
date_field = kwargs['date_field']
queryset = kwargs['queryset']
object_id = kwargs.get('object_id', None)
slug = kwargs.get('slug', None)
slug_field = kwargs.get('slug_field', 'slug')
try:
tt = time.strptime(
'%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format)
)
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {
'%s__range' % date_field: (
datetime.datetime.combine(date, datetime.time.min),
datetime.datetime.combine(date, datetime.time.max),
)}
else:
lookup_kwargs = {date_field: date}
now = datetime_now()
if date >= now.date() and not kwargs.get('allow_future', False):
lookup_kwargs['%s__lte' % date_field] = now
if object_id:
lookup_kwargs['pk'] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise AttributeError(
"Generic detail view must be called with either an "
"object_id or a slug/slug_field."
)
return get_object_or_404(queryset, **lookup_kwargs) | 0.000473 |
def maximum_font_point_size(self, value):
"""
Setter for **self.__maximum_font_point_size** attribute.
:param value: Attribute value.
:type value: int
"""
if value is not None:
assert type(value) in (int, float), "'{0}' attribute: '{1}' type is not 'int' or 'float'!".format(
"maximum_font_point_size", value)
assert value > self.__minimum_font_point_size, \
"'{0}' attribute: '{1}' need to be exactly superior to '{2}'!".format(
"maximum_font_point_size", value, self.__minimum_font_point_size)
self.__maximum_font_point_size = value | 0.007485 |
def publish_mainswitch_state(self, state):
"""publish changed mainswitch state"""
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.mainswitch_state(self.sequence_number, state))
return self.sequence_number | 0.011364 |
def load_file(self, filename):
"""Load coordinates file.
Results are appended to previously loaded coordinates.
This can be used to load one file per color.
"""
if not os.path.isfile(filename):
return
self.logger.info('Loading coordinates from {0}'.format(filename))
if filename.endswith('.fits'):
fmt = 'fits'
else: # Assume ASCII
fmt = 'ascii'
try:
tab = Table.read(filename, format=fmt)
except Exception as e:
self.logger.error('{0}: {1}'.format(e.__class__.__name__, str(e)))
return
if self.use_radec:
colname0 = self.settings.get('ra_colname', 'ra')
colname1 = self.settings.get('dec_colname', 'dec')
else:
colname0 = self.settings.get('x_colname', 'x')
colname1 = self.settings.get('y_colname', 'y')
try:
col_0 = tab[colname0]
col_1 = tab[colname1]
except Exception as e:
self.logger.error('{0}: {1}'.format(e.__class__.__name__, str(e)))
return
nrows = len(col_0)
dummy_col = [None] * nrows
try:
oldrows = int(self.w.ntotal.get_text())
except ValueError:
oldrows = 0
self.w.ntotal.set_text(str(oldrows + nrows))
if self.use_radec:
ra = self._convert_radec(col_0)
dec = self._convert_radec(col_1)
x = y = dummy_col
else:
ra = dec = dummy_col
# X and Y always 0-indexed internally
x = col_0.data - self.pixelstart
y = col_1.data - self.pixelstart
args = [ra, dec, x, y]
# Load extra columns
for colname in self.extra_columns:
try:
col = tab[colname].data
except Exception as e:
self.logger.error(
'{0}: {1}'.format(e.__class__.__name__, str(e)))
col = dummy_col
args.append(col)
# Use list to preserve order. Does not handle duplicates.
key = (self.marktype, self.marksize, self.markcolor)
self.coords_dict[key] += list(zip(*args))
self.redo() | 0.000881 |
def rabin_karp_factor(s, t, k):
"""Find a common factor by Rabin-Karp
:param string s: haystack
:param string t: needle
:param int k: factor length
:returns: (i, j) such that s[i:i + k] == t[j:j + k] or None.
In case of tie, lexicographical minimum (i, j) is returned
:complexity: O(len(s) + len(t)) in expected time,
and O(len(s) + len(t) * k) in worst case
"""
last_pos = pow(DOMAIN, k - 1) % PRIME
pos = {}
assert k > 0
if len(s) < k or len(t) < k:
return None
hash_t = 0
for j in range(k): # store hashing values
hash_t = (DOMAIN * hash_t + ord(t[j])) % PRIME
for j in range(len(t) - k + 1):
if hash_t in pos:
pos[hash_t].append(j)
else:
pos[hash_t] = [j]
if j < len(t) - k:
hash_t = roll_hash(hash_t, ord(t[j]), ord(t[j + k]), last_pos)
hash_s = 0
for i in range(k): # preprocessing
hash_s = (DOMAIN * hash_s + ord(s[i])) % PRIME
for i in range(len(s) - k + 1):
if hash_s in pos: # is this signature in s?
for j in pos[hash_s]:
if matches(s, t, i, j, k):
return (i, j)
if i < len(s) - k:
hash_s = roll_hash(hash_s, ord(s[i]), ord(s[i + k]), last_pos)
return None | 0.00074 |
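The function above relies on module-level constants DOMAIN and PRIME and on the helpers roll_hash and matches, none of which appear in this snippet. A minimal sketch of what they could look like, assuming a polynomial rolling hash over character codes (the constants and bodies here are illustrative assumptions, not the original module's definitions):
DOMAIN = 128             # assumed hash base (alphabet size)
PRIME = 2 ** 31 - 1      # assumed large prime modulus
def roll_hash(old_val, out_digit, in_digit, last_pos):
    # Drop the outgoing character's contribution, shift the window, add the incoming one.
    return ((old_val - out_digit * last_pos) * DOMAIN + in_digit) % PRIME
def matches(s, t, i, j, k):
    # Confirm a hash hit by direct comparison of the two k-length windows.
    return s[i:i + k] == t[j:j + k]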
def toJSONFilters(actions):
"""Generate a JSON-to-JSON filter from stdin to stdout
The filter:
* reads a JSON-formatted pandoc document from stdin
* transforms it by walking the tree and performing the actions
* returns a new JSON-formatted pandoc document to stdout
The argument `actions` is a list of functions of the form
`action(key, value, format, meta)`, as described in more
detail under `walk`.
This function calls `applyJSONFilters`, with the `format`
argument provided by the first command-line argument,
if present. (Pandoc sets this by default when calling
filters.)
"""
try:
input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
except AttributeError:
# Python 2 does not have sys.stdin.buffer.
# REF: https://stackoverflow.com/questions/2467928/python-unicodeencode
input_stream = codecs.getreader("utf-8")(sys.stdin)
source = input_stream.read()
if len(sys.argv) > 1:
format = sys.argv[1]
else:
format = ""
sys.stdout.write(applyJSONFilters(actions, source, format)) | 0.000888 |
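A short usage sketch of the filter entry point above, written as a standalone script; it assumes the Str element constructor exported by pandocfilters (any other element constructor would work the same way):
#!/usr/bin/env python
from pandocfilters import toJSONFilters, Str
def caps(key, value, format, meta):
    # Upper-case every plain text node in the pandoc AST.
    if key == 'Str':
        return Str(value.upper())
if __name__ == '__main__':
    toJSONFilters([caps])
Invoked as `pandoc --filter ./caps.py input.md`, so pandoc feeds the JSON document through stdin.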
def api_version(self, verbose=False):
'''
Get information about the API
http://docs.opsview.com/doku.php?id=opsview4.6:restapi#api_version_information
'''
return self.__auth_req_get(self.rest_url, verbose=verbose) | 0.007905 |
def remove(self, address):
""" Remove an address from the connection pool, if present, closing
all connections to that address.
"""
with self.lock:
for connection in self.connections.pop(address, ()):
try:
connection.close()
except IOError:
pass | 0.00554 |
def from_bank_code(cls, country_code, bank_code):
"""Create a new BIC object from country- and bank-code.
Examples:
>>> bic = BIC.from_bank_code('DE', '20070000')
>>> bic.country_code
'DE'
>>> bic.bank_code
'DEUT'
>>> bic.location_code
'HH'
>>> BIC.from_bank_code('DE', '01010101')
Traceback (most recent call last):
...
ValueError: Invalid bank code '01010101' for country 'DE'
Args:
country_code (str): ISO 3166 alpha2 country-code.
bank_code (str): Country specific bank-code.
Returns:
BIC: a BIC object generated from the given country code and bank code.
Raises:
ValueError: If the given bank code wasn't found in the registry
Note:
This currently only works for German bank-codes.
"""
try:
return cls(registry.get('bank_code')[(country_code, bank_code)]['bic'])
except KeyError:
raise ValueError("Invalid bank code {!r} for country {!r}".format(bank_code,
country_code)) | 0.004804 |
def ping():
'''
Ping? Pong!
'''
dev = conn()
# Check that the underlying netconf connection still exists.
if dev._conn is None:
return False
    # Call rpc only if the ncclient queue is empty. If it is not empty, another
    # rpc call is in progress.
if hasattr(dev._conn, '_session'):
if dev._conn._session._transport.is_active():
            # there is no ongoing rpc call. buffer tell can be 1 as it stores the
            # remaining char after "]]>]]>", which can be a newline char
if dev._conn._session._buffer.tell() <= 1 and \
dev._conn._session._q.empty():
return _rpc_file_list(dev)
else:
log.debug('skipped ping() call as proxy already getting data')
return True
else:
# ssh connection is lost
return False
else:
# other connection modes, like telnet
return _rpc_file_list(dev) | 0.001029 |
def get_pushes(self, repository_id, project=None, skip=None, top=None, search_criteria=None):
"""GetPushes.
[Preview API] Retrieves pushes associated with the specified repository.
:param str repository_id: The name or ID of the repository.
:param str project: Project ID or project name
:param int skip: Number of pushes to skip.
:param int top: Number of pushes to return.
:param :class:`<GitPushSearchCriteria> <azure.devops.v5_1.git.models.GitPushSearchCriteria>` search_criteria: Search criteria attributes: fromDate, toDate, pusherId, refName, includeRefUpdates or includeLinks. fromDate: Start date to search from. toDate: End date to search to. pusherId: Identity of the person who submitted the push. refName: Branch name to consider. includeRefUpdates: If true, include the list of refs that were updated by the push. includeLinks: Whether to include the _links field on the shallow references.
:rtype: [GitPush]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
query_parameters = {}
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if search_criteria is not None:
if search_criteria.from_date is not None:
query_parameters['searchCriteria.fromDate'] = search_criteria.from_date
if search_criteria.to_date is not None:
query_parameters['searchCriteria.toDate'] = search_criteria.to_date
if search_criteria.pusher_id is not None:
query_parameters['searchCriteria.pusherId'] = search_criteria.pusher_id
if search_criteria.ref_name is not None:
query_parameters['searchCriteria.refName'] = search_criteria.ref_name
if search_criteria.include_ref_updates is not None:
query_parameters['searchCriteria.includeRefUpdates'] = search_criteria.include_ref_updates
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
response = self._send(http_method='GET',
location_id='ea98d07b-3c87-4971-8ede-a613694ffb55',
version='5.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitPush]', self._unwrap_collection(response)) | 0.005626 |
def blur(dset,fwhm,prefix=None):
    '''Blurs ``dset`` with the given ``fwhm`` by running 3dmerge.
    The default ``prefix`` suffixes ``dset`` with ``_blur%.1fmm``'''
    if prefix is None:
prefix = nl.suffix(dset,'_blur%.1fmm'%fwhm)
return available_method('blur')(dset,fwhm,prefix) | 0.031546 |
def register_callback_deleted(self, func, serialised=True):
"""
Register a callback for resource deletion. This will be called when any resource
is deleted within your agent.
If `serialised` is not set, the callbacks might arrive in a different order to they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource deleted
lid : <name> # the local name of the resource
id : <GUID> # the global Id of the resource
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def deleted_callback(args):
print(args)
...
client.register_callback_deleted(deleted_callback)
This would print out something like the following on deletion of an R_ENTITY
#!python
OrderedDict([(u'lid', u'old_thing1'),
(u'r', 1),
(u'id', u'315637813d801ec6f057c67728bf00c2')])
"""
self.__client.register_callback_deleted(partial(self.__callback_payload_only, func), serialised=serialised) | 0.005368 |
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores | 0.002232 |
def bdh(self, tickers, flds, start_date, end_date, elms=None,
ovrds=None, longdata=False):
"""
Get tickers and fields, return pandas DataFrame with columns as
MultiIndex with levels "ticker" and "field" and indexed by "date".
If long data is requested return DataFrame with columns
["date", "ticker", "field", "value"].
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")].
Refer to the HistoricalDataRequest section in the
'Services & schemas reference guide' for more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted
"""
ovrds = [] if not ovrds else ovrds
elms = [] if not elms else elms
elms = list(elms)
data = self._bdh_list(tickers, flds, start_date, end_date,
elms, ovrds)
df = pd.DataFrame(data, columns=['date', 'ticker', 'field', 'value'])
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
if not longdata:
cols = ['ticker', 'field']
df = df.set_index(['date'] + cols).unstack(cols)
df.columns = df.columns.droplevel(0)
return df | 0.001635 |
def output_markdown(markdown_cont, output_file):
"""
    Writes `markdown_cont` to `output_file` if a path is given.
"""
if output_file:
with open(output_file, 'w') as out:
out.write(markdown_cont) | 0.004444 |
def read_long_description(readme_file):
""" Read package long description from README file """
try:
import pypandoc
except (ImportError, OSError) as e:
print('No pypandoc or pandoc: %s' % (e,))
if is_py3:
fh = open(readme_file, encoding='utf-8')
else:
fh = open(readme_file)
long_description = fh.read()
fh.close()
return long_description
else:
return pypandoc.convert(readme_file, 'rst') | 0.002033 |
def load_trace(path, *args, **kwargs):
"""Read a packet trace file, return a :class:`wltrace.common.WlTrace` object.
    This function first reads the file's magic
    (the first ``MAGIC_LEN`` bytes), automatically determines the
    file type, and calls the appropriate handler to process the file.
Args:
path (str): the file's path to be loaded.
Returns:
``WlTrace`` object.
"""
with open(path, 'rb') as f:
magic = f.read(MAGIC_LEN)
if magic not in FILE_TYPE_HANDLER:
raise Exception('Unknown file magic: %s' % (binascii.hexlify(magic)))
return FILE_TYPE_HANDLER[magic](path, *args, **kwargs) | 0.00303 |
def parse_directives(lexer: Lexer, is_const: bool) -> List[DirectiveNode]:
"""Directives[Const]: Directive[?Const]+"""
directives: List[DirectiveNode] = []
append = directives.append
while peek(lexer, TokenKind.AT):
append(parse_directive(lexer, is_const))
return directives | 0.003311 |
def _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=None,
var_group_labels=None,
var_group_positions=None):
"""
    Function used by plotting functions that need to reorder the groupby observations
based on the dendrogram results.
The function checks if a dendrogram has already been precomputed. If not, sc.tl.dendrogram
is run with default parameters.
The results found in .uns[dendrogram_key] are used to reorder var_group_labels
and var_group_positions.
Returns
-------
dictionary with keys: 'categories_idx_ordered','var_group_names_idx_ordered',
'var_group_labels', and 'var_group_positions'
"""
key = _get_dendrogram_key(adata, dendrogram, groupby)
dendro_info = adata.uns[key]
if groupby != dendro_info['groupby']:
raise ValueError("Incompatible observations. The precomputed dendrogram contains information "
"for the observation: '{}' while the plot is made for the "
"observation: '{}. Please run sc.tl.dendrogram "
"using the right observation.'".format(groupby, dendro_info['groupby']))
has_var_groups = True if var_group_positions is not None and len(var_group_positions) > 0 else False
categories = adata.obs[dendro_info['groupby']].cat.categories
# order of groupby categories
categories_idx_ordered = dendro_info['categories_idx_ordered']
if len(categories) != len(categories_idx_ordered):
raise ValueError("Incompatible observations. Dendrogram data has {} categories but "
"current groupby observation {!r} contains {} categories. Most likely "
"the underlying groupby observation changed after the initial "
"computation of `sc.tl.dendrogram`. Please run sc.tl.dendrogram "
"again.'".format(len(categories_idx_ordered),
groupby, len(categories)))
# reorder var_groups (if any)
if var_names is not None:
var_names_idx_ordered = list(range(len(var_names)))
if has_var_groups:
if list(var_group_labels) == list(categories):
positions_ordered = []
labels_ordered = []
position_start = 0
var_names_idx_ordered = []
for idx in categories_idx_ordered:
position = var_group_positions[idx]
_var_names = var_names[position[0]:position[1] + 1]
var_names_idx_ordered.extend(range(position[0], position[1] + 1))
positions_ordered.append((position_start, position_start + len(_var_names) - 1))
position_start += len(_var_names)
labels_ordered.append(var_group_labels[idx])
var_group_labels = labels_ordered
var_group_positions = positions_ordered
else:
logg.warn("Groups are not reordered because the `groupby` categories "
"and the `var_group_labels` are different.\n"
"categories: {}\nvar_group_labels: {}".format(
_format_first_three_categories(categories),
_format_first_three_categories(var_group_labels)))
else:
var_names_idx_ordered = None
var_group_data = {'categories_idx_ordered': categories_idx_ordered,
'var_names_idx_ordered': var_names_idx_ordered,
'var_group_labels': var_group_labels,
'var_group_positions': var_group_positions}
return var_group_data | 0.004241 |
def initFooter(self):
""" Initializes the body/story window """
self.footerWin.bkgd(' ', curses.color_pair(7))
self.footerWin.noutrefresh() | 0.01227 |
def S2_surface(self, sizes, bounds, presets, covers, use_torch=False,
num_samples = 10):
"""Calculates the sensitivity surface of a GrFN for the two variables with
the highest S2 index.
        Args:
            sizes: Tuple of (number of x inputs, number of y inputs).
            bounds: Set of bounds for GrFN inputs.
            presets: Set of standard values for GrFN inputs.
            num_samples: Number of samples for sensitivity analysis.
Returns:
Tuple:
Tuple: The names of the two variables that were selected
Tuple: The X, Y vectors of eval values
Z: The numpy matrix of output evaluations
"""
args = self.inputs
Si = self.sobol_analysis(
num_samples,
{
"num_vars": len(args),
"names": args,
"bounds": [bounds[arg] for arg in args],
},
covers
)
S2 = Si["S2"]
(s2_max, v1, v2) = get_max_s2_sensitivity(S2)
x_var = args[v1]
y_var = args[v2]
search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])]
preset_vals = {
arg: presets[arg]
for i, arg in enumerate(args)
if i != v1 and i != v2
}
X = np.linspace(*search_space[0][1], sizes[0])
Y = np.linspace(*search_space[1][1], sizes[1])
if use_torch:
Xm, Ym = torch.meshgrid(torch.tensor(X), torch.tensor(Y))
inputs = {n: torch.full_like(Xm, v) for n, v in presets.items()}
inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym})
Z = self.run(inputs, covers).numpy()
else:
Xm, Ym = np.meshgrid(X, Y)
Z = np.zeros((len(X), len(Y)))
for x, y in itertools.product(range(len(X)), range(len(Y))):
inputs = {n: v for n, v in presets.items()}
                inputs.update({search_space[0][0]: X[x], search_space[1][0]: Y[y]})
Z[x][y] = self.run(inputs, covers)
return X, Y, Z, x_var, y_var | 0.00282 |
def requestFields(self, field_names, required=False, strict=False):
"""Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once
"""
if isinstance(field_names, basestring):
raise TypeError('Fields should be passed as a list of '
'strings (not %r)' % (type(field_names),))
for field_name in field_names:
self.requestField(field_name, required, strict=strict) | 0.002165 |
def aer2geodetic(az: float, el: float, srange: float,
lat0: float, lon0: float, h0: float,
ell=None,
deg: bool = True) -> Tuple[float, float, float]:
"""
gives geodetic coordinates of a point with az, el, range
from an observer at lat0, lon0, h0
Parameters
----------
az : float or numpy.ndarray of float
azimuth to target
el : float or numpy.ndarray of float
elevation to target
srange : float or numpy.ndarray of float
slant range [meters]
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
In reference ellipsoid system:
lat : float or numpy.ndarray of float
geodetic latitude
lon : float or numpy.ndarray of float
geodetic longitude
alt : float or numpy.ndarray of float
altitude above ellipsoid (meters)
"""
x, y, z = aer2ecef(az, el, srange, lat0, lon0, h0, ell=ell, deg=deg)
return ecef2geodetic(x, y, z, ell=ell, deg=deg) | 0.000767 |
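A brief usage illustration with arbitrary numbers (it assumes the surrounding module's aer2ecef and ecef2geodetic are importable alongside this function, as in pymap3d):
# Observer at 42 N, 82 W, 200 m; target seen at azimuth 33 deg, elevation 70 deg, 1 km slant range.
lat, lon, alt = aer2geodetic(33.0, 70.0, 1000.0, 42.0, -82.0, 200.0)
print(lat, lon, alt)  # geodetic latitude, longitude and altitude of the target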
def copy_database_structure(self, source, destination, tables=None):
"""Copy multiple tables from one database to another."""
# Change database to source
self.change_db(source)
if tables is None:
tables = self.tables
# Change database to destination
self.change_db(destination)
for t in tqdm(tables, total=len(tables), desc='Copying {0} table structure'.format(source)):
self.copy_table_structure(source, destination, t) | 0.005976 |
def plot_joint_sfs_scaled(*args, **kwargs):
"""Plot a scaled joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
imshow_kwargs : dict-like
Additional keyword arguments, passed through to ax.imshow().
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
imshow_kwargs = kwargs.get('imshow_kwargs', dict())
imshow_kwargs.setdefault('norm', None)
kwargs['imshow_kwargs'] = imshow_kwargs
ax = plot_joint_sfs(*args, **kwargs)
return ax | 0.00137 |
def get_selinux_context(path):
'''
Get an SELinux context from a given path
CLI Example:
.. code-block:: bash
salt '*' file.get_selinux_context /etc/hosts
'''
out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
try:
ret = re.search(r'\w+:\w+:\w+:\w+', out).group(0)
except AttributeError:
ret = (
'No selinux context information is available for {0}'.format(path)
)
return ret | 0.002105 |
def initialize(self, configfile=None):
"""Initialize and load the Fortran library (and model, if applicable).
The Fortran library is loaded and ctypes is used to annotate functions
inside the library. The Fortran library's initialization is called.
Normally a path to an ``*.ini`` model file is passed to the
:meth:`__init__`. If so, that model is loaded. Note that
:meth:`_load_model` changes the working directory to that of the model.
"""
if configfile is not None:
self.configfile = configfile
try:
self.configfile
except AttributeError:
raise ValueError("Specify configfile during construction or during initialize")
abs_name = os.path.abspath(self.configfile)
os.chdir(os.path.dirname(self.configfile) or '.')
logmsg = "Loading model {} in directory {}".format(
self.configfile,
os.path.abspath(os.getcwd())
)
logger.info(logmsg)
# Fortran init function.
self.library.initialize.argtypes = [c_char_p]
self.library.initialize.restype = None
# initialize by abs_name because we already chdirred
# if configfile is a relative path we would have a problem
ierr = wrap(self.library.initialize)(abs_name)
if ierr:
errormsg = "Loading model {config} failed with exit code {code}"
raise RuntimeError(errormsg.format(config=self.configfile,
code=ierr)) | 0.001922 |
def BeginEdit(self, row, col, grid):
"""
Fetch the value from the table and prepare the edit control
to begin editing. Set the focus to the edit control.
*Must Override*
"""
# Disable if cell is locked, enable if cell is not locked
grid = self.main_window.grid
key = grid.actions.cursor
        locked = grid.code_array.cell_attributes[key]["locked"] or \
            grid.code_array.cell_attributes[key]["button_cell"]
self._tc.Enable(not locked)
self._tc.Show(not locked)
if locked:
grid.code_array.result_cache.clear()
self._execute_cell_code(row, col, grid)
# Mirror our changes onto the main_window's code bar
self._tc.Bind(wx.EVT_CHAR, self.OnChar)
self._tc.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
# Save cell and grid info
self._row = row
self._col = [col, ] # List of columns we are occupying
self._grid = grid
start_value = grid.GetTable().GetValue(*key)
try:
start_value_list = [start_value[i:i+self.max_char_width]
for i in xrange(0, len(start_value),
self.max_char_width)]
startValue = "\n".join(start_value_list)
self.startValue = startValue
except TypeError:
self.startValue = u""
# Set up the textcontrol to look like this cell (TODO: Does not work)
try:
self._tc.SetValue(unicode(startValue))
except (TypeError, AttributeError, UnboundLocalError):
self._tc.SetValue(u"")
self._tc.SetFont(grid.GetCellFont(row, col))
self._tc.SetBackgroundColour(grid.GetCellBackgroundColour(row, col))
self._update_control_length()
self._tc.SetInsertionPointEnd()
h_scroll_pos = grid.GetScrollPos(wx.SB_HORIZONTAL)
v_scroll_pos = grid.GetScrollPos(wx.SB_VERTICAL)
self._tc.SetFocus()
# GTK Bugfix for jumping grid when focusing the textctrl
if grid.GetScrollPos(wx.SB_HORIZONTAL) != h_scroll_pos or \
grid.GetScrollPos(wx.SB_VERTICAL) != v_scroll_pos:
wx.ScrolledWindow.Scroll(grid, (h_scroll_pos, v_scroll_pos))
# Select the text
self._tc.SetSelection(0, self._tc.GetLastPosition()) | 0.001265 |
def run_simulations(self, parameter_list, data_folder):
"""
This function runs multiple simulations in parallel.
Args:
parameter_list (list): list of parameter combinations to simulate.
data_folder (str): folder in which to create output folders.
"""
self.data_folder = data_folder
with Pool(processes=MAX_PARALLEL_PROCESSES) as pool:
for result in pool.imap_unordered(self.launch_simulation,
parameter_list):
yield result | 0.003515 |
def _render_item(self, depth, key, value = None, **settings):
"""
Format single list item.
"""
strptrn = self.INDENT * depth
lchar = self.lchar(settings[self.SETTING_LIST_STYLE])
s = self._es_text(settings, settings[self.SETTING_LIST_FORMATING])
lchar = self.fmt_text(lchar, **s)
strptrn = "{}"
if value is not None:
strptrn += ": {}"
s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING])
strptrn = self.fmt_text(strptrn.format(key, value), **s)
return '{} {} {}'.format(self.INDENT * depth, lchar, strptrn) | 0.007911 |
def create_mef(filename=None):
"""
    Create an MEF fits file called filename. Generate a random
    filename if None is given.
"""
import pyfits, time
if not filename:
### here I know what filename is to start with.
import tempfile
filename=tempfile.mktemp(suffix='.fits')
else:
import string, re
### filenames gotta be a string and no lead/trailing space
filename=string.strip(str(filename))
### require that the filename ends in .fits
suffix=re.match(r'^.*.fits$',filename)
if not suffix:
filename = filename+'.fits'
### create an HDU list
temp = pyfits.HDUList()
### create a primary HDU
prihdu = pyfits.PrimaryHDU()
### build the header
h=prihdu.header
h.update('EXTEND',pyfits.TRUE,after='NAXIS')
h.update('NEXTEND',0,after='EXTEND')
h.add_comment('MEF created at CADC')
h.add_comment('Created using '+__name__+' '+__Version__)
h.add_comment('Extensions may not be in CCD order')
#h.update('cfh12k',__Version__,comment='split2mef software at CADC')
h.add_comment('Use the EXTNAME keyword')
h.add_history('Primary HDU created on '+time.asctime())
### stick the HDU onto the HDU list and write to file
temp.append(prihdu)
temp.writeto(filename)
temp.close()
return(filename) | 0.014609 |
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
    :param str key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values) | 0.002427 |
def _get_parameter_dictionary(self, base_name, dictionary, sorted_keys, parameter):
"""
Recursively loops over the parameter's children, adding
keys (starting with base_name) and values to the supplied dictionary
(provided they do not have a value of None).
"""
# assemble the key for this parameter
k = base_name + "/" + parameter.name()
# first add this parameter (if it has a value)
        if parameter.value() is not None:
sorted_keys.append(k[1:])
dictionary[sorted_keys[-1]] = parameter.value()
# now loop over the children
for p in parameter.children():
self._get_parameter_dictionary(k, dictionary, sorted_keys, p) | 0.006757 |
def nas_auto_qos_set_dscp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nas = ET.SubElement(config, "nas", xmlns="urn:brocade.com:mgmt:brocade-qos")
auto_qos = ET.SubElement(nas, "auto-qos")
set = ET.SubElement(auto_qos, "set")
dscp = ET.SubElement(set, "dscp")
dscp.text = kwargs.pop('dscp')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006316 |
def CMS(data, format="PEM"):
"""
Factory function to create CMS objects from received messages.
Parses CMS data and returns either SignedData or EnvelopedData
object. format argument can be either "PEM" or "DER".
It determines object type from the contents of received CMS
structure.
"""
bio = Membio(data)
if format == "PEM":
ptr = libcrypto.PEM_read_bio_CMS(bio.bio, None, None, None)
else:
ptr = libcrypto.d2i_CMS_bio(bio.bio, None)
if ptr is None:
raise CMSError("Error parsing CMS data")
typeoid = Oid(libcrypto.OBJ_obj2nid(libcrypto.CMS_get0_type(ptr)))
if typeoid.shortname() == "pkcs7-signedData":
return SignedData(ptr)
elif typeoid.shortname() == "pkcs7-envelopedData":
return EnvelopedData(ptr)
elif typeoid.shortname() == "pkcs7-encryptedData":
return EncryptedData(ptr)
else:
raise NotImplementedError("cannot handle "+typeoid.shortname()) | 0.002045 |
def get_parameters(parser, token):
"""
{% get_parameters except_field %}
"""
args = token.split_contents()
if len(args) < 2:
raise template.TemplateSyntaxError(
"get_parameters tag takes at least 1 argument"
)
return GetParametersNode(args[1].strip()) | 0.0033 |
def get_connection(cls, pid, connection):
"""Return the specified :class:`~queries.pool.Connection` from the
pool.
:param str pid: The pool ID
:param connection: The connection to return for
:type connection: psycopg2.extensions.connection
:rtype: queries.pool.Connection
"""
with cls._lock:
return cls._pools[pid].connection_handle(connection) | 0.004739 |
def current_earthquake_model_name():
"""Human friendly name for the currently active earthquake fatality model.
:returns: Name of the current EQ fatality model as defined in users
settings.
"""
default_earthquake_function = setting(
'earthquake_function', EARTHQUAKE_FUNCTIONS[0]['key'], str)
current_function = None
for model in EARTHQUAKE_FUNCTIONS:
if model['key'] == default_earthquake_function:
current_function = model['name']
return current_function | 0.001919 |
def error_response(error_type="Internal server error",
error_text=None,
status=400,
participant=None):
"""Return a generic server error response."""
traceback.print_exc()
print("Error: {}.".format(error_type))
page = error_page(
error_text=error_text,
error_type=error_type,
participant=participant)
data = {
"status": "error",
"html": page
}
return Response(dumps(data), status=status, mimetype='application/json') | 0.001845 |
def _check_value_recursively(key, val, haystack):
"""
Check if there is key _key_ with value _val_ in the given dictionary.
    .. warning::
This is geared at JSON dictionaries, so some corner cases are ignored,
we assume all iterables are either arrays or dicts
"""
if isinstance(haystack, list):
return any([_check_value_recursively(key, val, l) for l in haystack])
elif isinstance(haystack, dict):
        if key not in haystack:
return any([_check_value_recursively(key, val, d) for k, d in haystack.items()
if isinstance(d, list) or isinstance(d, dict)])
else:
return haystack[key] == val
else:
return False | 0.00554 |
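A brief illustration of the recursive check above on a made-up JSON-like structure (the data is hypothetical):
document = {"user": {"name": "alice", "tags": [{"id": 1}, {"id": 2}]}}
print(_check_value_recursively("id", 2, document))        # True: found inside the nested list
print(_check_value_recursively("name", "bob", document))  # False: key exists but the value differs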
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time."""
self.setUpdatesEnabled(False)
first_spyder_run = bool(self.first_spyder_run) # Store copy
if first_spyder_run:
self.set_window_settings(*settings)
else:
if self.last_plugin:
if self.last_plugin.ismaximized:
self.maximize_dockwidget(restore=True)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
min_width = self.minimumWidth()
max_width = self.maximumWidth()
base_width = self.width()
self.setFixedWidth(base_width)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# Define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
# Stored for tests
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# --------------------------------------------------------------------
# Layouts are organized by columns, each column is organized by rows.
# Widths have to add 1.0 (except if hidden), height per column has to
# add 1.0 as well
# Spyder Default Initial Layout
s_layout = {
'widgets': [
# Column 0
[[explorer_project]],
# Column 1
[[editor]],
# Column 2
[[outline]],
# Column 3
[[help_plugin, explorer_variable, plots, # Row 0
helper, explorer_file, finder] + plugins,
[console_int, console_ipy, history]] # Row 1
],
'width fraction': [0.05, # Column 0 width
0.55, # Column 1 width
0.05, # Column 2 width
0.45], # Column 3 width
'height fraction': [[1.0], # Column 0, row heights
[1.0], # Column 1, row heights
[1.0], # Column 2, row heights
[0.46, 0.54]], # Column 3, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# RStudio
r_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int]], # Row 1
# column 1
[[explorer_variable, plots, history, # Row 0
outline, finder] + plugins,
[explorer_file, explorer_project, # Row 1
help_plugin, helper]]
],
'width fraction': [0.55, # Column 0 width
0.45], # Column 1 width
'height fraction': [[0.55, 0.45], # Column 0, row heights
[0.55, 0.45]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Matlab
m_layout = {
'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.10, # Column 0 width
0.45, # Column 1 width
0.45], # Column 2 width
'height fraction': [[0.55, 0.45], # Column 0, row heights
[0.55, 0.45], # Column 1, row heights
[0.55, 0.45]], # Column 2, row heights
'hidden widgets': global_hidden_widgets,
'hidden toolbars': [],
}
# Vertically split
v_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int, explorer_file, # Row 1
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # Column 0 width
'height fraction': [[0.55, 0.45]], # Column 0, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Horizontally split
h_layout = {
'widgets': [
# column 0
[[editor]], # Row 0
# column 1
[[console_ipy, console_int, explorer_file, # Row 0
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # Column 0 width
0.45], # Column 1 width
'height fraction': [[1.0], # Column 0, row heights
[1.0]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': []
}
# Layout selection
layouts = {
'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout,
}
layout = layouts[index]
# Remove None from widgets layout
widgets_layout = layout['widgets']
widgets_layout_clean = []
for column in widgets_layout:
clean_col = []
for row in column:
clean_row = [w for w in row if w is not None]
if clean_row:
clean_col.append(clean_row)
if clean_col:
widgets_layout_clean.append(clean_col)
# Flatten widgets list
widgets = []
for column in widgets_layout_clean:
for row in column:
for widget in row:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
widget.toggle_view_action.setChecked(True)
# We use both directions to ensure proper update when moving from
# 'Horizontal Split' to 'Spyder Default'
# This also seems to help on random cases where the display seems
# 'empty'
for direction in (Qt.Vertical, Qt.Horizontal):
# Arrange the widgets in one direction
for idx in range(len(widgets) - 1):
first, second = widgets[idx], widgets[idx+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
direction)
# Arrange the widgets in the other direction
for column in widgets_layout_clean:
for idx in range(len(column) - 1):
first_row, second_row = column[idx], column[idx+1]
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout_clean:
for row in column:
for idx in range(len(row) - 1):
first, second = row[idx], row[idx+1]
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Set dockwidget widths
width_fractions = layout['width fraction']
if len(width_fractions) > 1:
_widgets = [col[0][0].dockwidget for col in widgets_layout]
self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
# Set dockwidget heights
height_fractions = layout['height fraction']
for idx, column in enumerate(widgets_layout_clean):
if len(column) > 1:
_widgets = [row[0].dockwidget for row in column]
self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
if first_spyder_run:
self.first_spyder_run = False
else:
self.setMinimumWidth(min_width)
self.setMaximumWidth(max_width)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
self.setUpdatesEnabled(True)
self.sig_layout_setup_ready.emit(layout)
return layout | 0.000192 |
def parse_lammps_log(filename="log.lammps"):
"""
Parses log file with focus on thermo data. Both one and multi line
formats are supported. Any incomplete runs (no "Loop time" marker)
will not be parsed.
Notes:
SHAKE stats printed with thermo data are not supported yet.
They are ignored in multi line format, while they may cause
issues with dataframe parsing in one line format.
Args:
filename (str): Filename to parse.
Returns:
[pd.DataFrame] containing thermo data for each completed run.
"""
with open(filename) as f:
lines = f.readlines()
begin_flag = ("Memory usage per processor =",
"Per MPI rank memory allocation (min/avg/max) =")
end_flag = "Loop time of"
begins, ends = [], []
for i, l in enumerate(lines):
if l.startswith(begin_flag):
begins.append(i)
elif l.startswith(end_flag):
ends.append(i)
def _parse_thermo(lines):
multi_pattern = r"-+\s+Step\s+([0-9]+)\s+-+"
# multi line thermo data
if re.match(multi_pattern, lines[0]):
timestep_marks = [i for i, l in enumerate(lines)
if re.match(multi_pattern, l)]
timesteps = np.split(lines, timestep_marks)[1:]
dicts = []
kv_pattern = r"([0-9A-Za-z_\[\]]+)\s+=\s+([0-9eE\.+-]+)"
for ts in timesteps:
data = {}
data["Step"] = int(re.match(multi_pattern, ts[0]).group(1))
data.update({k: float(v) for k, v
in re.findall(kv_pattern, "".join(ts[1:]))})
dicts.append(data)
df = pd.DataFrame(dicts)
# rearrange the sequence of columns
columns = ["Step"] + [k for k, v in
re.findall(kv_pattern,
"".join(timesteps[0][1:]))]
df = df[columns]
# one line thermo data
else:
df = pd.read_csv(StringIO("".join(lines)), delim_whitespace=True)
return df
runs = []
for b, e in zip(begins, ends):
runs.append(_parse_thermo(lines[b + 1:e]))
return runs | 0.000444 |
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the boxscore string.
"""
if self._goals_scored is None and self._goals_allowed is None:
return None
fields_to_include = {
'boxscore_index': self.boxscore_index,
'date': self.date,
'datetime': self.datetime,
'game': self.game,
'goals_allowed': self.goals_allowed,
'goals_scored': self.goals_scored,
'location': self.location,
'opponent_abbr': self.opponent_abbr,
'opponent_name': self.opponent_name,
'overtime': self.overtime,
'penalties_in_minutes': self.penalties_in_minutes,
'power_play_goals': self.power_play_goals,
'power_play_opportunities': self.power_play_opportunities,
'result': self.result,
'short_handed_goals': self.short_handed_goals,
'shots_on_goal': self.shots_on_goal,
'opp_shots_on_goal': self.opp_shots_on_goal,
'opp_penalties_in_minutes': self.opp_penalties_in_minutes,
'opp_power_play_goals': self.opp_power_play_goals,
'opp_power_play_opportunities': self.opp_power_play_opportunities,
'opp_short_handed_goals': self.opp_short_handed_goals,
'corsi_for': self.corsi_for,
'corsi_against': self.corsi_against,
'corsi_for_percentage': self.corsi_for_percentage,
'fenwick_for': self.fenwick_for,
'fenwick_against': self.fenwick_against,
'fenwick_for_percentage': self.fenwick_for_percentage,
'faceoff_wins': self.faceoff_wins,
'faceoff_losses': self.faceoff_losses,
'faceoff_win_percentage': self.faceoff_win_percentage,
'offensive_zone_start_percentage':
self.offensive_zone_start_percentage,
'pdo': self.pdo
}
return pd.DataFrame([fields_to_include], index=[self._boxscore]) | 0.000953 |
def compile_geometry(lat, lon, elev):
"""
Take in lists of lat and lon coordinates, and determine what geometry to create
:param list lat: Latitude values
:param list lon: Longitude values
:param float elev: Elevation value
:return dict:
"""
logger_excel.info("enter compile_geometry")
lat = _remove_geo_placeholders(lat)
lon = _remove_geo_placeholders(lon)
# 4 coordinate values
if len(lat) == 2 and len(lon) == 2:
logger_excel.info("found 4 coordinates")
geo_dict = geometry_linestring(lat, lon, elev)
# # 4 coordinate values
# if (lat[0] != lat[1]) and (lon[0] != lon[1]):
# geo_dict = geometry_polygon(lat, lon)
# # 3 unique coordinates
# else:
# geo_dict = geometry_multipoint(lat, lon)
#
# 2 coordinate values
elif len(lat) == 1 and len(lon) == 1:
logger_excel.info("found 2 coordinates")
geo_dict = geometry_point(lat, lon, elev)
# coordinate range. one value given but not the other.
elif (None in lon and None not in lat) or (len(lat) > 0 and len(lon) == 0):
geo_dict = geometry_range(lat, elev, "lat")
elif (None in lat and None not in lon) or (len(lon) > 0 and len(lat) == 0):
geo_dict = geometry_range(lat, elev, "lon")
# Too many points, or no points
else:
geo_dict = {}
logger_excel.warn("compile_geometry: invalid coordinates: lat: {}, lon: {}".format(lat, lon))
logger_excel.info("exit compile_geometry")
return geo_dict | 0.001931 |
def get_vault(self):
"""Gets the ``Vault`` at this node.
return: (osid.authorization.Vault) - the vault represented by
this node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('AUTHORIZATION', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_vault_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_vault(Id(self._my_map['id'])) | 0.007246 |
def is_single_tree(data_wrapper):
'''Check that data forms a single tree
    Only the first point should have a parent ID of -1.
Returns:
CheckResult with result and list of IDs
Note:
This assumes no_missing_parents passed.
'''
db = data_wrapper.data_block
bad_ids = db[db[:, COLS.P] == -1][1:, COLS.ID]
return CheckResult(len(bad_ids) == 0, bad_ids.tolist()) | 0.002571 |
def create(self, dmeData):
"""
Create a new device management extension package
        In case of failure it raises ApiException
"""
url = "api/v0002/mgmt/custom/bundle"
r = self._apiClient.post(url, dmeData)
if r.status_code == 201:
return r.json()
else:
raise ApiException(r) | 0.005571 |
def drawSector(page, center, point, beta, color=None, fill=None,
dashes=None, fullSector=True, morph=None,
width=1, closePath=False, roundCap=False, overlay=True):
""" Draw a circle sector given circle center, one arc end point and the angle of the arc.
Parameters:
center -- center of circle
point -- arc end point
beta -- angle of arc (degrees)
fullSector -- connect arc ends with center
"""
img = page.newShape()
Q = img.drawSector(Point(center), Point(point), beta, fullSector=fullSector)
img.finish(color=color, fill=fill, dashes=dashes, width=width,
roundCap=roundCap, morph=morph, closePath=closePath)
img.commit(overlay)
return Q | 0.009284 |
def reduce_list_of_bags_of_words(list_of_keyword_sets):
"""
Reduces a number of keyword sets to a bag-of-words.
Input: - list_of_keyword_sets: This is a python list of sets of strings.
Output: - bag_of_words: This is the corresponding multi-set or bag-of-words, in the form of a python dictionary.
"""
bag_of_words = dict()
get_bag_of_words_keys = bag_of_words.keys
for keyword_set in list_of_keyword_sets:
for keyword in keyword_set:
if keyword in get_bag_of_words_keys():
bag_of_words[keyword] += 1
else:
bag_of_words[keyword] = 1
return bag_of_words | 0.003049 |
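A quick usage illustration with made-up keyword sets:
keyword_sets = [{"apple", "banana"}, {"banana", "cherry"}, {"banana"}]
print(reduce_list_of_bags_of_words(keyword_sets))
# {'apple': 1, 'banana': 3, 'cherry': 1}  (key order may vary)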
def lengths_to_mask(*lengths, **kwargs):
""" Given a list of lengths, create a batch mask.
Example:
>>> lengths_to_mask([1, 2, 3])
tensor([[1, 0, 0],
[1, 1, 0],
[1, 1, 1]], dtype=torch.uint8)
>>> lengths_to_mask([1, 2, 2], [1, 2, 2])
tensor([[[1, 0],
[0, 0]],
<BLANKLINE>
[[1, 1],
[1, 1]],
<BLANKLINE>
[[1, 1],
[1, 1]]], dtype=torch.uint8)
Args:
*lengths (list of int or torch.Tensor)
**kwargs: Keyword arguments passed to ``torch.zeros`` upon initially creating the returned
tensor.
Returns:
torch.ByteTensor
"""
# Squeeze to deal with random additional dimensions
lengths = [l.squeeze().tolist() if torch.is_tensor(l) else l for l in lengths]
# For cases where length is a scalar, this needs to convert it to a list.
lengths = [l if isinstance(l, list) else [l] for l in lengths]
assert all(len(l) == len(lengths[0]) for l in lengths)
batch_size = len(lengths[0])
other_dimensions = tuple([int(max(l)) for l in lengths])
mask = torch.zeros(batch_size, *other_dimensions, **kwargs)
for i, length in enumerate(zip(*tuple(lengths))):
mask[i][[slice(int(l)) for l in length]].fill_(1)
return mask.byte() | 0.005839 |
def _name_value_to_bson(name, value, check_keys, opts,
in_custom_call=False,
in_fallback_call=False):
"""Encode a single name, value pair."""
# First see if the type is already cached. KeyError will only ever
# happen once per subtype.
try:
return _ENCODERS[type(value)](name, value, check_keys, opts)
except KeyError:
pass
# Second, fall back to trying _type_marker. This has to be done
# before the loop below since users could subclass one of our
# custom types that subclasses a python built-in (e.g. Binary)
marker = getattr(value, "_type_marker", None)
if isinstance(marker, int) and marker in _MARKERS:
func = _MARKERS[marker]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
# Third, check if a type encoder is registered for this type.
# Note that subtypes of registered custom types are not auto-encoded.
if not in_custom_call and opts.type_registry._encoder_map:
custom_encoder = opts.type_registry._encoder_map.get(type(value))
if custom_encoder is not None:
return _name_value_to_bson(
name, custom_encoder(value), check_keys, opts,
in_custom_call=True)
# Fourth, test each base type. This will only happen once for
# a subtype of a supported base type. Unlike in the C-extensions, this
# is done after trying the custom type encoder because checking for each
# subtype is expensive.
for base in _BUILT_IN_TYPES:
if isinstance(value, base):
func = _ENCODERS[base]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
# As a last resort, try using the fallback encoder, if the user has
# provided one.
fallback_encoder = opts.type_registry._fallback_encoder
if not in_fallback_call and fallback_encoder is not None:
return _name_value_to_bson(
name, fallback_encoder(value), check_keys, opts,
in_fallback_call=True)
raise InvalidDocument(
"cannot encode object: %r, of type: %r" % (value, type(value))) | 0.000434 |
def init_ns_var(which_ns: str = CORE_NS, ns_var_name: str = NS_VAR_NAME) -> Var:
"""Initialize the dynamic `*ns*` variable in the Namespace `which_ns`."""
core_sym = sym.Symbol(which_ns)
core_ns = Namespace.get_or_create(core_sym)
ns_var = Var.intern(core_sym, sym.Symbol(ns_var_name), core_ns, dynamic=True)
logger.debug(f"Created namespace variable {sym.symbol(ns_var_name, ns=which_ns)}")
return ns_var | 0.009324 |
def set_content_length(self):
'''Find and set the content length.
.. seealso:: :meth:`compute_checksum`.
'''
if not self.block_file:
self.fields['Content-Length'] = '0'
return
with wpull.util.reset_file_offset(self.block_file):
wpull.util.seek_file_end(self.block_file)
self.fields['Content-Length'] = str(self.block_file.tell()) | 0.004773 |
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n | 0.002415 |
def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE,
exportable=True, password='', saltenv='base'):
'''
Import the certificate file into the given certificate store.
:param str name: The path of the certificate file to import.
:param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12.
:param str context: The name of the certificate store location context.
:param str store: The name of the certificate store.
:param bool exportable: Mark the certificate as exportable. Only applicable to pfx format.
:param str password: The password of the certificate. Only applicable to pfx format.
:param str saltenv: The environment the file resides in.
Example of usage with only the required arguments:
.. code-block:: yaml
site0-cert-imported:
win_pki.import_cert:
- name: salt://win/webserver/certs/site0.cer
Example of usage specifying all available arguments:
.. code-block:: yaml
site0-cert-imported:
win_pki.import_cert:
- name: salt://win/webserver/certs/site0.pfx
- cert_format: pfx
- context: LocalMachine
- store: My
- exportable: True
- password: TestPassword
- saltenv: base
'''
ret = {'name': name,
'changes': dict(),
'comment': six.text_type(),
'result': None}
store_path = r'Cert:\{0}\{1}'.format(context, store)
cached_source_path = __salt__['cp.cache_file'](name, saltenv)
current_certs = __salt__['win_pki.get_certs'](context=context, store=store)
if password:
cert_props = __salt__['win_pki.get_cert_file'](name=cached_source_path, cert_format=cert_format, password=password)
else:
cert_props = __salt__['win_pki.get_cert_file'](name=cached_source_path, cert_format=cert_format)
if cert_props['thumbprint'] in current_certs:
ret['comment'] = ("Certificate '{0}' already contained in store:"
' {1}').format(cert_props['thumbprint'], store_path)
ret['result'] = True
elif __opts__['test']:
ret['comment'] = ("Certificate '{0}' will be imported into store:"
' {1}').format(cert_props['thumbprint'], store_path)
ret['changes'] = {'old': None,
'new': cert_props['thumbprint']}
else:
ret['changes'] = {'old': None,
'new': cert_props['thumbprint']}
ret['result'] = __salt__['win_pki.import_cert'](name=name, cert_format=cert_format,
context=context, store=store,
exportable=exportable, password=password,
saltenv=saltenv)
if ret['result']:
ret['comment'] = ("Certificate '{0}' imported into store:"
' {1}').format(cert_props['thumbprint'], store_path)
else:
ret['comment'] = ("Certificate '{0}' unable to be imported into store:"
' {1}').format(cert_props['thumbprint'], store_path)
return ret | 0.003893 |
async def load_uint(reader, width):
"""
    Constant-width (little-endian) unsigned integer deserialization
:param reader:
:param width:
:return:
"""
buffer = _UINT_BUFFER
result = 0
shift = 0
for _ in range(width):
await reader.areadinto(buffer)
result += buffer[0] << shift
shift += 8
return result | 0.002967 |
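The loop above accumulates bytes least-significant first, i.e. it decodes a fixed-width little-endian unsigned integer. The same arithmetic written synchronously over a plain bytes object, purely as an illustration (decode_uint_le is not part of the original module):
def decode_uint_le(data, width):
    # Decode the first `width` bytes of `data` as a little-endian unsigned integer.
    result = 0
    shift = 0
    for b in data[:width]:
        result += b << shift
        shift += 8
    return result
assert decode_uint_le(b"\x01\x02\x00\x00", 4) == 0x0201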
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release() | 0.008602 |
def flatten(x):
"""
Flatten an arbitrary depth nested list.
"""
# Lifted from: http://stackoverflow.com/a/406822/263969
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result | 0.002941 |
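A quick, hypothetical usage illustration (run under Python 2, since the helper above relies on basestring):
nested = [1, [2, [3, [4, 5]]], "abc"]
print(flatten(nested))
# [1, 2, 3, 4, 5, 'abc']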
def symlink_abiext(self, inext, outext):
"""
        Create a symbolic link (outext --> inext). The file names are implicitly
given by the ABINIT file extension.
Example:
outdir.symlink_abiext('1WF', 'DDK')
creates the link out_DDK that points to out_1WF
Return: 0 if success.
Raise: RuntimeError
"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
if os.path.exists(outfile):
if os.path.islink(outfile):
if os.path.realpath(outfile) == infile:
logger.debug("link %s already exists but it's ok because it points to the correct file" % outfile)
return 0
else:
raise RuntimeError("Expecting link at %s already exists but it does not point to %s" % (outfile, infile))
else:
raise RuntimeError('Expecting link at %s but found file.' % outfile)
os.symlink(infile, outfile)
return 0 | 0.005857 |
def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params):
"""Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function.
"""
voxel_fn = extra_params[0]
shape_mask = extra_params[1]
min_active_voxels_proportion = extra_params[2]
outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad]
for i in range(0, outmat.shape[0]):
for j in range(0, outmat.shape[1]):
for k in range(0, outmat.shape[2]):
if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]:
searchlight_slice = np.s_[
i:i+2*mysl_rad+1,
j:j+2*mysl_rad+1,
k:k+2*mysl_rad+1]
voxel_fn_mask = msk[searchlight_slice] * shape_mask
if (min_active_voxels_proportion == 0
or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size
> min_active_voxels_proportion):
outmat[i, j, k] = voxel_fn(
[ll[searchlight_slice] for ll in l],
msk[searchlight_slice] * shape_mask,
mysl_rad,
bcast_var)
return outmat | 0.001296 |
def plot_compare(self, other_plotter, legend=True):
"""
        Plot two band structures for comparison: one in red, the other in blue
        (no distinction between spins). The two band structures must be defined
        on the same symmetry lines, and the distance between symmetry lines is
        taken from the band structure used to build the BSPlotter.
Args:
            other_plotter: another BSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
# TODO: add exception if the band structures are not compatible
import matplotlib.lines as mlines
plt = self.get_plot()
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[str(Spin.up)][i] for e in data['energy']][d],
'c-', linewidth=band_linewidth)
if other_plotter._bs.is_spin_polarized:
plt.plot(data_orig['distances'][d],
[e[str(Spin.down)][i] for e in data['energy']][d],
'm--', linewidth=band_linewidth)
if legend:
handles = [mlines.Line2D([], [], linewidth=2,
color='b', label='bs 1 up'),
mlines.Line2D([], [], linewidth=2,
color='r', label='bs 1 down',
linestyle="--"),
mlines.Line2D([], [], linewidth=2,
color='c', label='bs 2 up'),
mlines.Line2D([], [], linewidth=2,
color='m', linestyle="--",
label='bs 2 down')]
plt.legend(handles=handles)
return plt | 0.000984 |
def translate(self, text, dest='en', src='auto'):
"""Translate text from source language to destination language
:param text: The source text(s) to be translated. Batch translation is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:param dest: The language to translate the source text into.
The value should be one of the language codes listed in :const:`googletrans.LANGUAGES`
or one of the language names listed in :const:`googletrans.LANGCODES`.
        :type dest: :class:`str`; :class:`unicode`
:param src: The language of the source text.
The value should be one of the language codes listed in :const:`googletrans.LANGUAGES`
or one of the language names listed in :const:`googletrans.LANGCODES`.
If a language is not specified,
the system will attempt to identify the source language automatically.
        :type src: :class:`str`; :class:`unicode`
:rtype: Translated
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.translate('안녕하세요.')
<Translated src=ko dest=en text=Good evening. pronunciation=Good evening.>
>>> translator.translate('안녕하세요.', dest='ja')
<Translated src=ko dest=ja text=こんにちは。 pronunciation=Kon'nichiwa.>
>>> translator.translate('veritas lux mea', src='la')
<Translated src=la dest=en text=The truth is my light pronunciation=The truth is my light>
Advanced usage:
>>> translations = translator.translate(['The quick brown fox', 'jumps over', 'the lazy dog'], dest='ko')
>>> for translation in translations:
... print(translation.origin, ' -> ', translation.text)
The quick brown fox -> 빠른 갈색 여우
jumps over -> 이상 점프
the lazy dog -> 게으른 개
"""
dest = dest.lower().split('_', 1)[0]
src = src.lower().split('_', 1)[0]
if src != 'auto' and src not in LANGUAGES:
if src in SPECIAL_CASES:
src = SPECIAL_CASES[src]
elif src in LANGCODES:
src = LANGCODES[src]
else:
raise ValueError('invalid source language')
if dest not in LANGUAGES:
if dest in SPECIAL_CASES:
dest = SPECIAL_CASES[dest]
elif dest in LANGCODES:
dest = LANGCODES[dest]
else:
raise ValueError('invalid destination language')
if isinstance(text, list):
result = []
for item in text:
translated = self.translate(item, dest=dest, src=src)
result.append(translated)
return result
origin = text
data = self._translate(text, dest, src)
# this code will be updated when the format is changed.
translated = ''.join([d[0] if d[0] else '' for d in data[0]])
extra_data = self._parse_extra_data(data)
# actual source language that will be recognized by Google Translator when the
# src passed is equal to auto.
try:
src = data[2]
except Exception: # pragma: nocover
pass
pron = origin
try:
pron = data[0][1][-2]
except Exception: # pragma: nocover
pass
if not PY3 and isinstance(pron, unicode) and isinstance(origin, str): # pragma: nocover
origin = origin.decode('utf-8')
if dest in EXCLUDES and pron == origin:
pron = translated
        # for python 2.x compatibility
if not PY3: # pragma: nocover
if isinstance(src, str):
src = src.decode('utf-8')
if isinstance(dest, str):
dest = dest.decode('utf-8')
if isinstance(translated, str):
translated = translated.decode('utf-8')
# put final values into a new Translated object
result = Translated(src=src, dest=dest, origin=origin,
text=translated, pronunciation=pron, extra_data=extra_data)
return result | 0.0034 |
def start(self):
"""Start the process, essentially forks and calls target function."""
logger.info("starting process")
process = os.fork()
time.sleep(0.01)
if process != 0:
logger.debug('starting child watcher')
self.loop.reset()
self.child_pid = process
self.watcher = pyev.Child(self.child_pid, False, self.loop, self._child)
self.watcher.start()
else:
self.loop.reset()
logger.debug('running main function')
self.run(*self.args, **self.kwargs)
logger.debug('quitting')
sys.exit(0) | 0.007657 |
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape) | 0.001634 |
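After the unit string is mapped (e.g. "hours" -> "h" by the helper `_netcdf_to_numpy_timeunit`), the conversion above reduces to a plain `pandas.to_timedelta` call; a minimal standalone illustration with hypothetical values:
import numpy as np
import pandas as pd
num_timedeltas = np.array([1, 2, 3])
decoded = pd.to_timedelta(num_timedeltas, unit="h").to_numpy()
print(decoded.dtype)  # timedelta64[ns]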
def get_ordered_idx(id_type, id_list, meta_df):
"""
Gets index values corresponding to ids to subset and orders them.
Input:
- id_type (str): either "id", "idx" or None
- id_list (list): either a list of indexes or id names
Output:
- a sorted list of indexes to subset a dimension by
"""
if meta_df is not None:
if id_type is None:
id_list = range(0, len(list(meta_df.index)))
elif id_type == "id":
lookup = {x: i for (i,x) in enumerate(meta_df.index)}
id_list = [lookup[str(i)] for i in id_list]
return sorted(id_list)
else:
return None | 0.003044 |
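A quick usage sketch with a tiny metadata frame (the row ids below are hypothetical):
import pandas as pd
meta_df = pd.DataFrame(index=["A", "B", "C"])
print(get_ordered_idx("id", ["C", "A"], meta_df))  # [0, 2]
print(get_ordered_idx(None, None, meta_df))        # [0, 1, 2]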
def execute(self, conn, migration_url="", migration_input="", create_by="", migration_request_id="", oldest= False, transaction=False):
"""
Lists all requests if pattern is not provided.
"""
sql = self.sql
binds = {}
if migration_request_id:
sql += " WHERE MR.MIGRATION_REQUEST_ID=:migration_request_id"
binds['migration_request_id']=migration_request_id
elif oldest:
#FIXME: Need to write the sql.YG
#current_date = dbsUtils().getTime()
            #we require a waiting time of:
            #retry_count=0 is 1 minute
            #retry_count=1 is 2 minutes
            #retry_count=2 is 4 minutes
sql += """
WHERE MR.MIGRATION_STATUS=0
or (MR.migration_status=3 and MR.retry_count=0 and MR.last_modification_date <= :current_date-60)
or (MR.migration_status=3 and MR.retry_count=1 and MR.last_modification_date <= :current_date-120)
or (MR.migration_status=3 and MR.retry_count=2 and MR.last_modification_date <= :current_date-240)
ORDER BY MR.creation_date
"""
binds['current_date'] = dbsUtils().getTime()
#print "time= " + str(binds['current_date'])
else:
if migration_url or migration_input or create_by:
sql += " WHERE "
if migration_url:
sql += " MR.MIGRATION_URL=:migration_url"
binds['migration_url']=migration_url
if migration_input:
if migration_url:
sql += " AND "
op = ("=", "like")["%" in migration_input]
sql += " MR.MIGRATION_INPUT %s :migration_input" % op
binds['migration_input']=migration_input
if create_by:
if migration_url or migration_input:
sql += " AND "
sql += " MR.CREATE_BY=:create_by" %create_by
binds['create_by']=create_by
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result | 0.028681 |
def _set_secpath_standby(self, v, load=False):
"""
Setter method for secpath_standby, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/secpath_standby (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_secpath_standby is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_secpath_standby() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="secpath-standby", rest_name="standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Make secondary-path hot standby', u'alt-name': u'standby'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """secpath_standby must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="secpath-standby", rest_name="standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Make secondary-path hot standby', u'alt-name': u'standby'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)""",
})
self.__secpath_standby = t
if hasattr(self, '_set'):
self._set() | 0.006039 |
def _get_doc_from_filename(filename):
"""Get SBMLDocument from given filename.
Parameters
----------
filename : path to SBML, or SBML string, or filehandle
Returns
-------
libsbml.SBMLDocument
"""
if isinstance(filename, string_types):
if ("win" in platform) and (len(filename) < 260) \
and os.path.exists(filename):
# path (win)
doc = libsbml.readSBMLFromFile(filename) # noqa: E501 type: libsbml.SBMLDocument
elif ("win" not in platform) and os.path.exists(filename):
# path other
doc = libsbml.readSBMLFromFile(filename) # noqa: E501 type: libsbml.SBMLDocument
else:
# string representation
if "<sbml" not in filename:
raise IOError("The file with 'filename' does not exist, "
"or is not an SBML string. Provide the path to "
"an existing SBML file or a valid SBML string "
"representation: \n%s", filename)
doc = libsbml.readSBMLFromString(filename) # noqa: E501 type: libsbml.SBMLDocument
elif hasattr(filename, "read"):
# file handle
doc = libsbml.readSBMLFromString(filename.read()) # noqa: E501 type: libsbml.SBMLDocument
else:
raise CobraSBMLError("Input type '%s' for 'filename' is not supported."
" Provide a path, SBML str, "
"or file handle.", type(filename))
return doc | 0.000644 |
def emulate_mouse(self, key_code, x_val, y_val, data):
"""Emulate the ev codes using the data Windows has given us.
Note that by default in Windows, to recognise a double click,
you just notice two clicks in a row within a reasonablely
short time period.
However, if the application developer sets the application
window's class style to CS_DBLCLKS, the operating system will
notice the four button events (down, up, down, up), intercept
them and then send a single key code instead.
There are no such special double click codes on other
platforms, so not obvious what to do with them. It might be
best to just convert them back to four events.
Currently we do nothing.
((0x0203, 'WM_LBUTTONDBLCLK'),
(0x0206, 'WM_RBUTTONDBLCLK'),
(0x0209, 'WM_MBUTTONDBLCLK'),
(0x020D, 'WM_XBUTTONDBLCLK'))
"""
# Once again ignore Windows' relative time (since system
# startup) and use the absolute time (since epoch i.e. 1st Jan
# 1970).
self.update_timeval()
events = []
if key_code == 0x0200:
# We have a mouse move alone.
# So just pass through to below
pass
elif key_code == 0x020A:
# We have a vertical mouse wheel turn
events.append(self.emulate_wheel(data, 'y', self.timeval))
elif key_code == 0x020E:
# We have a horizontal mouse wheel turn
# https://msdn.microsoft.com/en-us/library/windows/desktop/
# ms645614%28v=vs.85%29.aspx
events.append(self.emulate_wheel(data, 'x', self.timeval))
else:
# We have a button press.
# Distinguish the second extra button
if key_code == 0x020B and data == 2:
key_code = 0x020B2
elif key_code == 0x020C and data == 2:
key_code = 0x020C2
# Get the mouse codes
code, value, scan_code = self.mouse_codes[key_code]
# Add in the press events
scan_event, key_event = self.emulate_press(
code, scan_code, value, self.timeval)
events.append(scan_event)
events.append(key_event)
# Add in the absolute position of the mouse cursor
x_event, y_event = self.emulate_abs(x_val, y_val, self.timeval)
events.append(x_event)
events.append(y_event)
# End with a sync marker
events.append(self.sync_marker(self.timeval))
# We are done
self.write_to_pipe(events) | 0.000759 |
def get(typename):
"""get(typename) -> PrimitiveType or ComplexType
Returns the PrimitiveType or ComplexType for typename or None.
"""
dt = getPDT(typename) or getCDT(typename)
if dt is None:
pdt, nelems = ArrayType.parse(typename)
if pdt and nelems:
dt = ArrayType(pdt, nelems)
return dt | 0.002915 |
def add(self, resource, replace=False):
"""Add just a single resource."""
uri = resource.uri
if (uri in self and not replace):
raise ResourceListDupeError(
"Attempt to add resource already in resource_list")
self[uri] = resource | 0.006944 |
def buy(self, account_id, **params):
"""https://developers.coinbase.com/api/v2#buy-bitcoin"""
if 'amount' not in params and 'total' not in params:
raise ValueError("Missing required parameter: 'amount' or 'total'")
for required in ['currency', 'payment_method']:
if required not in params:
raise ValueError("Missing required parameter: %s" % required)
response = self._post('v2', 'accounts', account_id, 'buys', data=params)
return self._make_api_object(response, Buy) | 0.005474 |
def cells_dn_meta(workbook, sheet, row, col, final_dict):
"""
    Traverse all cells in a column moving downward. Primarily created for the metadata sheet, but may be used elsewhere.
    Check each cell title and route its data to the matching handler (geo, pub, funding, or general).
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:param dict final_dict:
:return: none
"""
logger_excel.info("enter cells_dn_meta")
row_loop = 0
pub_cases = ['id', 'year', 'author', 'journal', 'issue', 'volume', 'title', 'pages',
'reportNumber', 'abstract', 'alternateCitation']
geo_cases = ['latMin', 'lonMin', 'lonMax', 'latMax', 'elevation', 'siteName', 'location']
funding_cases = ["agency", "grant", "principalInvestigator", "country"]
# Temp
pub_qty = 0
geo_temp = {}
general_temp = {}
pub_temp = []
funding_temp = []
temp_sheet = workbook.sheet_by_name(sheet)
# Loop until we hit the max rows in the sheet
while row_loop < temp_sheet.nrows:
try:
# Get cell value
cell = temp_sheet.cell_value(row, col)
# If there is content in the cell...
if cell not in EMPTY:
# Convert title to correct format, and grab the cell data for that row
title_formal = temp_sheet.cell_value(row, col)
title_json = name_to_jsonld(title_formal)
# If we don't have a title for it, then it's not information we want to grab
if title_json:
# Geo
if title_json in geo_cases:
cell_data = cells_rt_meta(workbook, sheet, row, col)
geo_temp = compile_temp(geo_temp, title_json, cell_data)
# Pub
# Create a list of dicts. One for each pub column.
elif title_json in pub_cases:
# Authors seem to be the only consistent field we can rely on to determine number of Pubs.
if title_json == 'author':
cell_data = cells_rt_meta(workbook, sheet, row, col)
pub_qty = len(cell_data)
for i in range(pub_qty):
author_lst = compile_authors(cell_data[i])
pub_temp.append({'author': author_lst, 'pubDataUrl': 'Manually Entered'})
else:
cell_data = cells_rt_meta_pub(workbook, sheet, row, col, pub_qty)
for pub in range(pub_qty):
if title_json == 'id':
pub_temp[pub]['identifier'] = [{"type": "doi", "id": cell_data[pub]}]
else:
pub_temp[pub][title_json] = cell_data[pub]
# Funding
elif title_json in funding_cases:
if title_json == "agency":
funding_temp = compile_fund(workbook, sheet, row, col)
# All other cases do not need fancy structuring
else:
cell_data = cells_rt_meta(workbook, sheet, row, col)
general_temp = compile_temp(general_temp, title_json, cell_data)
except IndexError as e:
logger_excel.debug("cells_dn_datasheets: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e))
row += 1
row_loop += 1
# Compile the more complicated items
geo = compile_geo(geo_temp)
logger_excel.info("compile metadata dictionary")
# Insert into final dictionary
final_dict['@context'] = "context.jsonld"
final_dict['pub'] = pub_temp
final_dict['funding'] = funding_temp
final_dict['geo'] = geo
# Add remaining general items
for k, v in general_temp.items():
final_dict[k] = v
logger_excel.info("exit cells_dn_meta")
return final_dict | 0.003716 |
def partition_version_classifiers(
classifiers: t.Sequence[str], version_prefix: str = 'Programming Language :: Python :: ',
only_suffix: str = ' :: Only') -> t.Tuple[t.List[str], t.List[str]]:
"""Find version number classifiers in given list and partition them into 2 groups."""
versions_min, versions_only = [], []
for classifier in classifiers:
version = classifier.replace(version_prefix, '')
versions = versions_min
if version.endswith(only_suffix):
version = version.replace(only_suffix, '')
versions = versions_only
try:
versions.append(tuple([int(_) for _ in version.split('.')]))
except ValueError:
pass
return versions_min, versions_only | 0.003911 |
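A short usage sketch with two typical trove classifiers (illustrative values only):
classifiers = [
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3 :: Only",
]
versions_min, versions_only = partition_version_classifiers(classifiers)
print(versions_min)   # [(3, 6)]
print(versions_only)  # [(3,)]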
def batch_update(self, statements):
"""Perform a batch of DML statements via an ``ExecuteBatchDml`` request.
:type statements:
Sequence[Union[ str, Tuple[str, Dict[str, Any], Dict[str, Union[dict, .types.Type]]]]]
:param statements:
List of DML statements, with optional params / param types.
If passed, 'params' is a dict mapping names to the values
for parameter replacement. Keys must match the names used in the
corresponding DML statement. If 'params' is passed, 'param_types'
must also be passed, as a dict mapping names to the type of
value passed in 'params'.
:rtype:
Tuple(status, Sequence[int])
:returns:
Status code, plus counts of rows affected by each completed DML
            statement. Note that if the status code is not ``OK``, the
statement triggering the error will not have an entry in the
list, nor will any statements following that one.
"""
parsed = []
for statement in statements:
if isinstance(statement, str):
parsed.append({"sql": statement})
else:
dml, params, param_types = statement
params_pb = self._make_params_pb(params, param_types)
parsed.append(
{"sql": dml, "params": params_pb, "param_types": param_types}
)
database = self._session._database
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
api = database.spanner_api
response = api.execute_batch_dml(
session=self._session.name,
transaction=transaction,
statements=parsed,
seqno=self._execute_sql_count,
metadata=metadata,
)
self._execute_sql_count += 1
row_counts = [
result_set.stats.row_count_exact for result_set in response.result_sets
]
return response.status, row_counts | 0.002875 |
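For reference, a sketch of the `statements` payload described in the docstring above: a plain DML string plus a parameterised statement (table and column names are hypothetical; `param_types` is assumed to come from `google.cloud.spanner_v1`):
from google.cloud.spanner_v1 import param_types
statements = [
    "DELETE FROM contacts WHERE TRUE",
    (
        "INSERT INTO contacts (contact_id, name) VALUES (@id, @name)",
        {"id": 1, "name": "Alice"},
        {"id": param_types.INT64, "name": param_types.STRING},
    ),
]
# status, row_counts = transaction.batch_update(statements)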
def parse_objective_coefficient(entry):
"""Return objective value for reaction entry.
Detect objectives that are specified using the non-standardized
kinetic law parameters which are used by many pre-FBC SBML models. The
objective coefficient is returned for the given reaction, or None if
undefined.
Args:
entry: :class:`SBMLReactionEntry`.
"""
for parameter in entry.kinetic_law_reaction_parameters:
pid, name, value, units = parameter
if (pid == 'OBJECTIVE_COEFFICIENT' or
name == 'OBJECTIVE_COEFFICIENT'):
return value
return None | 0.0016 |
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False):
"""Apply injected logic for slicing reading lists with additional content."""
primary_query = self.validate_query(primary_query)
augment_query = self.get_validated_augment_query(augment_query=augment_query)
try:
# We use this for cases like recent where queries are vague.
if reverse_negate:
primary_query = primary_query.filter(NegateQueryFilter(augment_query))
else:
augment_query = augment_query.filter(NegateQueryFilter(primary_query))
augment_query = randomize_es(augment_query)
return FirstSlotSlicer(primary_query, augment_query)
except TransportError:
return primary_query | 0.0086 |
def find_best_matches(errors, matchers):
"""
Find the best match for each error
We use the Good Enough™ ratio as a watershed level for match scores.
"""
for text_log_error in errors:
matches = find_all_matches(text_log_error, matchers) # TextLogErrorMatch instances, unsaved!
best_match = first(matches, key=lambda m: (-m.score, -m.classified_failure_id))
if not best_match:
continue
newrelic.agent.record_custom_event('highest_scored_matcher', {
'matcher': best_match.matcher_name,
'score': best_match.score,
'text_log_error': best_match.text_log_error_id,
})
yield best_match | 0.004298 |
def _post_resource(self, body):
"""
Create new resources and associated attributes.
Example:
acs.post_resource([
{
"resourceIdentifier": "masaya",
"parents": [],
"attributes": [
{
"issuer": "default",
"name": "country",
"value": "Nicaragua"
}
],
}
])
The issuer is effectively a namespace, and in policy evaluations you
identify an attribute by a specific namespace. Many examples provide
a URL but it could be any arbitrary string.
The body is a list, so many resources can be added at the same time.
"""
assert isinstance(body, (list)), "POST for requires body to be a list"
uri = self._get_resource_uri()
return self.service._post(uri, body) | 0.001974 |
def _get_mstd(cls, df, column, windows):
""" get moving standard deviation
:param df: data
:param column: column to calculate
:param windows: collection of window of moving standard deviation
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mstd'.format(column, window)
df[column_name] = df[column].rolling(min_periods=1, window=window,
center=False).std() | 0.003824 |
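A standalone sketch of the same rolling standard deviation outside the class (the column name and window below are hypothetical):
import pandas as pd
df = pd.DataFrame({"close": [10.0, 11.0, 12.0, 11.5, 12.5]})
window = 3
df["close_3_mstd"] = df["close"].rolling(min_periods=1, window=window, center=False).std()
print(df)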
def maxnorm_regularizer(scale=1.0):
"""Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.
More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
    A function with signature `mn(weights, name=None)` that applies max-norm regularization.
Raises
--------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
tl.logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn(weights, name='max_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)
return mn | 0.005397 |
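The penalty the returned function computes is simply `scale * max(|w|)`; a NumPy illustration of that quantity (not the TensorFlow op itself):
import numpy as np
w = np.array([[0.5, -2.0], [1.5, 0.1]])
scale = 0.01
print(scale * np.max(np.abs(w)))  # 0.02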
def check_who_am_i(self):
"""
This method checks verifies the device ID.
@return: True if valid, False if not
"""
register = self.MMA8452Q_Register['WHO_AM_I']
self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_DIRECT)
reply = self.wait_for_read_result()
if reply[self.data_start] == self.device_id:
rval = True
else:
rval = False
return rval | 0.00659 |
def fetch_list(cls, client, ids):
"""
fetch instruments by ids
"""
results = []
request_url = "https://api.robinhood.com/options/instruments/"
for _ids in chunked_list(ids, 50):
params = {"ids": ",".join(_ids)}
data = client.get(request_url, params=params)
partial_results = data["results"]
while data["next"]:
data = client.get(data["next"])
partial_results.extend(data["results"])
results.extend(partial_results)
return results | 0.003442 |
def eval_potential_c(pot,R,z):
"""
NAME:
eval_potential_c
PURPOSE:
Use C to evaluate the interpolated potential
INPUT:
pot - Potential or list of such instances
R - array
z - array
OUTPUT:
potential evaluated R and z
HISTORY:
2013-01-24 - Written - Bovy (IAS)
"""
from galpy.orbit.integrateFullOrbit import _parse_pot #here bc otherwise there is an infinite loop
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Set up result arrays
out= numpy.empty((len(R)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
interppotential_calc_potentialFunc= _lib.eval_potential
interppotential_calc_potentialFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_potentialFunc(len(R),
R,
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value) | 0.020504 |
def get_variable(self, name, initializer):
"""
Create and initialize a variable using a numpy array and set trainable.
:param name: (required str) name of the variable
:param initializer: a numpy array or a tensor
"""
v = tf.get_variable(name, shape=initializer.shape,
initializer=(lambda shape, dtype, partition_info:
initializer),
trainable=self.training)
return v | 0.002088 |
def com_google_fonts_check_gasp(ttFont):
"""Is 'gasp' table set to optimize rendering?"""
if "gasp" not in ttFont.keys():
yield FAIL, ("Font is missing the 'gasp' table."
" Try exporting the font with autohinting enabled.")
else:
if not isinstance(ttFont["gasp"].gaspRange, dict):
yield FAIL, "'gasp' table has no values."
else:
failed = False
if 0xFFFF not in ttFont["gasp"].gaspRange:
yield WARN, ("'gasp' table does not have an entry for all"
" font sizes (gaspRange 0xFFFF).")
else:
gasp_meaning = {
0x01: "- Use gridfitting",
0x02: "- Use grayscale rendering",
0x04: "- Use gridfitting with ClearType symmetric smoothing",
0x08: "- Use smoothing along multiple axes with ClearType®"
}
table = []
for key in ttFont["gasp"].gaspRange.keys():
value = ttFont["gasp"].gaspRange[key]
meaning = []
for flag, info in gasp_meaning.items():
if value & flag:
meaning.append(info)
meaning = "\n\t".join(meaning)
table.append(f"PPM <= {key}:\n\tflag = 0x{value:02X}\n\t{meaning}")
table = "\n".join(table)
yield INFO, ("These are the ppm ranges declared on the"
f" gasp table:\n\n{table}\n")
for key in ttFont["gasp"].gaspRange.keys():
if key != 0xFFFF:
yield WARN, ("'gasp' table has a gaspRange of {} that"
" may be unneccessary.").format(key)
failed = True
else:
value = ttFont["gasp"].gaspRange[0xFFFF]
if value != 0x0F:
failed = True
yield WARN, (f"gaspRange 0xFFFF value 0x{value:02X}"
" should be set to 0x0F.")
if not failed:
yield PASS, ("'gasp' table is correctly set, with one "
"gaspRange:value of 0xFFFF:0x0F.") | 0.010553 |
def log_request_data_encode(self, target_system, target_component, id, ofs, count):
'''
Request a chunk of a log
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
id : Log id (from LOG_ENTRY reply) (uint16_t)
ofs : Offset into the log (uint32_t)
count : Number of bytes (uint32_t)
'''
return MAVLink_log_request_data_message(target_system, target_component, id, ofs, count) | 0.007899 |
def create(self,
alert_config,
occurrence_frequency_count=None,
occurrence_frequency_unit=None,
alert_frequency_count=None,
alert_frequency_unit=None):
"""
Create a new alert
:param alert_config: A list of AlertConfig classes (Ex:
``[EmailAlertConfig('[email protected]')]``)
:type alert_config: list of
:class:`PagerDutyAlertConfig<logentries_api.alerts.PagerDutyAlertConfig>`,
:class:`WebHookAlertConfig<logentries_api.alerts.WebHookAlertConfig>`,
:class:`EmailAlertConfig<logentries_api.alerts.EmailAlertConfig>`,
:class:`SlackAlertConfig<logentries_api.alerts.SlackAlertConfig>`, or
:class:`HipChatAlertConfig<logentries_api.alerts.HipChatAlertConfig>`
:param occurrence_frequency_count: How many times per
``alert_frequency_unit`` for a match before issuing an alert.
Defaults to 1
:type occurrence_frequency_count: int
:param occurrence_frequency_unit: The time period to monitor for sending
an alert. Must be 'day', or 'hour'. Defaults to 'hour'
:type occurrence_frequency_unit: str
:param alert_frequency_count: How many times per
``alert_frequency_unit`` to issue an alert. Defaults to 1
:type alert_frequency_count: int
:param alert_frequency_unit: How often to regulate sending alerts.
Must be 'day', or 'hour'. Defaults to 'hour'
:type alert_frequency_unit: str
:returns: The response of your post
:rtype: dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
data = {
'rate_count': occurrence_frequency_count or 1,
'rate_range': occurrence_frequency_unit or 'hour',
'limit_count': alert_frequency_count or 1,
'limit_range': alert_frequency_unit or 'hour',
'schedule': [],
'enabled': True,
}
data.update(alert_config.args())
# Yes, it's confusing. the `/actions/` endpoint is used for alerts, while
# the /tags/ endpoint is used for labels.
return self._post(
request=ApiActions.CREATE.value,
uri=ApiUri.ACTIONS.value,
params=data
) | 0.004065 |
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
"""
  Run the CLA model's SP through data records 0:nTrain for nMultiplePass passes.
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
model._sensorCompute(inputRecord)
model._spCompute()
if j % 400 == 0:
print " pass %i, record %i" % (nPass, j)
return model | 0.018622 |
def patch(module, external=(), internal=()):
"""
Temporarily monkey-patch dependencies which can be external to, or internal
to the supplied module.
:param module: Module object
:param external: External dependencies to patch (full paths as strings)
:param internal: Internal dependencies to patch (short names as strings)
:return:
"""
external = tuple(external)
internal = tuple(internal)
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
# The master mock is used to contain all of the sub-mocks. It is a
# useful container and can also be used to determine the order of
# calls to all sub-mocks.
master_mock = mock.MagicMock()
def get_mock(name):
return getattr(master_mock, __patch_name(name))
def patch_external(name):
return mock.patch(name, get_mock(name))
def patch_internal(name):
return mock.patch(module.__name__ + '.' + name, get_mock(name))
try:
with __nested(patch_external(n) for n in external):
if external:
# Reload the module to ensure that patched external
# dependencies are accounted for.
reload_module(module)
# Patch objects in the module itself.
with __nested(patch_internal(n) for n in internal):
return fn(master_mock, *args, **kwargs)
finally:
if external:
# When all patches have been discarded, reload the module
# to bring it back to its original state (except for all of
# the references which have been reassigned).
reload_module(module)
return wrapper
return decorator | 0.000522 |
def put_sync(self, **kwargs):
'''
PUT: puts data into the Firebase.
Requires the 'point' parameter as a keyworded argument.
'''
self.amust(("point", "data"), kwargs)
response = requests.put(self.url_correct(kwargs["point"],
kwargs.get("auth", self.__auth)),
data=json.dumps(kwargs["data"]))
self.catch_error(response)
return response.content | 0.004255 |
def logical_and(f1, f2): # function factory
'''Logical and from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function.
Usage:
filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
filter_func(array) # array that has Data Records from channel 4
'''
def f(value):
return np.logical_and(f1(value), f2(value))
f.__name__ = "(" + f1.__name__ + "_and_" + f2.__name__ + ")"
return f | 0.004983 |
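A concrete, runnable illustration with two simple predicates standing in for the record filters mentioned in the docstring (both are hypothetical):
import numpy as np
def is_positive(a):
    return a > 0
def is_even(a):
    return a % 2 == 0
filter_func = logical_and(is_positive, is_even)
print(filter_func(np.array([-2, -1, 0, 1, 2, 4])))  # [False False False False  True  True]
print(filter_func.__name__)                         # (is_positive_and_is_even)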
def get_specific_subnodes(self, node, name, recursive=0):
"""Given a node and a name, return a list of child `ELEMENT_NODEs`, that
have a `tagName` matching the `name`. Search recursively for `recursive`
levels.
"""
children = [x for x in node.childNodes if x.nodeType == x.ELEMENT_NODE]
ret = [x for x in children if x.tagName == name]
if recursive > 0:
for x in children:
ret.extend(self.get_specific_subnodes(x, name, recursive-1))
return ret | 0.007449 |
def new_schedule(self, program, station, time, duration, new, stereo,
subtitled, hdtv, closeCaptioned, ei, tvRating, dolby,
partNumber, partTotal):
"""Callback run for each new schedule entry"""
if self.__v_schedule:
# [Schedule: EP012964250031, 70387, 2013-01-16 21:00:00.00, 30, False, True, False, False, True, False, TV-PG, None, None, None]
print("[Schedule: %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
"%s, %s]" % (program, station, time, duration, new, stereo,
subtitled, hdtv, closeCaptioned, ei, tvRating, dolby,
partNumber, partTotal)) | 0.014451 |
def list_targets(self):
"""Returns a list of association targets of instance VIFs.
Each association target is represented as FloatingIpTarget object.
        FloatingIpTarget is an APIResourceWrapper/APIDictWrapper and
'id' and 'name' attributes must be defined in each object.
FloatingIpTarget.id can be passed as port_id in associate().
FloatingIpTarget.name is displayed in Floating Ip Association Form.
"""
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
servers, has_more = nova.server_list(self.request, detailed=False)
server_dict = collections.OrderedDict(
[(s.id, s.name) for s in servers])
reachable_subnets = self._get_reachable_subnets(ports)
targets = []
for p in ports:
# Remove network ports from Floating IP targets
if p.device_owner.startswith('network:'):
continue
server_name = server_dict.get(p.device_id)
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
# Floating IPs can only target IPv4 addresses.
if netaddr.IPAddress(ip['ip_address']).version != 4:
continue
targets.append(FloatingIpTarget(p, ip['ip_address'],
server_name))
return targets | 0.001351 |
def native(name, ret, interp=None, send_interp=False):
"""Used as a decorator to add the decorated function to the
pfp interpreter so that it can be used from within scripts.
:param str name: The name of the function as it will be exposed in template scripts.
:param pfp.fields.Field ret: The return type of the function (a class)
:param pfp.interp.PfpInterp interp: The specific interpreter to add the function to
:param bool send_interp: If the current interpreter should be passed to the function.
Examples:
The example below defines a ``Sum`` function that will return the sum of
all parameters passed to the function: ::
from pfp.fields import PYVAL
@native(name="Sum", ret=pfp.fields.Int64)
def sum_numbers(params, ctxt, scope, stream, coord):
res = 0
for param in params:
res += PYVAL(param)
return res
The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it
requires that the interpreter be sent as a parameter: ::
@native(name="Int3", ret=pfp.fields.Void, send_interp=True)
def int3(params, ctxt, scope, stream, coord, interp):
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop()
"""
def native_decorator(func):
@functools.wraps(func)
def native_wrapper(*args, **kwargs):
return func(*args, **kwargs)
pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp)
return native_wrapper
return native_decorator | 0.004477 |
def posterior(self, x, sigma=1.):
"""Model is X_1,...,X_n ~ N(theta, sigma^2), theta~self, sigma fixed"""
pr0 = 1. / self.scale**2 # prior precision
prd = x.size / sigma**2 # data precision
varp = 1. / (pr0 + prd) # posterior variance
mu = varp * (pr0 * self.loc + prd * x.mean())
return Normal(loc=mu, scale=np.sqrt(varp)) | 0.005348 |
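A quick numeric check of the conjugate update (hypothetical prior and data; assumes the surrounding `Normal` class exposes `loc` and `scale` attributes, as the method itself does):
import numpy as np
prior = Normal(loc=0.0, scale=2.0)
x = np.array([1.0, 1.5, 0.5, 1.0])
post = prior.posterior(x, sigma=1.0)
# prior precision 0.25, data precision 4.0 -> posterior variance 1/4.25
print(post.loc, post.scale)  # ~0.941  ~0.485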