text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def set_image(self, **kwargs):
"""
set image of embed
:keyword url: source url of image (only supports http(s) and attachments)
:keyword proxy_url: a proxied url of the image
:keyword height: height of image
:keyword width: width of image
"""
self.image = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | 0.005941 |
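A minimal usage sketch for the snippet above; the Embed container class here is an assumption, with only the set_image method taken verbatim from the snippet:

class Embed:  # hypothetical container; set_image as defined above
    def set_image(self, **kwargs):
        self.image = {
            'url': kwargs.get('url'),
            'proxy_url': kwargs.get('proxy_url'),
            'height': kwargs.get('height'),
            'width': kwargs.get('width'),
        }

embed = Embed()
embed.set_image(url='https://example.com/cat.png', height=256, width=256)
# embed.image == {'url': 'https://example.com/cat.png', 'proxy_url': None, 'height': 256, 'width': 256}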
def get_identifier(self):
"""Return identifier which is either the machine file name or sha256 checksum of data."""
if self._path:
return os.path.basename(self._path)
else:
return hashlib.sha256(repr(self._data).encode()).hexdigest() | 0.013468 |
def create_package(package, author, email, description, create_example):
"""Creates a Canari transform package skeleton."""
from canari.commands.create_package import create_package
create_package(package, author, email, description, create_example) | 0.003831 |
def matches(self, pattern):
"""Asserts that val is string and matches regex pattern."""
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if not isinstance(pattern, str_types):
raise TypeError('given pattern arg must be a string')
if len(pattern) == 0:
raise ValueError('given pattern arg must not be empty')
if re.search(pattern, self.val) is None:
self._err('Expected <%s> to match pattern <%s>, but did not.' % (self.val, pattern))
return self | 0.005254 |
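A short usage sketch; it assumes the assertpy-style fluent entry point assert_that that exposes the matches method above:

from assertpy import assert_that  # assumed entry point for the fluent assertion

assert_that('release v1.2.3').matches(r'v\d+\.\d+\.\d+')  # passes and returns self for chaining
assert_that('release').matches(r'\d+')                    # fails: "Expected <release> to match pattern <\d+>, but did not."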
def validate(self):
"""
If a schema exists, run fiona's shapefile validation by trying to save the results to a MemoryFile.
"""
if self._schema is not None:
with MemoryFile() as memfile:
with memfile.open(driver="ESRI Shapefile", schema=self.schema) as target:
for _item in self._results:
# getting rid of the assets that don't behave well because of in-memory rasters
item = GeoFeature(_item.geometry, _item.properties)
target.write(item.to_record(item.crs)) | 0.008251 |
def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
"""
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
square root of the sum of squares of all elements of the input along the C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.l2normalize
spec_layer_params.epsilon = epsilon | 0.005958 |
def update_state_active(self):
"""Update the state of the model run to active.
Raises an exception if update fails or resource is unknown.
Returns
-------
ModelRunHandle
Refreshed run handle.
"""
# Update state to active
self.update_state(self.links[REF_UPDATE_STATE_ACTIVE], {'type' : RUN_ACTIVE})
# Return a refreshed version of the handle
return self.refresh() | 0.008772 |
def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
"""Eliminate canidates locally.
Helper function that determines where to eliminate candidates locally
on a per aggregate basis.
Parameters
----------
x : array
n x 1 vector of new candidate
AggOp : CSR or CSC sparse matrix
Aggregation operator for the level that x was generated for
A : sparse matrix
Operator for the level that x was generated for
T : sparse matrix
Tentative prolongation operator for the level that x was generated for
Ca : scalar
Constant threshold parameter to decide when to drop candidates
Returns
-------
Nothing, x is modified in place
"""
if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
raise TypeError('AggOp must be a CSR or CSC matrix')
else:
AggOp = AggOp.tocsc()
ndof = max(x.shape)
nPDEs = int(ndof/AggOp.shape[0])
def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
"""Inner products per aggregate.
Helper function that calculates <z, z>_i, i.e., the
inner product of z only over aggregate i
Returns a vector of length num_aggregates where entry i is <z, z>_i
"""
z = np.ravel(z)*np.ravel(z)
innerp = np.zeros((1, AggOp.shape[1]), dtype=z.dtype)
for j in range(nPDEs):
innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
return innerp.reshape(-1, 1)
def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
"""Weights per aggregate.
Calculate local aggregate quantities
Return a vector of length num_aggregates where entry i is
(card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
"""
rho = approximate_spectral_radius(A)
zAz = np.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
weights = (np.ravel(card)*zAz)/(A.shape[0]*rho)
return weights.reshape(-1, 1)
# Run test 1, which finds where x is small relative to its energy
weights = Ca*get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights
# Run test 2, which finds where x is already approximated
# accurately by the existing T
projected_x = x - T*(T.T*x)
mask2 = aggregate_wise_inner_product(projected_x,
AggOp, nPDEs, ndof) <= weights
# Combine masks and zero out corresponding aggregates in x
mask = np.ravel(mask1 + mask2).nonzero()[0]
if mask.shape[0] > 0:
mask = nPDEs*AggOp[:, mask].indices
for j in range(nPDEs):
x[mask+j] = 0.0 | 0.000366 |
def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSessionToken=""):
"""
**Description**
Used to configure/update the custom IAM credentials for Websocket SigV4 connection to
AWS IoT. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureIAMCredentials(obtainedAccessKeyID, obtainedSecretAccessKey, obtainedSessionToken)
.. note::
Hard-coding credentials into custom script is NOT recommended. Please use AWS Cognito identity service
or other credential provider.
**Parameters**
*AWSAccessKeyID* - AWS Access Key Id from user-specific IAM credentials.
*AWSSecretAccessKey* - AWS Secret Access Key from user-specific IAM credentials.
*AWSSessionToken* - AWS Session Token for temporary authentication from STS.
**Returns**
None
"""
iam_credentials_provider = IAMCredentialsProvider()
iam_credentials_provider.set_access_key_id(AWSAccessKeyID)
iam_credentials_provider.set_secret_access_key(AWSSecretAccessKey)
iam_credentials_provider.set_session_token(AWSSessionToken)
self._mqtt_core.configure_iam_credentials(iam_credentials_provider) | 0.007758 |
def __update_info(self):
"""Updates "visualization options" and "file info" areas."""
from f311 import explorer as ex
import f311
t = self.tableWidget
z = self.listWidgetVis
z.clear()
classes = self.__vis_classes = []
propss = self.__lock_get_current_propss()
npp = len(propss)
s0, s1 = "", ""
if npp == 1:
p = propss[0]
# Visualization options
if p.flag_scanned:
if isinstance(p.f, f311.DataFile):
classes.extend(f311.get_suitable_vis_classes(p.f))
if ex.VisPrint in classes:
classes.remove(ex.VisPrint)
if p.flag_text:
# This is an exception, since "txt" is not a Vis descendant.
# This will be properly handled in __visualize()
classes.append("txt")
for x in classes:
if x == "txt":
text = "View plain text"
else:
text = x.action
text += " ("+x.__name__+")"
item = QListWidgetItem(text)
z.addItem(item)
# File info
s0 = p.get_summary()
s1 = p.get_info()
elif npp >= 2:
s0 = "{0:d} selected".format(npp)
ff = [p.f for p in propss]
flag_spectra = all([isinstance(f, f311.FileSpectrum) for f in ff])
# gambiarra to visualize several PFANT .mod files
has_pyfant = False
try:
import pyfant
has_pyfant = True
except:
pass
flag_mod = False
if has_pyfant:
flag_mod = all([isinstance(f, pyfant.FileModBin) and len(f.records) > 1 for f in ff])
if flag_spectra:
z.addItem(QListWidgetItem("Plot spectra stacked"))
classes.append("sta")
z.addItem(QListWidgetItem("Plot spectra overlapped"))
classes.append("ovl")
elif flag_mod:
# TODO plugin-based way to handle visualization of multiple selection
z.addItem(QListWidgetItem("View model grid"))
classes.append("modgrid")
# File info
self.labelSummary.setText(s0)
self.textEditInfo.setPlainText(s1) | 0.002815 |
def main():
"""This program prints doubled values!"""
import numpy
X = numpy.arange(.1, 10.1, .2)  # make an array of numbers
Y=myfunc(X) # calls myfunc with argument X
for i in range(len(X)):
print(X[i],Y[i]) | 0.040359 |
def _get_calculated_status(self):
"""Get the calculated status of the page based on
:attr:`Page.publication_date`,
:attr:`Page.publication_end_date`,
and :attr:`Page.status`."""
if settings.PAGE_SHOW_START_DATE and self.publication_date:
if self.publication_date > get_now():
return self.DRAFT
if settings.PAGE_SHOW_END_DATE and self.publication_end_date:
if self.publication_end_date < get_now():
return self.EXPIRED
return self.status | 0.003636 |
def probability_density(self, X):
"""Compute density function for given copula family.
Args:
X: `np.ndarray`
Returns:
np.array: probability density
"""
self.check_fit()
U, V = self.split_matrix(X)
if self.theta == 0:
return np.multiply(U, V)
else:
num = np.multiply(np.multiply(-self.theta, self._g(1)), 1 + self._g(np.add(U, V)))
aux = np.multiply(self._g(U), self._g(V)) + self._g(1)
den = np.power(aux, 2)
return num / den | 0.00519 |
def prob_imf(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for power-law
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1 (not currently used)
s2z: array
Aligned spin 2 (not currently used)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
alpha = kwargs.get('alpha', -2.35)
max_mtotal = min_mass + max_mass
m1, m2 = np.array(m1), np.array(m2)
C_imf = max_mass**(alpha + 1)/(alpha + 1)
C_imf -= min_mass**(alpha + 1)/(alpha + 1)
xx = np.minimum(m1, m2)
m1 = np.maximum(m1, m2)
m2 = xx
bound = np.sign(max_mtotal - m1 - m2)
bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
p_m1_m2 = np.zeros_like(m1)
idx = np.where(m1 <= max_mtotal/2.)
p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(m1[idx] - min_mass)
idx = np.where(m1 > max_mtotal/2.)
p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(max_mass - m1[idx])
# zero out (m1, m2) pairs that fall outside the allowed mass bounds
p_m1_m2[np.where(bound != 2)] = 0
return p_m1_m2/2. | 0.002239 |
def resolve_primary_keys_in_schema(sql_tokens: List[str],
schema: Dict[str, List[TableColumn]]) -> List[str]:
"""
Some examples in the text2sql datasets use ID as a column reference to the
column of a table which has a primary key. This causes problems if you are trying
to constrain a grammar to only produce the column names directly, because you don't
know what ID refers to. So instead of dealing with that, we just replace it.
"""
primary_keys_for_tables = {name: max(columns, key=lambda x: x.is_primary_key).name
for name, columns in schema.items()}
resolved_tokens = []
for i, token in enumerate(sql_tokens):
if i > 2:
table_name = sql_tokens[i - 2]
if token == "ID" and table_name in primary_keys_for_tables.keys():
token = primary_keys_for_tables[table_name]
resolved_tokens.append(token)
return resolved_tokens | 0.00611 |
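A self-contained sketch of the ID resolution described above; TableColumn is modelled here as a namedtuple stand-in carrying only the two fields the function uses:

from collections import namedtuple

TableColumn = namedtuple('TableColumn', ['name', 'is_primary_key'])  # stand-in for the real type

schema = {'city': [TableColumn('city_id', True), TableColumn('population', False)]}
tokens = ['SELECT', 'city', '.', 'ID', 'FROM', 'city']
resolve_primary_keys_in_schema(tokens, schema)
# -> ['SELECT', 'city', '.', 'city_id', 'FROM', 'city']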
def transform(self, Y):
"""Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
"""
try:
# try PCA first
return self.data_pca.transform(Y)
except AttributeError: # no pca, try to return data
try:
if Y.shape[1] != self.data.shape[1]:
# shape is wrong
raise ValueError
return Y
except IndexError:
# len(Y.shape) < 2
raise ValueError
except ValueError:
# more informative error
raise ValueError("data of shape {} cannot be transformed"
" to graph built on data of shape {}".format(
Y.shape, self.data.shape)) | 0.002372 |
def load_smc_file(cls, filename):
"""Read an SMC formatted time series.
Format of the time series is provided by:
https://escweb.wr.usgs.gov/nsmp-data/smcfmt.html
Parameters
----------
filename: str
Filename to open.
"""
from .tools import parse_fixed_width
with open(filename) as fp:
lines = list(fp)
# 11 lines of strings
lines_str = [lines.pop(0) for _ in range(11)]
if lines_str[0].strip() != '2 CORRECTED ACCELEROGRAM':
raise RuntimeWarning("Loading uncorrected SMC file.")
m = re.search('station =(.+)component=(.+)', lines_str[5])
description = '; '.join([g.strip() for g in m.groups()])
# 6 lines of (8i10) formatted integers
values_int = parse_fixed_width(48 * [(10, int)],
[lines.pop(0) for _ in range(6)])
count_comment = values_int[15]
count = values_int[16]
# 10 lines of (5e15.7) formatted floats
values_float = parse_fixed_width(50 * [(15, float)],
[lines.pop(0) for _ in range(10)])
time_step = 1 / values_float[1]
# Skip comments
lines = lines[count_comment:]
accels = np.array(parse_fixed_width(count * [
(10, float),
], lines))
return TimeSeriesMotion(filename, description, time_step, accels) | 0.001366 |
def close(self):
"""Closes the record writer."""
if self._writer is not None:
self.flush()
self._writer.close()
self._writer = None | 0.010929 |
def execute_async(self, operation, parameters=None, configuration=None):
"""Asynchronously execute a SQL query.
Immediately returns after query is sent to the HS2 server. Poll with
`is_executing`. A call to `fetch*` will block.
Parameters
----------
operation : str
The SQL query to execute.
parameters : str, optional
Parameters to be bound to variables in the SQL query, if any.
Impyla supports all DB API `paramstyle`s, including `qmark`,
`numeric`, `named`, `format`, `pyformat`.
configuration : dict of str keys and values, optional
Configuration overlay for this query.
Returns
-------
NoneType
Results are available through a call to `fetch*`.
"""
log.debug('Executing query %s', operation)
def op():
if parameters:
self._last_operation_string = _bind_parameters(operation,
parameters)
else:
self._last_operation_string = operation
op = self.session.execute(self._last_operation_string,
configuration,
run_async=True)
self._last_operation = op
self._execute_async(op) | 0.001437 |
def _translate(unistr, table):
'''Replace characters using a table.'''
if type(unistr) is str:
try:
unistr = unistr.decode('utf-8')
# Python 3 returns AttributeError when .decode() is called on a str
# This means it is already unicode.
except AttributeError:
pass
try:
if type(unistr) is not unicode:
return unistr
# Python 3 returns NameError because unicode is not a type.
except NameError:
pass
chars = []
for c in unistr:
replacement = table.get(c)
chars.append(replacement if replacement else c)
return u''.join(chars) | 0.001524 |
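A quick sketch of the table-based replacement; the table maps single characters to replacement strings, and characters without an entry pass through unchanged:

umlauts = {'ä': 'a', 'ö': 'o', 'ü': 'u'}
_translate('Müller', umlauts)       # -> u'Muller'
_translate('plain ascii', umlauts)  # -> u'plain ascii' (unchanged)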
def get_variant_by_id(cls, variant_id, **kwargs):
"""Find Variant
Return single instance of Variant by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_variant_by_id(variant_id, async=True)
>>> result = thread.get()
:param async bool
:param str variant_id: ID of variant to return (required)
:return: Variant
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_variant_by_id_with_http_info(variant_id, **kwargs)
else:
(data) = cls._get_variant_by_id_with_http_info(variant_id, **kwargs)
return data | 0.00344 |
def init(self):
'''
Initialize the device.
Parameters of visa.ResourceManager().open_resource()
'''
super(Visa, self).init()
backend = self._init.get('backend', '') # Empty string means std. backend (NI VISA)
rm = visa.ResourceManager(backend)
try:
logger.info('BASIL VISA TL with %s backend found the following devices: %s', backend, ", ".join(rm.list_resources()))
except NotImplementedError: # some backends do not always implement the list_resources function
logger.info('BASIL VISA TL with %s backend', backend)
self._resource = rm.open_resource(**{key: value for key, value in self._init.items() if key not in ("backend",)}) | 0.008152 |
def unique(text):
"""
Return an unique text
@type text: str
@param text: Text written using spin syntax.
@return: An unique text
# Generate an unique sentence
>>> unique('The {quick|fast} {brown|gray|red} fox jumped over the lazy dog.')
'The quick red fox jumped over the lazy dog'
"""
# check if the text is correct
correct, error = _is_correct(text)
if not correct:
raise Exception(error)
s = []
_all_unique_texts(text, s)
return s[0] | 0.003929 |
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest() | 0.005128 |
def trees(return_X_y=True):
"""cherry trees dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the girth and the height of each tree.
y contains the volume.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/trees.html
"""
# y is real.
# recommend InvGaussGAM, or GAM(distribution='gamma', link='log')
trees = pd.read_csv(PATH + '/trees.csv', index_col=0)
if return_X_y:
y = trees.Volume.values
X = trees[['Girth', 'Height']].values
return _clean_X_y(X, y)
return trees | 0.001253 |
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using all permutations of
rows (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = brute_permutation(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder | 0.000801 |
def _run_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output):
"""Used by the Pipeline evaluator to execute this Pipeline."""
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output,
_PipelineRecord.RUN)
logging.debug('Running %s(*%s, **%s)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
return self.run(*self.args, **self.kwargs) | 0.008666 |
def isSelectionPositionValid(self, selPos: tuple):
"""
Return **True** if the start- and end position denote valid
positions within the document.
|Args|
* ``selPos`` (**tuple**): tuple with four integers.
|Returns|
**bool**: **True** if the positions are valid; **False** otherwise.
|Raises|
* **None**
"""
if selPos is None:
return False
if len(selPos) != 4:
return False
check1 = self.isPositionValid(*selPos[:2])
check2 = self.isPositionValid(*selPos[2:])
if check1 and check2:
return True
else:
return False | 0.002886 |
def arch_size(self):
"""Return the architecure size in bits."""
if not self._ptr:
raise BfdException("BFD not initialized")
try:
return _bfd.get_arch_size(self._ptr)
except Exception, err:
raise BfdException("Unable to determine architeure size.") | 0.006329 |
def _in_version(self, *versions):
"Returns true if this frame is in any of the specified versions of ID3."
for version in versions:
if (self._version == version
or (isinstance(self._version, collections.Container)
and version in self._version)):
return True
return False | 0.011142 |
def writeln (self, s=u"", **args):
"""
Write string to output descriptor plus a newline.
"""
self.write(u"%s%s" % (s, unicode(os.linesep)), **args) | 0.01676 |
def compare_basis_against_file(basis_name,
src_filepath,
file_type=None,
version=None,
uncontract_general=False,
data_dir=None):
'''Compare a basis set in the BSE against a reference file'''
src_data = read_formatted_basis(src_filepath, file_type)
bse_data = get_basis(basis_name, version=version, data_dir=data_dir)
return basis_comparison_report(src_data, bse_data, uncontract_general=uncontract_general) | 0.00346 |
def deserialize_unicode(data):
"""Preserve unicode objects in Python 2, otherwise return data
as a string.
:param str data: response string to be deserialized.
:rtype: str or unicode
"""
# We might be here because we have an enum modeled as string,
# and we try to deserialize a partial dict with enum inside
if isinstance(data, Enum):
return data
# Consider this is real string
try:
if isinstance(data, unicode):
return data
except NameError:
return str(data)
else:
return str(data) | 0.003101 |
def _get_model_name(self, modeldata):
"""
Extract the model name from the ``modeldata`` that *django-admin-tools* provides.
"""
if 'change_url' in modeldata:
return modeldata['change_url'].strip('/').split('/')[-1] # /foo/admin/appname/modelname
elif 'add_url' in modeldata:
return modeldata['add_url'].strip('/').split('/')[-2] # /foo/admin/appname/modelname/add
else:
raise ValueError("Missing attributes in modeldata to find the model name!") | 0.011194 |
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of queryset), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
iterator = self._records if filter_by is None else filter(filter_by, self._records)
return Queryset(self._table, iterator) | 0.006678 |
def z_axis_transform(compound, new_origin=None,
point_on_z_axis=None,
point_on_zx_plane=None):
"""Move a compound such that the z-axis lies on specified points.
Parameters
----------
compound : mb.Compound
The compound to move.
new_origin : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 0.0]
Where to place the new origin of the coordinate system.
point_on_z_axis : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 1.0]
A point on the new z-axis.
point_on_zx_plane : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 1.0]
A point on the new xz-plane.
"""
x_axis_transform(compound, new_origin=new_origin,
point_on_x_axis=point_on_z_axis,
point_on_xy_plane=point_on_zx_plane)
rotate_around_y(compound, np.pi * 3 / 2) | 0.004334 |
def divide_work(list_of_indexes, batch_size):
"""
Given a sequential list of indexes, split them into batches of at most batch_size items.
:param list_of_indexes: [int] list of indexes to be divided up
:param batch_size: maximum number of items per batch (the final batch may be smaller)
:return: [(int,int)] list of (index, num_items) to be processed
"""
grouped_indexes = [list_of_indexes[i:i + batch_size] for i in range(0, len(list_of_indexes), batch_size)]
return [(batch[0], len(batch)) for batch in grouped_indexes] | 0.005505 |
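A worked example of the batching; each tuple is (first index in the batch, number of items in it), and the final batch carries whatever is left over:

divide_work(list(range(10)), batch_size=4)
# -> [(0, 4), (4, 4), (8, 2)]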
def set_start_date(self, date):
"""Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_start_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(date, self.get_start_date_metadata()):
raise errors.InvalidArgument()
# self._my_map['startDate'] = self._get_date_map(date)
self._my_map['startDate'] = date | 0.002861 |
def svs_description_metadata(description):
"""Return metatata from Aperio image description as dict.
The Aperio image description format is unspecified. Expect failures.
>>> svs_description_metadata('Aperio Image Library v1.0')
{'Aperio Image Library': 'v1.0'}
"""
if not description.startswith('Aperio Image Library '):
raise ValueError('invalid Aperio image description')
result = {}
lines = description.split('\n')
key, value = lines[0].strip().rsplit(None, 1) # 'Aperio Image Library'
result[key.strip()] = value.strip()
if len(lines) == 1:
return result
items = lines[1].split('|')
result[''] = items[0].strip() # TODO: parse this?
for item in items[1:]:
key, value = item.split(' = ')
result[key.strip()] = astype(value.strip())
return result | 0.001183 |
def plot_diff(self, graphing_library='matplotlib'):
"""
Generate CDF diff plots of the submetrics
"""
diff_datasource = sorted(set(self.reports[0].datasource) & set(self.reports[1].datasource))
graphed = False
for submetric in diff_datasource:
baseline_csv = naarad.utils.get_default_csv(self.reports[0].local_location, (submetric + '.percentiles'))
current_csv = naarad.utils.get_default_csv(self.reports[1].local_location, (submetric + '.percentiles'))
if (not (naarad.utils.is_valid_file(baseline_csv) & naarad.utils.is_valid_file(current_csv))):
continue
baseline_plot = PD(input_csv=baseline_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
graph_type='line', plot_label='baseline', x_label='Percentiles')
current_plot = PD(input_csv=current_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
graph_type='line', plot_label='current', x_label='Percentiles')
graphed, div_file = Diff.graphing_modules[graphing_library].graph_data_on_the_same_graph([baseline_plot, current_plot],
os.path.join(self.output_directory, self.resource_path),
self.resource_path, (submetric + '.diff'))
if graphed:
self.plot_files.append(div_file)
return True | 0.011957 |
def _get_Berger_data(verbose=True):
'''Read in the Berger and Loutre orbital table as a pandas dataframe, convert to xarray
'''
# The first column of the data file is used as the row index, and represents kyr from present
orbit91_pd, path = load_data_source(local_path = local_path,
remote_source_list = [threddspath, NCDCpath],
open_method = pd.read_csv,
open_method_kwargs = {'delim_whitespace': True, 'skiprows':1},
verbose=verbose,)
# As xarray structure with the dimension named 'kyear'
orbit = xr.Dataset(orbit91_pd).rename({'dim_0': 'kyear'})
# Now change names
orbit = orbit.rename({'ECC': 'ecc', 'OMEGA': 'long_peri',
'OBL': 'obliquity', 'PREC': 'precession'})
# add 180 degrees to long_peri (see lambda definition, Berger 1978 Appendix)
orbit['long_peri'] += 180.
orbit['precession'] *= -1.
orbit.attrs['Description'] = 'The Berger and Loutre (1991) orbital data table'
orbit.attrs['Citation'] = 'https://doi.org/10.1016/0277-3791(91)90033-Q'
orbit.attrs['Source'] = path
orbit.attrs['Note'] = 'Longitude of perihelion is defined to be 0 degrees at Northern Vernal Equinox. This differs by 180 degrees from orbit91 source file.'
return orbit | 0.015106 |
def type_string(self):
'''
Returns the names of the flags that are set in the Type field
It can be used to format the counter.
'''
type = self.get_info()['type']
type_list = []
for member in dir(self):
if member.startswith("PERF_"):
bit = getattr(self, member)
if bit and bit & type:
type_list.append(member[5:])
return type_list | 0.004367 |
def cli(ctx, config, quiet):
"""AWS ECS Docker Deployment Tool"""
ctx.obj = {}
ctx.obj['config'] = load_config(config.read()) # yaml.load(config.read())
ctx.obj['quiet'] = quiet
log(ctx, ' * ' + rnd_scotty_quote() + ' * ') | 0.004115 |
def GenerateNewFileName(self):
"""
Create new file name from show name, season number, episode number
and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
Returns
----------
string
New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
"""
if self.showInfo.showName is not None and self.showInfo.seasonNum is not None and \
self.showInfo.episodeNum is not None and self.showInfo.episodeName is not None:
ext = os.path.splitext(self.fileInfo.origPath)[1]
newFileName = "{0}.S{1}E{2}".format(self.showInfo.showName, self.showInfo.seasonNum, \
self.showInfo.episodeNum)
for episodeNum in self.showInfo.multiPartEpisodeNumbers:
newFileName = newFileName + "_{0}".format(episodeNum)
newFileName = newFileName + ".{0}{1}".format(self.showInfo.episodeName, ext)
newFileName = util.StripSpecialCharacters(newFileName)
return newFileName | 0.013225 |
def ListTimeZones(self):
"""Lists the timezones."""
max_length = 0
for timezone_name in pytz.all_timezones:
if len(timezone_name) > max_length:
max_length = len(timezone_name)
utc_date_time = datetime.datetime.utcnow()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Timezone', 'UTC Offset'],
title='Zones')
for timezone_name in pytz.all_timezones:
try:
local_timezone = pytz.timezone(timezone_name)
except AssertionError as exception:
logger.error((
'Unable to determine information about timezone: {0:s} with '
'error: {1!s}').format(timezone_name, exception))
continue
local_date_string = '{0!s}'.format(
local_timezone.localize(utc_date_time))
if '+' in local_date_string:
_, _, diff = local_date_string.rpartition('+')
diff_string = '+{0:s}'.format(diff)
else:
_, _, diff = local_date_string.rpartition('-')
diff_string = '-{0:s}'.format(diff)
table_view.AddRow([timezone_name, diff_string])
table_view.Write(self._output_writer) | 0.006891 |
def get_commensurate_points(supercell_matrix): # wrt primitive cell
"""Commensurate q-points are returned.
Parameters
----------
supercell_matrix : array_like
Supercell matrix with respect to primitive cell basis vectors.
shape=(3, 3)
dtype=intc
"""
smat = np.array(supercell_matrix, dtype=int)
rec_primitive = PhonopyAtoms(numbers=[1],
scaled_positions=[[0, 0, 0]],
cell=np.diag([1, 1, 1]),
pbc=True)
rec_supercell = get_supercell(rec_primitive, smat.T)
q_pos = rec_supercell.get_scaled_positions()
return np.array(np.where(q_pos > 1 - 1e-15, q_pos - 1, q_pos),
dtype='double', order='C') | 0.001289 |
def distances(self, points):
"""
Computes the distances from the plane to each of the points. Positive distances are on the side of the
normal of the plane while negative distances are on the other side
:param points: Points for which distances are computed
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side)
"""
return [np.dot(self.normal_vector, pp) + self.d for pp in points] | 0.007407 |
def get_number_of_desktops(self):
"""
Get the current number of desktops.
Uses ``_NET_NUMBER_OF_DESKTOPS`` of the EWMH spec.
:return: the current number of desktops as an ``int``
"""
ndesktops = ctypes.c_long(0)
_libxdo.xdo_get_number_of_desktops(self._xdo, ctypes.byref(ndesktops))
return ndesktops.value | 0.00489 |
def _headers(self, headers_dict):
"""
Convert dictionary of headers into twisted.web.client.Headers object.
"""
return Headers(dict((k,[v]) for (k,v) in headers_dict.items())) | 0.019324 |
def _acceptance_prob(self, position, position_bar, momentum, momentum_bar):
"""
Returns the acceptance probability for given new position(position) and momentum
"""
# Parameters to help in evaluating Joint distribution P(position, momentum)
_, logp = self.grad_log_pdf(position, self.model).get_gradient_log_pdf()
_, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
# acceptance_prob = P(position_bar, momentum_bar)/ P(position, momentum)
potential_change = logp_bar - logp # Negative change
kinetic_change = 0.5 * np.float(np.dot(momentum_bar.T, momentum_bar) - np.dot(momentum.T, momentum))
# acceptance probability
return np.exp(potential_change - kinetic_change) | 0.010165 |
def replace_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2beta1HorizontalPodAutoscaler body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V2beta1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
return data | 0.004792 |
def getRolesForUser(self, username, filter=None, maxCount=None):
"""
This operation returns a list of role names that have been
assigned to a particular user account.
Inputs:
username - name of the user whose roles are returned
filter - filter to be applied to the resultant role set.
maxCount - maximum number of results to return for this query
"""
uURL = self._url + "/roles/getRolesForUser"
params = {
"f" : "json",
"username" : username
}
if filter is not None:
params['filter'] = filter
if maxCount is not None:
params['maxCount'] = maxCount
return self._post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | 0.007239 |
def add_defaults_to_kwargs(defaults, **kwargs):
"""Updates `kwargs` with dict of `defaults`
Args:
defaults: A dictionary of keys and values
**kwargs: The kwargs to update.
Returns:
The updated kwargs.
"""
defaults = dict(defaults)
defaults.update(kwargs)
return defaults | 0.003086 |
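A small sketch of the merge order: the defaults are copied first, then explicitly passed kwargs win, and the original defaults dict is left untouched:

defaults = {'timeout': 30, 'retries': 3}
add_defaults_to_kwargs(defaults, retries=5)
# -> {'timeout': 30, 'retries': 5}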
def sparsity_pattern(self, reordered = True, symmetric = True):
"""
Returns a sparse matrix with the filled pattern. By default,
the routine uses the reordered pattern, and the inverse
permutation is applied if `reordered` is `False`.
:param reordered: boolean (default: `True`)
:param symmetric: boolean (default: `True`)
"""
return cspmatrix(self, 1.0).spmatrix(reordered = reordered, symmetric = symmetric) | 0.026804 |
def load(self):
"""
Load this step's result from its dump directory
"""
hdf_filename = os.path.join(self._dump_dirname, 'result.h5')
if os.path.isfile(hdf_filename):
store = pd.HDFStore(hdf_filename, mode='r')
keys = store.keys()
if keys == ['/df']:
self.result = store['df']
else:
if set(keys) == set(map(lambda i: '/%s' % i, range(len(keys)))):
# keys are not necessarily ordered
self.result = [store[str(k)] for k in range(len(keys))]
else:
self.result = {k[1:]: store[k] for k in keys}
else:
self.result = joblib.load(
os.path.join(self._output_dirname, 'dump', 'result.pkl')) | 0.003672 |
def call_blink(*args, **kwargs):
'''
Blink a lamp. If lamp is ON, then blink ON-OFF-ON, otherwise OFF-ON-OFF.
Options:
* **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.
* **pause**: Time in seconds. Can be less than 1, i.e. 0.7, 0.5 sec.
CLI Example:
.. code-block:: bash
salt '*' hue.blink id=1
salt '*' hue.blink id=1,2,3
'''
devices = _get_lights()
pause = kwargs.get('pause', 0)
res = dict()
for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):
state = devices[six.text_type(dev_id)]['state']['on']
_set(dev_id, state and Const.LAMP_OFF or Const.LAMP_ON)
if pause:
time.sleep(pause)
res[dev_id] = _set(dev_id, not state and Const.LAMP_OFF or Const.LAMP_ON)
return res | 0.004684 |
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func | 0.000922 |
def _updateColumnValues(self, index, hidden):
"""
Updates the column values for the column of the given index.
:param index | <variant>
hidden | <bool>
"""
if hidden or not self.isVisible():
return
column = self.columnOf(index)
if not column in self._loadedColumns:
self._loadedColumns.add(column)
records = self.collectRecords()
self.loadColumnsRequested.emit(records, column) | 0.011152 |
def _onerror(self, result):
""" To execute on execution failure
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
if KSER_METRICS_ENABLED == "yes":
KSER_TASKS_STATUS.labels(
__hostname__, self.__class__.path, 'FAILED'
).inc()
if result:
result = self.result + result
else:
result = self.result
logger.error(
"{}.Failed: {}[{}]: {}".format(
self.__class__.__name__, self.__class__.path, self.uuid, result
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return self.onerror(result) | 0.002037 |
def find_executable(executable, path=None):
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
if sys.platform != 'win32':
return distutils.spawn.find_executable(executable, path)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable | 0.001318 |
def p_typed_var_list(self, p):
'''typed_var_list : typed_var_list COMMA typed_var
| typed_var'''
if len(p) == 4:
p[1].append(p[3])
p[0] = p[1]
elif len(p) == 2:
p[0] = [p[1]] | 0.007692 |
def evaluate_vars(data, context=None):
"""
Evaluates variables in ``data``
:param data: data structure containing variables, may be
``str``, ``dict`` or ``list``
:param context: ``dict`` containing variables
:returns: modified data structure
"""
context = context or {}
if isinstance(data, (dict, list)):
if isinstance(data, dict):
loop_items = data.items()
elif isinstance(data, list):
loop_items = enumerate(data)
for key, value in loop_items:
data[key] = evaluate_vars(value, context)
elif isinstance(data, six.string_types):
vars_found = var_pattern.findall(data)
for var in vars_found:
var = var.strip()
# if found multiple variables, create a new regexp pattern for each
# variable, otherwise different variables would get the same value
# (see https://github.com/openwisp/netjsonconfig/issues/55)
if len(vars_found) > 1:
pattern = r'\{\{(\s*%s\s*)\}\}' % var
# in case of single variables, use the precompiled
# regexp pattern to save computation
else:
pattern = var_pattern
if var in context:
data = re.sub(pattern, context[var], data)
return data | 0.000743 |
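A usage sketch, assuming the module-level var_pattern matches the {{ var }} placeholders referenced in the inline comments and the linked netjsonconfig issue:

config = {'general': {'hostname': '{{ name }}'},
          'interfaces': [{'name': 'eth0', 'address': '{{ ip }}'}]}
evaluate_vars(config, {'name': 'router-1', 'ip': '192.168.1.1'})
# -> {'general': {'hostname': 'router-1'},
#     'interfaces': [{'name': 'eth0', 'address': '192.168.1.1'}]}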
def _add_person_to_group(person, group):
""" Call datastores after adding a person to a group. """
from karaage.datastores import add_accounts_to_group
from karaage.datastores import add_accounts_to_project
from karaage.datastores import add_accounts_to_institute
a_list = person.account_set
add_accounts_to_group(a_list, group)
for project in group.project_set.all():
add_accounts_to_project(a_list, project)
for institute in group.institute_set.all():
add_accounts_to_institute(a_list, institute) | 0.001828 |
def approximate_moment(
dist,
K,
retall=False,
control_var=None,
rule="F",
order=1000,
**kws
):
"""
Approximation method for estimation of raw statistical moments.
Args:
dist : Dist
Distribution domain with dim=len(dist)
K : numpy.ndarray
The exponents of the moments of interest with shape (dim,K).
control_var : Dist
If provided will be used as a control variable to try to reduce
the error.
order (:py:data:typing.Optional[int]):
The order of quadrature/MCI
sparse : bool
If True used Smolyak's sparse grid instead of normal tensor
product grid in numerical integration.
rule : str
Quadrature rule
Key Description
---- -----------
"G" Optiomal Gaussian quadrature from Golub-Welsch
Slow for high order and composit is ignored.
"E" Gauss-Legendre quadrature
"C" Clenshaw-Curtis quadrature. Exponential growth rule is
used when sparse is True to make the rule nested.
Monte Carlo Integration
Key Description
---- -----------
"H" Halton sequence
"K" Korobov set
"L" Latin hypercube sampling
"M" Hammersley sequence
"R" (Pseudo-)Random sampling
"S" Sobol sequence
composite (:py:data:typing.Optional[int, numpy.ndarray]):
If provided, composite quadrature will be used.
Ignored in the case if gaussian=True.
If int provided, determines number of even domain splits
If array of ints, determines number of even domain splits along
each axis
If array of arrays/floats, determines location of splits
antithetic (:py:data:typing.Optional[numpy.ndarray]):
List of bool. Represents the axes to mirror using antithetic
variable during MCI.
"""
dim = len(dist)
shape = K.shape
size = int(K.size/dim)
K = K.reshape(dim, size)
if dim > 1:
shape = shape[1:]
X, W = quad.generate_quadrature(order, dist, rule=rule, normalize=True, **kws)
grid = numpy.mgrid[:len(X[0]), :size]
X = X.T[grid[0]].T
K = K.T[grid[1]].T
out = numpy.prod(X**K, 0)*W
if control_var is not None:
Y = control_var.ppf(dist.fwd(X))
mu = control_var.mom(numpy.eye(len(control_var)))
if (mu.size == 1) and (dim > 1):
mu = mu.repeat(dim)
for d in range(dim):
alpha = numpy.cov(out, Y[d])[0, 1]/numpy.var(Y[d])
out -= alpha*(Y[d]-mu)
out = numpy.sum(out, -1)
return out | 0.000702 |
def _get_node_column(cls, node, column_name):
"""Given a ParsedNode, add some fields that might be missing. Return a
reference to the dict that refers to the given column, creating it if
it doesn't yet exist.
"""
if not hasattr(node, 'columns'):
node.set('columns', {})
if column_name in node.columns:
column = node.columns[column_name]
else:
column = {'name': column_name, 'description': ''}
node.columns[column_name] = column
return column | 0.003604 |
def redraw(self, col=0):
"""redraw image, applying the following:
rotation, flips, log scale
max/min values from sliders or explicit intensity ranges
color map
interpolation
"""
conf = self.conf
# note: rotation re-calls display(), to reset the image
# other transformations will just do .set_data() on image
if conf.rot:
if self.xdata is not None:
self.xdata = self.xdata[::-1]
if self.ydata is not None:
self.ydata = self.ydata[:]
self.display(np.rot90(conf.data),
x=self.ydata, xlabel=self.ylab,
y=self.xdata, ylabel=self.xlab)
# flips, log scales
img = conf.data
if img is None: return
if len(img.shape) == 2:
col = 0
if self.conf.style == 'image':
if conf.flip_ud: img = np.flipud(img)
if conf.flip_lr: img = np.fliplr(img)
if conf.log_scale:
img = np.log10(1 + 9.0*img)
# apply intensity scale for current limited (zoomed) image
if len(img.shape) == 2:
# apply clipped color scale, as from sliders
imin = float(conf.int_lo[col])
imax = float(conf.int_hi[col])
if conf.log_scale:
imin = np.log10(1 + 9.0*imin)
imax = np.log10(1 + 9.0*imax)
(xmin, xmax, ymin, ymax) = self.conf.datalimits
if xmin is None: xmin = 0
if xmax is None: xmax = img.shape[1]
if ymin is None: ymin = 0
if ymax is None: ymax = img.shape[0]
img = (img - imin)/(imax - imin + 1.e-8)
mlo = conf.cmap_lo[0]/(1.0*conf.cmap_range)
mhi = conf.cmap_hi[0]/(1.0*conf.cmap_range)
if self.conf.style == 'image':
conf.image.set_data(np.clip((img - mlo)/(mhi - mlo + 1.e-8), 0, 1))
conf.image.set_interpolation(conf.interp)
else:
r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
rmin = float(conf.int_lo[0])
rmax = float(conf.int_hi[0])
gmin = float(conf.int_lo[1])
gmax = float(conf.int_hi[1])
bmin = float(conf.int_lo[2])
bmax = float(conf.int_hi[2])
if conf.log_scale:
rmin = np.log10(1 + 9.0*rmin)
rmax = np.log10(1 + 9.0*rmax)
gmin = np.log10(1 + 9.0*gmin)
gmax = np.log10(1 + 9.0*gmax)
bmin = np.log10(1 + 9.0*bmin)
bmax = np.log10(1 + 9.0*bmax)
rlo = conf.cmap_lo[0]/(1.0*conf.cmap_range)
rhi = conf.cmap_hi[0]/(1.0*conf.cmap_range)
glo = conf.cmap_lo[1]/(1.0*conf.cmap_range)
ghi = conf.cmap_hi[1]/(1.0*conf.cmap_range)
blo = conf.cmap_lo[2]/(1.0*conf.cmap_range)
bhi = conf.cmap_hi[2]/(1.0*conf.cmap_range)
r = (r - rmin)/(rmax - rmin + 1.e-8)
g = (g - gmin)/(gmax - gmin + 1.e-8)
b = (b - bmin)/(bmax - bmin + 1.e-8)
inew = img*1.0
inew[:,:,0] = np.clip((r - rlo)/(rhi - rlo + 1.e-8), 0, 1)
inew[:,:,1] = np.clip((g - glo)/(ghi - glo + 1.e-8), 0, 1)
inew[:,:,2] = np.clip((b - blo)/(bhi - blo + 1.e-8), 0, 1)
whitebg = conf.tricolor_bg.startswith('wh')
if whitebg:
inew = conf.tricolor_white_bg(inew)
if self.conf.style == 'image':
conf.image.set_data(inew)
conf.image.set_interpolation(conf.interp)
self.canvas.draw()
if callable(self.redraw_callback):
self.redraw_callback(wid=self.GetId()) | 0.00607 |
def validate(cls, job_config):
"""Inherit docs."""
super(ModelDatastoreInputReader, cls).validate(job_config)
params = job_config.input_reader_params
entity_kind = params[cls.ENTITY_KIND_PARAM]
# Fail fast if Model cannot be located.
try:
model_class = util.for_name(entity_kind)
except ImportError, e:
raise errors.BadReaderParamsError("Bad entity kind: %s" % e)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if issubclass(model_class, db.Model):
cls._validate_filters(filters, model_class)
else:
cls._validate_filters_ndb(filters, model_class)
property_range.PropertyRange(filters, entity_kind) | 0.009972 |
def sync_objects_in(self):
"""Synchronize from records to objects"""
self.dstate = self.STATES.BUILDING
self.build_source_files.record_to_objects() | 0.011696 |
def _handlePressure(self, d):
"""
Parse an altimeter-pressure group.
The following attributes are set:
press [int]
"""
press = d['press']
if press != '////':
press = float(press.replace('O', '0'))
if d['unit']:
if d['unit'] == 'A' or (d['unit2'] and d['unit2'] == 'INS'):
self.press = CustomPressure(press / 100, 'IN')
elif d['unit'] == 'SLP':
if press < 500:
press = press / 10 + 1000
else:
press = press / 10 + 900
self.press = CustomPressure(press)
self._remarks.append("sea-level pressure %.1fhPa" % press)
else:
self.press = CustomPressure(press)
elif press > 2500:
self.press = CustomPressure(press / 100, 'IN')
else:
self.press = CustomPressure(press) | 0.001965 |
def write(self, handle):
'''Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
if not self._frames:
return
def add(name, desc, bpe, format, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=bpe,
bytes=struct.pack(format, bytes),
dimensions=list(dimensions))
def add_str(name, desc, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=-1,
bytes=bytes.encode('utf-8'),
dimensions=list(dimensions))
def add_empty_array(name, desc, bpe):
group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])
points, analog = self._frames[0]
ppf = len(points)
# POINT group
group = self.add_group(1, 'POINT', 'POINT group')
add('USED', 'Number of 3d markers', 2, '<H', ppf)
add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
add('DATA_START', 'data block number', 2, '<H', 0)
add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
# ANALOG group
group = self.add_group(2, 'ANALOG', 'ANALOG group')
add('USED', 'analog channel count', 2, '<H', analog.shape[0])
add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
add_empty_array('SCALE', 'analog channel scale factors', 4)
add_empty_array('OFFSET', 'analog channel offsets', 2)
# TRIAL group
group = self.add_group(3, 'TRIAL', 'TRIAL group')
add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
# sync parameter information to header.
blocks = self.parameter_blocks()
self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
self.header.data_block = 2 + blocks
self.header.frame_rate = self._point_rate
self.header.last_frame = min(len(self._frames), 65535)
self.header.point_count = ppf
self.header.analog_count = np.prod(analog.shape)
self.header.analog_per_frame = analog.shape[0]
self.header.scale_factor = self._point_scale
self._write_metadata(handle)
self._write_frames(handle) | 0.002194 |
def from_file(self, fname):
"""read in a file and compute digest"""
f = open(fname, "rb")
data = f.read()
self.update(data)
f.close() | 0.011561 |
def solveMDP():
"""Solve the problem as a finite horizon Markov decision process.
The optimal policy at each stage is found using backwards induction.
Possingham and Tuck report strategies for a 50 year time horizon, so the
number of stages for the finite horizon algorithm is set to 50. There is no
discount factor reported, so we set it to 0.96 rather arbitrarily.
Returns
-------
mdp : mdptoolbox.mdp.FiniteHorizon
The PyMDPtoolbox object that represents a finite horizon MDP. The
optimal policy for each stage is accessed with mdp.policy, which is a
numpy array with 50 columns (one for each stage).
"""
P, R = getTransitionAndRewardArrays(0.5)
sdp = mdp.FiniteHorizon(P, R, 0.96, 50)
sdp.run()
return(sdp) | 0.004994 |
def _SkipFieldContents(tokenizer):
"""Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
# Try to guess the type of this field.
# If this field is not a message, there should be a ":" between the
# field name and the field value and also the field value should not
# start with "{" or "<" which indicates the beginning of a message body.
# If there is no ":" or there is a "{" or "<" after ":", this field has
# to be a message or the input is ill-formed.
if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
'{') and not tokenizer.LookingAt('<'):
_SkipFieldValue(tokenizer)
else:
_SkipFieldMessage(tokenizer) | 0.015193 |
def remove_product_version_from_build_configuration(id=None, name=None, product_version_id=None):
"""
Remove a ProductVersion from association with a BuildConfiguration
"""
data = remove_product_version_from_build_configuration_raw(id, name, product_version_id)
if data:
return utils.format_json_list(data) | 0.008982 |
def median_filter(data, size=3, cval = 0, res_g=None, sub_blocks=None):
"""
median filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
cval: scalar,
the constant value for out of border access (cf mode = "constant")
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_median_filter_gpu_2d())
elif data.ndim == 3:
_filt = make_filter(_median_filter_gpu_3d())
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, cval = cval, res_g=res_g, sub_blocks=sub_blocks) | 0.008466 |
def handle_import_tags(userdata, import_root):
"""Handle @import(filepath)@ tags in a UserData script.
:param import_root: Location for imports.
:type import_root: str
:param userdata: UserData script content.
:type userdata: str
:return: UserData script with the contents of the imported files.
:rtype: str
"""
imports = re.findall('@import\((.*?)\)@', userdata) # pylint: disable=anomalous-backslash-in-string
if not imports:
return userdata
for filepath in imports:
logger.info('Processing "import" of %s', filepath)
import_path = os.path.join(import_root, filepath)
try:
with open(import_path) as fo:
content = fo.read()
userdata = userdata.replace('@import(%s)@' % filepath, content)
except FileNotFoundError:
raise UserDataException('Import path {} not found.'.format(import_path))
return userdata | 0.006763 |
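A runnable sketch of the @import(...)@ expansion, assuming the module-level logger and UserDataException used above are in scope; the file layout is hypothetical:

import os
import tempfile

root = tempfile.mkdtemp()
with open(os.path.join(root, 'packages.sh'), 'w') as fo:
    fo.write('apt-get install -y nginx\n')

script = '#!/bin/bash\n@import(packages.sh)@\n'
handle_import_tags(script, root)
# -> '#!/bin/bash\napt-get install -y nginx\n\n'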
def _wget(cmd, opts=None, url='http://localhost:8080/manager', timeout=180):
'''
A private function used to issue the command to tomcat via the manager
webapp
cmd
the command to execute
url
The URL of the server manager webapp (example:
http://localhost:8080/manager)
opts
a dict of arguments
timeout
timeout for HTTP request
Return value is a dict in the from of::
{
res: [True|False]
msg: list of lines we got back from the manager
}
'''
ret = {
'res': True,
'msg': []
}
# prepare authentication
auth = _auth(url)
if auth is False:
ret['res'] = False
ret['msg'] = 'missing username and password settings (grain/pillar)'
return ret
# prepare URL
if url[-1] != '/':
url += '/'
url6 = url
url += 'text/{0}'.format(cmd)
url6 += '{0}'.format(cmd)
if opts:
url += '?{0}'.format(_urlencode(opts))
url6 += '?{0}'.format(_urlencode(opts))
# Make the HTTP request
_install_opener(auth)
try:
# Trying tomcat >= 7 url
ret['msg'] = _urlopen(url, timeout=timeout).read().splitlines()
except Exception:
try:
# Trying tomcat6 url
ret['msg'] = _urlopen(url6, timeout=timeout).read().splitlines()
except Exception:
ret['msg'] = 'Failed to create HTTP request'
if not ret['msg'][0].startswith('OK'):
ret['res'] = False
return ret | 0.000646 |
def handle_molecular_activity_default(_: str, __: int, tokens: ParseResults) -> ParseResults:
"""Handle a BEL 2.0 style molecular activity with BEL default names."""
upgraded = language.activity_labels[tokens[0]]
tokens[NAMESPACE] = BEL_DEFAULT_NAMESPACE
tokens[NAME] = upgraded
return tokens | 0.00641 |
def downsample(vector, factor):
"""
downsample(vector, factor):
Downsample (by averaging) a vector by an integer factor.
"""
if (len(vector) % factor):
print "Length of 'vector' is not divisible by 'factor'=%d!" % factor
return 0
vector.shape = (len(vector) / factor, factor)
return numpy.mean(vector, axis=1) | 0.002801 |
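A worked example (Python 2, to match the snippet's print statement and integer division), assuming numpy is imported as in the surrounding module; note that the function reshapes the caller's array in place as a side effect:

downsample(numpy.arange(6.), 2)   # -> array([ 0.5,  2.5,  4.5])
downsample(numpy.arange(5.), 2)   # prints the length warning and returns 0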
def _set_interface_bfd(self, v, load=False):
"""
Setter method for interface_bfd, mapped from YANG variable /routing_system/interface/ve/intf_isis/interface_isis/interface_bfd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_bfd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_bfd() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_bfd.interface_bfd, is_container='container', presence=False, yang_name="interface-bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BFD operation mode on this interface', u'hidden': u'full', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_bfd must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_bfd.interface_bfd, is_container='container', presence=False, yang_name="interface-bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BFD operation mode on this interface', u'hidden': u'full', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__interface_bfd = t
if hasattr(self, '_set'):
self._set() | 0.005238 |
def combine_chunks(storage, args, prefix=None):
'''
Combine a chunked file into a whole file again.
Goes through each part, in order,
and appends that part's bytes to another destination file.
Chunks are stored in the chunks storage.
'''
uuid = args['uuid']
# Normalize filename including extension
target = utils.normalize(args['filename'])
if prefix:
target = os.path.join(prefix, target)
with storage.open(target, 'wb') as out:
        for i in range(args['totalparts']):
partname = chunk_filename(uuid, i)
out.write(chunks.read(partname))
chunks.delete(partname)
chunks.delete(chunk_filename(uuid, META))
return target | 0.001391 |
def nacm_rule_list_rule_rule_type_data_node_path(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
name_key = ET.SubElement(rule_list, "name")
name_key.text = kwargs.pop('name')
rule = ET.SubElement(rule_list, "rule")
name_key = ET.SubElement(rule, "name")
name_key.text = kwargs.pop('name')
rule_type = ET.SubElement(rule, "rule-type")
data_node = ET.SubElement(rule_type, "data-node")
path = ET.SubElement(data_node, "path")
path.text = kwargs.pop('path')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003659 |
def _add_column(self, column):
"""
Add a new column to the DataFrame
:param column: column name
:return: nothing
"""
self._columns.append(column)
if self._blist:
self._data.append(blist([None] * len(self._index)))
else:
self._data.append([None] * len(self._index)) | 0.005666 |
def _send(self, *messages):
"""Send messages."""
if not self.transport:
return False
message = '\n'.join(messages) + '\n'
self.transport.write(message.encode('ascii')) | 0.009434 |
def attrib_to_dict(element, *args, **kwargs):
"""For an ElementTree ``element`` extract specified attributes. If an attribute does not exists, its value will be
``None``.
attrib_to_dict(element, 'attr_a', 'attr_b') -> {'attr_a': 'value', 'attr_a': 'value'}
Mapping between xml attributes and dictionary keys is done with kwargs.
attrib_to_dict(element, my_new_name = 'xml_atribute_name', ..)
"""
if len(args) > 0:
return {key: element.get(key) for key in args}
if len(kwargs) > 0:
return {new_key: element.get(old_key) for new_key, old_key in viewitems(kwargs)}
return element.attrib | 0.00626 |
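A short illustrative call using the positional form; the XML fragment is an assumption for demonstration:
import xml.etree.ElementTree as ET

elem = ET.fromstring('<node attr_a="1" attr_b="2"/>')
attrib_to_dict(elem, 'attr_a', 'attr_c')  # -> {'attr_a': '1', 'attr_c': None}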
def zonearea(idf, zonename, debug=False):
"""zone area"""
zone = idf.getobject('ZONE', zonename)
surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()]
zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR']
if debug:
print(len(floors))
print([floor.area for floor in floors])
# area = sum([floor.area for floor in floors])
    if floors:
area = zonearea_floor(idf, zonename)
else:
area = zonearea_roofceiling(idf, zonename)
return area | 0.001701 |
def get_default_task(self):
"""
Returns the default task if there is only one
"""
default_tasks = list(filter(lambda task: task.default, self.values()))
if len(default_tasks) == 1:
return default_tasks[0] | 0.007813 |
def get_names_in_namespace_page(namespace_id, offset, count, proxy=None, hostport=None):
"""
Get a page of names in a namespace
Returns the list of names on success
Returns {'error': ...} on error
"""
assert proxy or hostport, 'Need proxy or hostport'
if proxy is None:
proxy = connect_hostport(hostport)
assert count <= 100, 'Page too big: {}'.format(count)
names_schema = {
'type': 'object',
'properties': {
'names': {
'type': 'array',
'items': {
'type': 'string',
'uniqueItems': True
},
},
},
'required': [
'names',
],
}
schema = json_response_schema( names_schema )
resp = {}
try:
resp = proxy.get_names_in_namespace(namespace_id, offset, count)
resp = json_validate(schema, resp)
if json_is_error(resp):
return resp
# must be valid names
valid_names = []
for n in resp['names']:
if not is_name_valid(str(n)):
log.error('Invalid name "{}"'.format(str(n)))
else:
valid_names.append(n)
return valid_names
except ValidationError as e:
if BLOCKSTACK_DEBUG:
log.exception(e)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp | 0.004074 |
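A hedged call sketch; the hostport value is an assumption and a reachable Blockstack node is required for this to return real data:
page = get_names_in_namespace_page('id', offset=0, count=100, hostport='localhost:6264')
if isinstance(page, dict) and 'error' in page:
    print(page['error'])
else:
    print(page[:10])  # first names on this page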
def set_source_nodes(self, source_nodes):
r"""
Set multiple source nodes and compute their t-weights.
Parameters
----------
source_nodes : sequence of integers
Declare the source nodes via their ids.
Raises
------
ValueError
If a passed node id does not refer to any node of the graph
(i.e. it is either higher than the initially set number of
nodes or lower than zero).
Notes
-----
It does not get checked if one of the supplied source-nodes already has
a weight assigned (e.g. by passing it to `set_sink_nodes`). This can
occur when the foreground- and background-markers cover the same region. In this
case the order of setting the terminal nodes can affect the graph and therefore
the graph-cut result.
"""
if max(source_nodes) >= self.__nodes or min(source_nodes) < 0:
raise ValueError('Invalid node id of {} or {}. Valid values are 0 to {}.'.format(max(source_nodes), min(source_nodes), self.__nodes - 1))
# set the source-to-node weights (t-weights)
for snode in source_nodes:
self.__graph.add_tweights(int(snode), self.MAX, 0) | 0.006902 |
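A hedged sketch of how source seeds might be set; GCGraph and its (nodes, edges) constructor are assumptions standing in for the enclosing graph-cut wrapper class, which is not shown here:
graph = GCGraph(5, 4)           # hypothetical wrapper: 5 nodes, 4 edges
graph.set_source_nodes([0, 1])  # nodes 0 and 1 become foreground seeds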
def path_dispatch_kwarg(mname, path_default, returns_model):
"""
Parameterized decorator for methods that accept path as a second
argument.
"""
def _wrapper(self, path=path_default, **kwargs):
prefix, mgr, mgr_path = _resolve_path(path, self.managers)
result = getattr(mgr, mname)(path=mgr_path, **kwargs)
if returns_model and prefix:
return _apply_prefix(prefix, result)
else:
return result
return _wrapper | 0.002053 |
def currentpath(self) -> str:
"""Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
"""
return os.path.join(self.basepath, self.currentdir) | 0.003273 |
def end_of_month(val):
"""
Return a new datetime.datetime object with values that represent
a end of a month.
:param val: Date to ...
:type val: datetime.datetime | datetime.date
:rtype: datetime.datetime
"""
if type(val) == date:
val = datetime.fromordinal(val.toordinal())
if val.month == 12:
return start_of_month(val).replace(year=val.year + 1, month=1) \
- timedelta(microseconds=1)
else:
return start_of_month(val).replace(month=val.month + 1) \
- timedelta(microseconds=1) | 0.001745 |
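An illustrative call, assuming the companion start_of_month helper referenced in the body is defined alongside:
from datetime import date
end_of_month(date(2020, 2, 14))  # -> datetime.datetime(2020, 2, 29, 23, 59, 59, 999999)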
def CheckRepeatLogic(filename, linenumber, clean_lines, errors):
"""
Check for logic inside else, endif etc
"""
line = clean_lines.lines[linenumber]
for cmd in _logic_commands:
if re.search(r'\b%s\b'%cmd, line.lower()):
m = _RE_LOGIC_CHECK.search(line)
if m:
errors(filename, linenumber, 'readability/logic',
'Expression repeated inside %s; '
'better to use only %s()'%(cmd, m.group(1)))
break | 0.007678 |
def create_replication_schedule(self,
start_time, end_time, interval_unit, interval, paused, arguments,
alert_on_start=False, alert_on_success=False, alert_on_fail=False,
alert_on_abort=False):
"""
Create a new replication schedule for this service.
The replication argument type varies per service type. The following types
are recognized:
- HDFS: ApiHdfsReplicationArguments
- Hive: ApiHiveReplicationArguments
@type start_time: datetime.datetime
@param start_time: The time at which the schedule becomes active and first executes.
@type end_time: datetime.datetime
@param end_time: The time at which the schedule will expire.
@type interval_unit: str
@param interval_unit: The unit of time the `interval` represents. Ex. MINUTE, HOUR,
DAY. See the server documentation for a full list of values.
@type interval: int
@param interval: The number of time units to wait until triggering the next replication.
@type paused: bool
@param paused: Should the schedule be paused? Useful for on-demand replication.
@param arguments: service type-specific arguments for the replication job.
@param alert_on_start: whether to generate alerts when the job is started.
@param alert_on_success: whether to generate alerts when the job succeeds.
@param alert_on_fail: whether to generate alerts when the job fails.
@param alert_on_abort: whether to generate alerts when the job is aborted.
@return: The newly created schedule.
@since: API v3
"""
schedule = ApiReplicationSchedule(self._get_resource_root(),
startTime=start_time, endTime=end_time, intervalUnit=interval_unit, interval=interval,
paused=paused, alertOnStart=alert_on_start, alertOnSuccess=alert_on_success,
alertOnFail=alert_on_fail, alertOnAbort=alert_on_abort)
if self.type == 'HDFS':
if isinstance(arguments, ApiHdfsCloudReplicationArguments):
schedule.hdfsCloudArguments = arguments
elif isinstance(arguments, ApiHdfsReplicationArguments):
schedule.hdfsArguments = arguments
else:
        raise TypeError('Unexpected type for HDFS replication argument.')
elif self.type == 'HIVE':
if not isinstance(arguments, ApiHiveReplicationArguments):
        raise TypeError('Unexpected type for Hive replication argument.')
schedule.hiveArguments = arguments
else:
      raise TypeError('Replication is not supported for service type ' + self.type)
return self._post("replications", ApiReplicationSchedule, True, [schedule],
api_version=3)[0] | 0.009502 |
def get_annotations(fname, prefix=None):
"Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
annot_dict = json.load(open(fname))
id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
classes = {}
for o in annot_dict['categories']:
classes[o['id']] = o['name']
for o in annot_dict['annotations']:
bb = o['bbox']
id2bboxes[o['image_id']].append([bb[1],bb[0], bb[3]+bb[1], bb[2]+bb[0]])
id2cats[o['image_id']].append(classes[o['category_id']])
for o in annot_dict['images']:
if o['id'] in id2bboxes:
id2images[o['id']] = ifnone(prefix, '') + o['file_name']
ids = list(id2images.keys())
return [id2images[k] for k in ids], [[id2bboxes[k], id2cats[k]] for k in ids] | 0.007034 |
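A minimal sketch of the expected COCO-style layout; the tiny annotation dict is an assumption for illustration, and ifnone is assumed to come from the surrounding fastai-style utilities:
import json
import tempfile

coco = {
    'categories': [{'id': 1, 'name': 'cat'}],
    'images': [{'id': 10, 'file_name': 'img_0.jpg'}],
    'annotations': [{'image_id': 10, 'category_id': 1, 'bbox': [5, 5, 20, 30]}],
}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fh:
    json.dump(coco, fh)
fnames, lbl_bboxes = get_annotations(fh.name)
# fnames == ['img_0.jpg']; each bbox is converted to [top, left, bottom, right]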
async def play_tone(self, command):
"""
This method controls a piezo device to play a tone. It is a FirmataPlus feature.
Tone command is TONE_TONE to play, TONE_NO_TONE to stop playing.
:param command: {"method": "play_tone", "params": [PIN, TONE_COMMAND, FREQUENCY(Hz), DURATION(MS)]}
        :returns: No return message.
"""
pin = int(command[0])
if command[1] == "TONE_TONE":
tone_command = Constants.TONE_TONE
else:
tone_command = Constants.TONE_NO_TONE
frequency = int(command[2])
duration = int(command[3])
await self.core.play_tone(pin, tone_command, frequency, duration) | 0.005806 |
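An illustrative payload matching the documented parameter order (pin, tone command, frequency in Hz, duration in ms); pin 3 and the 440 Hz / 1000 ms values are assumptions:
async def demo(handler):
    # handler is assumed to be an instance of the class that defines play_tone above
    await handler.play_tone([3, "TONE_TONE", 440, 1000])  # pin 3, 440 Hz for one second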
def emulate_users(self, request):
"""
The list view
"""
def display_as_link(self, obj):
try:
identifier = getattr(user_model_admin, list_display_link)(obj)
except AttributeError:
identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2]
emulate_user_id = request.session.get('emulate_user_id')
if emulate_user_id == obj.id:
return format_html('<strong>{}</strong>', identifier)
fmtargs = {
'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}),
'identifier': identifier,
}
return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs)
opts = self.UserModel._meta
app_label = opts.app_label
user_model_admin = self.admin_site._registry[self.UserModel]
request._lookup_model = self.UserModel
list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display)
# replace first entry in list_display_links by customized method display_as_link
list_display_link = list_display_links[0]
try:
list_display = list(user_model_admin.segmentation_list_display)
except AttributeError:
list_display = list(user_model_admin.list_display)
list_display.remove(list_display_link)
list_display.insert(0, 'display_as_link')
display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9
try:
display_as_link.short_description = user_model_admin.identifier.short_description
except AttributeError:
display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel)
self.display_as_link = six.create_bound_method(display_as_link, self)
ChangeList = self.get_changelist(request)
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self)
cl.formset = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = {
'module_name': force_text(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name},
'is_popup': cl.is_popup,
'cl': cl,
'media': self.media,
'has_add_permission': False,
'opts': cl.opts,
'app_label': app_label,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'preserved_filters': self.get_preserved_filters(request),
}
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context) | 0.004617 |
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named matching the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: set(('PKG-INFO',
'requires.txt',
'SOURCES.txt',
'not-zip-safe',)))
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
# XXX deprecated. Still useful for current distribute/setuptools.
metadata_path = os.path.join(distinfo_path, 'METADATA')
self.add_requirements(metadata_path)
# XXX intentionally a different path than the PEP.
metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
pymeta = pkginfo_to_dict(metadata_path,
distribution=self.distribution)
if 'description' in pymeta:
description_filename = 'DESCRIPTION.rst'
description_text = pymeta.pop('description')
description_path = os.path.join(distinfo_path,
description_filename)
with open(description_path, "wb") as description_file:
description_file.write(description_text.encode('utf-8'))
pymeta['extensions']['python.details']['document_names']['description'] = description_filename
# XXX heuristically copy any LICENSE/LICENSE.txt?
license = self.license_file()
if license:
license_filename = 'LICENSE.txt'
shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
pymeta['extensions']['python.details']['document_names']['license'] = license_filename
with open(metadata_json_path, "w") as metadata_json:
json.dump(pymeta, metadata_json, sort_keys=True)
adios(egginfo_path) | 0.001815 |
def populate(self, node_generator):
"""
Populate Merkle Tree with data from node_generator. This requires that node_generator yield byte[] elements.
Hashes, computes hex digest, and adds it to the Merkle Tree
:param node_generator:
:return:
"""
for data in node_generator:
hashed = hash_byte_array(data)
self.tree.add_leaf(hashed) | 0.007317 |
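A small sketch of a suitable node_generator; the byte strings are illustrative and the tree-owning object is assumed to be already constructed:
def node_generator():
    for record in (b'alpha', b'bravo', b'charlie'):
        yield record  # each yielded value is a byte string, as required

# assembler.populate(node_generator())  # `assembler` is a hypothetical instance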
def _update_items(self, items):
"""
Replace the 'items' list of this OrderedSet with a new one, updating
self.map accordingly.
"""
self.items = items
self.map = {item: idx for (idx, item) in enumerate(items)} | 0.007813 |