text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def init():
"""Execute init tasks for all components (virtualenv, pip)."""
if not os.path.isdir('venv'):
print(cyan('\nCreating the virtual env...'))
local('pyvenv-3.4 venv')
print(green('Virtual env created.'))
print(green('Virtual Environment ready.')) | 0.003413 |
def start_tag(self, name, attrs=None):
"""Open an XML tag"""
if not attrs:
self._write('<%s>' % name)
else:
self._write('<' + name)
for (name, value) in sorted(attrs.items()):
self._write(
' %s=%s' % (name, quoteattr(scientificformat(value))))
self._write('>')
self.indentlevel += 1 | 0.005025 |
def column_start_to_end(data, column, start_idx, end_idx):
"""Return a list of numeric data entries in the given column from the starting
index to the ending index. This list can be compiled over one or more
DataFrames.
:param data: a list of DataFrames to extract data in one column from
:type data: Pandas.DataFrame list
:param column: a column index
:type column: int
:param start_idx: the index of the starting row
:type start_idx: int
:param end_idx: the index of the ending row
:type end_idx: int
:return: a list of data from the given column
:rtype: float list
"""
if len(data) == 1:
result = list(pd.to_numeric(data[0].iloc[start_idx:end_idx, column]))
else:
result = list(pd.to_numeric(data[0].iloc[start_idx:, column]))
for i in range(1, len(data)-1):
data[i].iloc[0, 0] = 0
result += list(pd.to_numeric(data[i].iloc[:, column]) +
(i if column == 0 else 0))
data[-1].iloc[0, 0] = 0
result += list(pd.to_numeric(data[-1].iloc[:end_idx, column]) +
(len(data)-1 if column == 0 else 0))
return result | 0.003364 |
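A minimal usage sketch for the single-DataFrame path above, assuming pandas is available and column_start_to_end is in scope; the frame and indices are made up for illustration.
import pandas as pd

# Hypothetical data: column 0 is a counter, column 1 holds readings stored as strings.
df = pd.DataFrame({'t': [0, 1, 2, 3, 4], 'value': ['10', '11', '12', '13', '14']})

# Rows 1..3 (end index exclusive) of column 1, coerced to numbers.
print(column_start_to_end([df], column=1, start_idx=1, end_idx=4))
# [11, 12, 13]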
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_pvstp_pvstp_root_bridge_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
pvstp = ET.SubElement(spanning_tree_mode, "pvstp")
pvstp = ET.SubElement(pvstp, "pvstp")
vlan_id_key = ET.SubElement(pvstp, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
root_bridge = ET.SubElement(pvstp, "root-bridge")
priority = ET.SubElement(root_bridge, "priority")
priority.text = kwargs.pop('priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004094 |
def nack(self, frame):
"""
Handles the NACK command: Unacknowledges receipt of a message.
For now, this is just a placeholder implementation for this version of the protocol
"""
if not frame.headers.get('message-id'):
raise ProtocolError("No message-id specified for NACK command.")
if not frame.headers.get('subscription'):
raise ProtocolError("No subscription specified for NACK command.") | 0.006565 |
def endLoop(self, useDriverLoop):
'''
Called by the engine to stop an event loop.
'''
self._queue = []
self._driver.stop()
if useDriverLoop:
self._driver.endLoop()
else:
self._iterator = None
self.setBusy(True) | 0.006711 |
def edit(args):
    """Carefully set up a dict of options."""
    tap = DbTap.find(args.id)
    options = {}
    if args.name is not None:
        options["db_name"] = args.name
if args.host is not None:
options["db_host"]=args.host
if args.user is not None:
options["db_user"]=args.user
if args.password is not None:
options["db_passwd"] = args.password
if args.type is not None:
options["db_type"] = args.type
if args.location is not None:
options["db_location"] = args.location
if args.port is not None:
options["port"] = args.port
tap = tap.edit(**options)
return json.dumps(tap.attributes, sort_keys=True, indent=4) | 0.007864 |
def resetSession(self, username=None, password=None, verify=True):
"""resets the session"""
self.disconnectSession()
self.session = AikidoSession(username, password, verify) | 0.015152 |
def Move(self, x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
Call IUIAutomationTransformPattern::Move.
Move the UI Automation element.
x: int.
y: int.
waitTime: float.
Return bool, True if succeed otherwise False.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-move
"""
ret = self.pattern.Move(x, y) == S_OK
time.sleep(waitTime)
return ret | 0.005556 |
def timed(limit):
"""Test must finish within specified time limit to pass.
Example use::
@timed(.1)
def test_that_fails():
time.sleep(.2)
"""
def decorate(func):
def newfunc(*arg, **kw):
start = time.time()
func(*arg, **kw)
end = time.time()
if end - start > limit:
raise TimeExpired("Time limit (%s) exceeded" % limit)
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate | 0.001916 |
def _absolute_path(path, relative_to=None):
'''
Return an absolute path. In case ``relative_to`` is passed and ``path`` is
not an absolute path, we try to prepend ``relative_to`` to ``path`` and if
that path exists, return that one
'''
if path and os.path.isabs(path):
return path
if path and relative_to is not None:
_abspath = os.path.join(relative_to, path)
if os.path.isfile(_abspath):
log.debug(
'Relative path \'%s\' converted to existing absolute path '
'\'%s\'', path, _abspath
)
return _abspath
return path | 0.001565 |
def __format_filters(filters):
"""
Format filters for the api query (to filter[<filter-name>])
:param filters: dict: can be None, filters for the query
:return: the formatted filters, or None
"""
if filters is not None:
for k in list(filters):  # iterate over a copy of the keys; the dict is mutated below
if 'filter[' not in k:
filters['filter[{}]'.format(k)] = filters.pop(k)
return filters | 0.004662 |
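A standalone sketch of the same key-wrapping behaviour, for illustration; the names here are made up, and the loop works on a copied key list because the dict is mutated in place.
def format_filters(filters):
    """Wrap bare keys as filter[<name>], mirroring the method above."""
    if filters is not None:
        for k in list(filters):              # copy the keys; we pop/insert below
            if 'filter[' not in k:
                filters['filter[{}]'.format(k)] = filters.pop(k)
    return filters

print(format_filters({'status': 'open', 'filter[type]': 'bug'}))
# {'filter[type]': 'bug', 'filter[status]': 'open'}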
def get_indexes(self):
""" Get a dict of index names to index """
ret = {}
for index in self.iter_query_indexes():
ret[index.name] = index
return ret | 0.010363 |
def binding_from_item(inventory, item):
"""Return binding for `item`
Example:
asset:
- myasset
The binding is "asset"
Arguments:
inventory (dict): Inventory to look up the binding in
item (str): Name of item
"""
if item in self.bindings:
return self.bindings[item]
bindings = invert_inventory(inventory)
try:
self.bindings[item] = bindings[item]
return bindings[item]
except KeyError as exc:
exc.bindings = bindings
raise exc | 0.001938 |
def workspace_from_dir(directory, recurse=True):
"""
Construct a workspace object from a directory name. If recurse=True, this
function will search down the directory tree and return the first workspace
it finds. If recurse=False, an exception will be raised if the given
directory is not a workspace. Workspace identification requires a file
called 'workspace.pkl' to be present in each workspace directory, which can
unfortunately be a little fragile.
"""
directory = os.path.abspath(directory)
pickle_path = os.path.join(directory, 'workspace.pkl')
# Make sure the given directory contains a 'workspace' file. This file is
# needed to instantiate the right kind of workspace.
if not os.path.exists(pickle_path):
if recurse:
parent_dir = os.path.dirname(directory)
# Keep looking for a workspace as long as we haven't hit the root
# of the file system. If an exception is raised, that means no
# workspace was found. Catch and re-raise the exception so that
# the name of the directory reported in the exception is meaningful
# to the user.
try:
return workspace_from_dir(parent_dir, parent_dir != '/')
except WorkspaceNotFound:
raise WorkspaceNotFound(directory)
else:
raise WorkspaceNotFound(directory)
# Load the 'workspace' file and create a workspace.
with open(pickle_path, 'rb') as file:  # pickle requires binary mode
workspace_class = pickle.load(file)
return workspace_class.from_directory(directory) | 0.000619 |
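The recursion above can also be expressed as a simple upward walk; the following self-contained sketch shows the same "search parent directories for a marker file" idea with a generic marker name and a plain FileNotFoundError standing in for WorkspaceNotFound.
import os

def find_marker_dir(directory, marker='workspace.pkl'):
    """Walk up from `directory` until a directory containing `marker` is found."""
    directory = os.path.abspath(directory)
    while True:
        if os.path.exists(os.path.join(directory, marker)):
            return directory
        parent = os.path.dirname(directory)
        if parent == directory:              # reached the filesystem root
            raise FileNotFoundError('no {!r} found above {}'.format(marker, directory))
        directory = parent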
def create_signature(cls, method, base, params,
consumer_secret, token_secret=''):
"""
Returns HMAC-SHA1 signature as specified at:
http://oauth.net/core/1.0a/#rfc.section.9.2.
:param str method:
HTTP method of the request to be signed.
:param str base:
Base URL of the request without query string and fragment.
:param dict params:
Dictionary or list of tuples of the request parameters.
:param str consumer_secret:
:attr:`.core.Consumer.secret`
:param str token_secret:
Access token secret as specified in
http://oauth.net/core/1.0a/#anchor3.
:returns:
The signature string.
"""
base_string = _create_base_string(method, base, params)
key = cls._create_key(consumer_secret, token_secret)
hashed = hmac.new(
six.b(key),
base_string.encode('utf-8'),
hashlib.sha1)
base64_encoded = binascii.b2a_base64(hashed.digest())[:-1]
return base64_encoded | 0.002681 |
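For reference, a self-contained sketch of the same HMAC-SHA1 signing scheme (OAuth 1.0a style), independent of the class above; the parameter normalisation is simplified and the example values are made up.
import base64
import hashlib
import hmac
from urllib.parse import quote

def sign_hmac_sha1(method, base_url, params, consumer_secret, token_secret=''):
    # Percent-encode and sort the parameters, then build the signature base string.
    norm = '&'.join('{}={}'.format(quote(str(k), safe=''), quote(str(v), safe=''))
                    for k, v in sorted(params.items()))
    base_string = '&'.join(quote(s, safe='') for s in (method.upper(), base_url, norm))
    # The signing key is consumer secret and token secret joined by '&'.
    key = '{}&{}'.format(quote(consumer_secret, safe=''), quote(token_secret, safe=''))
    digest = hmac.new(key.encode(), base_string.encode(), hashlib.sha1).digest()
    return base64.b64encode(digest).decode()

print(sign_hmac_sha1('GET', 'https://api.example.com/photos',
                     {'oauth_nonce': 'abc123', 'oauth_timestamp': '1000000000'},
                     consumer_secret='s3cr3t'))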
def bulk_create(self, *records):
"""Create and validate multiple records in associated app
Args:
*records (dict): One or more dicts of new record field names and values
Notes:
Requires Swimlane 2.15+
Validates like :meth:`create`, but only sends a single request to create all provided records, and does not
return the newly created records.
Any validation failures on any of the records will abort the batch creation, not creating any new records
Examples:
Create 3 new records with single request
::
app.records.bulk_create(
{'Field 1': 'value 1', ...},
{'Field 1': 'value 2', ...},
{'Field 1': 'value 3', ...}
)
Raises:
swimlane.exceptions.UnknownField: If any field in any new record cannot be found
swimlane.exceptions.ValidationError: If any field in any new record fails validation
TypeError: If no dict of fields was provided, or any provided argument is not a dict
"""
if not records:
raise TypeError('Must provide at least one record')
if any(not isinstance(r, dict) for r in records):
raise TypeError('New records must be provided as dicts')
# Create local records from factory for initial full validation
new_records = []
for record_data in records:
record = record_factory(self._app, record_data)
record.validate()
new_records.append(record)
self._swimlane.request(
'post',
'app/{}/record/batch'.format(self._app.id),
json=[r._raw for r in new_records]
) | 0.004334 |
def find_all(self, table_name, constraints=None, *, columns=None, order_by=None, limiting=None):
"""Returns all records that match a given criteria.
:table_name: the name of the table to search on
:constraints: is any construct that can be parsed by SqlWriter.parse_constraints.
:columns: either a string or a list of column names
:order_by: the order by clause
:limiting: the limiting clause (e.g. limit/offset)
"""
query_string, params = self.sql_writer.get_find_all_query(
table_name, constraints, columns=columns, order_by=order_by, limiting=limiting)
query_string += ";"
return self.execute(query_string, params) | 0.006568 |
def logdet(self):
"""
Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K.
"""
self._init_svd()
return -log(self._De).sum() + self.G.shape[0] * self.C1.logdet() | 0.007168 |
def eeg_psd(raw, sensors_include="all", sensors_exclude=None, fmin=0.016, fmax=60, method="multitaper", proj=False):
"""
Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
Sensor area to include. See :func:`neurokit.eeg_select_sensors()`.
sensors_exclude : str
Sensor area to exclude. See :func:`neurokit.eeg_select_sensors()`.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
"""
picks = mne.pick_types(raw.info, include=eeg_select_electrodes(include=sensors_include, exclude=sensors_exclude), exclude="bads")
if method == "multitaper":
psds, freqs = mne.time_frequency.psd_multitaper(raw,
fmin=fmin,
fmax=fmax,
low_bias=True,
proj=proj,
picks=picks)
else:
psds, freqs = mne.time_frequency.psd_welch(raw,
fmin=fmin,
fmax=fmax,
proj=proj,
picks=picks)
tf = pd.DataFrame(psds)
tf.columns = eeg_name_frequencies(freqs)
tf = tf.mean(axis=0)
mean_psd = {}
for freq in ["UltraLow", "Delta", "Theta", "Alpha", "Alpha1", "Alpha2", "Mu", "Beta", "Beta1", "Beta2", "Gamma", "Gamma1", "Gamma2", "UltraHigh"]:
mean_psd[freq] = tf[[freq in s for s in tf.index]].mean()
mean_psd = pd.DataFrame.from_dict(mean_psd, orient="index").T
return(mean_psd) | 0.003253 |
def libraries():
"""return installed library names."""
ls = libraries_dir().dirs()
ls = [str(x.name) for x in ls]
ls.sort()
return ls | 0.006536 |
def inverse_transform(self, maps):
""" This function transforms from cartesian to spherical spins.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
sx, sy, sz = self._outputs
data = coordinates.cartesian_to_spherical(maps[sx], maps[sy], maps[sz])
out = {param : val for param, val in zip(self._outputs, data)}
return self.format_output(maps, out) | 0.005111 |
def create_api_key(self, api_id, stage_name):
"""
Create new API key and link it with an api_id and a stage_name
"""
response = self.apigateway_client.create_api_key(
name='{}_{}'.format(stage_name, api_id),
description='Api Key for {}'.format(api_id),
enabled=True,
stageKeys=[
{
'restApiId': '{}'.format(api_id),
'stageName': '{}'.format(stage_name)
},
]
)
print('Created a new x-api-key: {}'.format(response['id'])) | 0.003344 |
def handle_basic_executor_options(options, parser):
"""Handle the options specified by add_basic_executor_options()."""
# setup logging
logLevel = logging.INFO
if options.debug:
logLevel = logging.DEBUG
elif options.quiet:
logLevel = logging.WARNING
util.setup_logging(level=logLevel) | 0.003086 |
def fit(self, X, truncated=3):
"""Fit a vine model to the data.
Args:
X(numpy.ndarray): data to be fitted.
truncated(int): max level to build the vine.
"""
self.n_sample, self.n_var = X.shape
self.columns = X.columns
self.tau_mat = X.corr(method='kendall').values
self.u_matrix = np.empty([self.n_sample, self.n_var])
self.truncated = truncated
self.depth = self.n_var - 1
self.trees = []
self.unis, self.ppfs = [], []
for i, col in enumerate(X):
uni = self.model()
uni.fit(X[col])
self.u_matrix[:, i] = uni.cumulative_distribution(X[col])
self.unis.append(uni)
self.ppfs.append(uni.percent_point)
self.train_vine(self.vine_type)
self.fitted = True | 0.002358 |
async def get_wallets(self, *args, **kwargs):
"""
Get users wallets by uid
Accepts:
- uid [integer] (users id)
Returns a list:
- [
{
"address": [string],
"uid": [integer],
"amount_active": [integer],
"amount_frozen": [integer]
},
]
"""
logging.debug("\n [+] -- Get wallets debugging.")
if kwargs.get("message"):
kwargs = json.loads(kwargs.get("message"))
logging.debug(kwargs)
uid = kwargs.get("uid",0)
address = kwargs.get("address")
coinid = kwargs.get("coinid")
try:
coinid = coinid.replace("TEST", "")
except:
pass
try:
uid = int(uid)
except:
return await self.error_400("User id must be integer. ")
if not uid and address:
uid = await self.get_uid_by_address(address=address, coinid=coinid)
if isinstance(uid, dict):
return uid
wallets = [i async for i in self.collect_wallets(uid)]
return {"wallets":wallets} | 0.044276 |
def zonevolume(idf, zonename):
"""zone volume"""
area = zonearea(idf, zonename)
height = zoneheight(idf, zonename)
volume = area * height
return volume | 0.005814 |
def get_colmin(data):
"""
Get rowwise column names with minimum values
:param data: pandas dataframe
"""
data=data.T
colmins=[]
for col in data:
colmins.append(data[col].idxmin())
return colmins | 0.012766 |
def _calculate_expires(self):
"""Calculates the session expiry using the timeout"""
self._backend_client.expires = None
now = datetime.utcnow()
self._backend_client.expires = now + timedelta(seconds=self._config.timeout) | 0.011858 |
def check_w3_time (self):
"""Make sure the W3C validators are at most called once a second."""
if time.time() - self.last_w3_call < W3Timer.SleepSeconds:
time.sleep(W3Timer.SleepSeconds)
self.last_w3_call = time.time() | 0.011811 |
def compileInterpolatableTTFs(
ufos,
preProcessorClass=TTFInterpolatablePreProcessor,
outlineCompilerClass=OutlineTTFCompiler,
featureCompilerClass=None,
featureWriters=None,
glyphOrder=None,
useProductionNames=None,
cubicConversionError=None,
reverseDirection=True,
inplace=False,
layerNames=None,
skipExportGlyphs=None,
):
"""Create FontTools TrueType fonts from a list of UFOs with interpolatable
outlines. Cubic curves are converted compatibly to quadratic curves using
the Cu2Qu conversion algorithm.
Return an iterator object that yields a TTFont instance for each UFO.
*layerNames* refers to the layer names to use glyphs from in the order of
the UFOs in *ufos*. By default, this is a list of `[None]` times the number
of UFOs, i.e. using the default layer from all the UFOs.
When the layerName is not None for a given UFO, the corresponding TTFont object
will contain only a minimum set of tables ("head", "hmtx", "glyf", "loca", "maxp",
"post" and "vmtx"), and no OpenType layout tables.
*skipExportGlyphs* is a list or set of glyph names to not be exported to the
final font. If these glyphs are used as components in any other glyph, those
components get decomposed. If the parameter is not passed in, the union of
all UFO's "public.skipExportGlyphs" lib keys will be used. If they don't
exist, all glyphs are exported. UFO groups and kerning will be pruned of
skipped glyphs.
"""
from ufo2ft.util import _LazyFontName
if layerNames is None:
layerNames = [None] * len(ufos)
assert len(ufos) == len(layerNames)
if skipExportGlyphs is None:
skipExportGlyphs = set()
for ufo in ufos:
skipExportGlyphs.update(ufo.lib.get("public.skipExportGlyphs", []))
logger.info("Pre-processing glyphs")
preProcessor = preProcessorClass(
ufos,
inplace=inplace,
conversionError=cubicConversionError,
reverseDirection=reverseDirection,
layerNames=layerNames,
skipExportGlyphs=skipExportGlyphs,
)
glyphSets = preProcessor.process()
for ufo, glyphSet, layerName in zip(ufos, glyphSets, layerNames):
fontName = _LazyFontName(ufo)
if layerName is not None:
logger.info("Building OpenType tables for %s-%s", fontName, layerName)
else:
logger.info("Building OpenType tables for %s", fontName)
outlineCompiler = outlineCompilerClass(
ufo,
glyphSet=glyphSet,
glyphOrder=glyphOrder,
tables=SPARSE_TTF_MASTER_TABLES if layerName else None,
)
ttf = outlineCompiler.compile()
# Only the default layer is likely to have all glyphs used in feature
# code.
if layerName is None:
compileFeatures(
ufo,
ttf,
glyphSet=glyphSet,
featureWriters=featureWriters,
featureCompilerClass=featureCompilerClass,
)
postProcessor = PostProcessor(ttf, ufo, glyphSet=glyphSet)
ttf = postProcessor.process(useProductionNames)
if layerName is not None:
# for sparse masters (i.e. containing only a subset of the glyphs), we
# need to include the post table in order to store glyph names, so that
# fontTools.varLib can interpolate glyphs with same name across masters.
# However we want to prevent the underlinePosition/underlineThickness
# fields in such sparse masters to be included when computing the deltas
# for the MVAR table. Thus, we set them to this unlikely, limit value
# (-36768) which is a signal varLib should ignore them when building MVAR.
ttf["post"].underlinePosition = -0x8000
ttf["post"].underlineThickness = -0x8000
yield ttf | 0.003299 |
def _get_qualifier_repo(self, namespace):
"""
Returns the qualifier repository for the specified CIM namespace
within the mock repository. This is the original instance variable,
so any modifications will change the mock repository.
Validates that the namespace exists in the mock repository.
If the qualifier repository does not contain the namespace yet, it is
added.
Parameters:
namespace(:term:`string`): Namespace name. Must not be `None`.
Returns:
dict of CIMQualifierDeclaration: Qualifier repository.
Raises:
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
"""
self._validate_namespace(namespace)
if namespace not in self.qualifiers:
self.qualifiers[namespace] = NocaseDict()
return self.qualifiers[namespace] | 0.002172 |
def _compute_f1(self, C, mag, rrup):
"""
Compute f1 term (eq.4, page 105)
"""
r = np.sqrt(rrup ** 2 + C['c4'] ** 2)
f1 = (
C['a1'] +
C['a12'] * (8.5 - mag) ** C['n'] +
(C['a3'] + C['a13'] * (mag - C['c1'])) * np.log(r)
)
if mag <= C['c1']:
f1 += C['a2'] * (mag - C['c1'])
else:
f1 += C['a4'] * (mag - C['c1'])
return f1 | 0.004405 |
def csv_dumper(**kwargs):
"""dump data to csv"""
logging.info("dumping to csv")
barn = kwargs["barn"]
farms = kwargs["farms"]
experiments = kwargs["experiments"]
for experiment, farm in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
project_dir, batch_dir, raw_dir = \
experiment.journal.paginate()
if batch_dir is None:
logging.info("have to generate folder-name on the fly")
out_data_dir, project_dir, batch_dir, raw_dir = \
generate_folder_names(name, project)
if barn == "batch_dir":
out_dir = batch_dir
elif barn == "project_dir":
out_dir = project_dir
elif barn == "raw_dir":
out_dir = raw_dir
else:
out_dir = barn
for animal in farm:
file_name = os.path.join(
out_dir, "summary_%s_%s.csv" % (
animal.name,
name
)
)
logging.info(f"> {file_name}")
animal.to_csv(file_name, sep=prms.Reader.sep) | 0.000861 |
def keep_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 12
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score) | 0.008287 |
def find_connected_resources(resource, dependency_graph=None):
"""
Collects all resources connected to the given resource and returns a
dictionary mapping member resource classes to new collections containing
the members found.
"""
# Build a resource_graph.
resource_graph = \
build_resource_graph(resource,
dependency_graph=dependency_graph)
entity_map = OrderedDict()
for mb in topological_sorting(resource_graph):
mb_cls = get_member_class(mb)
ents = entity_map.get(mb_cls)
if ents is None:
ents = []
entity_map[mb_cls] = ents
ents.append(mb.get_entity())
return entity_map | 0.002759 |
def select_role(self):
"""
The workflow method for assigning the workflow to a person with the same abstract role and unit as the current user.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
roles = [(m.key, m.__unicode__()) for m in RoleModel.objects.filter(
abstract_role=self.current.role.abstract_role,
unit=self.current.role.unit) if m != self.current.role]
if roles:
_form = forms.JsonForm(title=_(u'Assign to workflow'))
_form.select_role = fields.Integer(_(u"Choose Role"), choices=roles)
_form.explain_text = fields.String(_(u"Explain Text"), required=False)
_form.send_button = fields.Button(_(u"Send"))
self.form_out(_form)
else:
title = _(u"Unsuccessful")
msg = _(u"Assign role not found")
self.current.msg_box(title=title, msg=msg) | 0.004028 |
def build_slide(filename: Path, pptx_template: Path, master_slide_idx: int, slide_layout_idx: int, font_size: int,
dst_dir: Path, font_name: str, slide_txt_alignment: str = "left") -> Path:
"""Builds a powerpoint presentation using data read from a yaml file
:param filename: path to the yaml file
:param pptx_template: path to powerpoint template
:param master_slide_idx: slide master index
:param slide_layout_idx: slide layout index
:param font_size: size of the font
:param dst_dir: directory where the generated pptx should get dumped
:param font_name: name of the font
:param slide_txt_alignment: alignment of text in slides
:return path to the generated pptx
"""
prs = pptx.Presentation(pptx_template)
# setting text box size and position
slide_height = pptx.util.Length(prs.slide_height)
slide_width = pptx.util.Length(prs.slide_width)
# Emu is English metric unit
tb_pos = {
"left": pptx.util.Emu(400000),
"top": pptx.util.Emu(400000),
"width": pptx.util.Emu(slide_width - (400000 * 2)),
"height": pptx.util.Emu(slide_height - (400000 * 2))
}
slide_layout = prs.slide_masters[master_slide_idx].slide_layouts[slide_layout_idx]
with filename.open() as f:
yml_data = yaml.safe_load(f)  # safe_load avoids arbitrary object construction
lyrics = yml_data.get('lyrics')
hard_font_size = yml_data.get("font_size")
if hard_font_size:
msg = f"NOTE: Setting the font size to {hard_font_size} for {filename}"
click.echo(click.style(msg, fg="blue"))
font_size = hard_font_size
for content in lyrics:
slide = prs.slides.add_slide(slide_layout)
txbox = slide.shapes.add_textbox(**tb_pos)
tf = txbox.text_frame
# this is to keep text frame in the middle of the screen from top to bottom of the screen
tf.vertical_anchor = pptx.enum.text.MSO_ANCHOR.MIDDLE
# this is supposed to work as per the documentation but it's not working
# 09/12/2018
# tf.fit_text(font_family=font_name, max_size=font_size)
p = tf.add_paragraph()
if slide_txt_alignment == "left":
p.alignment = pptx.enum.text.PP_ALIGN.LEFT
elif slide_txt_alignment == "middle":
p.alignment = pptx.enum.text.PP_ALIGN.CENTER
else:
p.alignment = pptx.enum.text.PP_ALIGN.RIGHT
p.font.name = font_name
p.font.size = pptx.util.Pt(font_size)
p.text = content.get("english")
dst_dir.mkdir(parents=True, exist_ok=True)
dst_file = filename.with_suffix(".pptx")
dst_path = Path(dst_dir, dst_file.name)
if dst_path.exists():
dst_path.unlink()
prs.save(str(dst_path))
return dst_path | 0.002803 |
def get_metadata_value(metadata_source, key: str) -> typing.Any:
"""Get the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
"""
desc = session_key_map.get(key)
if desc is not None:
v = getattr(metadata_source, "session_metadata", dict())
for k in desc['path']:
v = v.get(k) if v is not None else None
return v
desc = key_map.get(key)
if desc is not None:
v = getattr(metadata_source, "metadata", dict())
for k in desc['path']:
v = v.get(k) if v is not None else None
return v
raise KeyError(key)
def __set_variable_watch(self, tid, address, size, action):
"""
Used by L{watch_variable} and L{stalk_variable}.
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to watch.
@type size: int
@param size: Size of variable to watch. The only supported sizes are:
byte (1), word (2), dword (4) and qword (8).
@type action: function
@param action: (Optional) Action callback function.
See L{define_hardware_breakpoint} for more details.
@rtype: L{HardwareBreakpoint}
@return: Hardware breakpoint at the requested address.
"""
# TODO
# We should merge the breakpoints instead of overwriting them.
# We'll have the same problem as watch_buffer and we'll need to change
# the API again.
if size == 1:
sizeFlag = self.BP_WATCH_BYTE
elif size == 2:
sizeFlag = self.BP_WATCH_WORD
elif size == 4:
sizeFlag = self.BP_WATCH_DWORD
elif size == 8:
sizeFlag = self.BP_WATCH_QWORD
else:
raise ValueError("Bad size for variable watch: %r" % size)
if self.has_hardware_breakpoint(tid, address):
warnings.warn(
"Hardware breakpoint in thread %d at address %s was overwritten!" \
% (tid, HexDump.address(address,
self.system.get_thread(tid).get_bits())),
BreakpointWarning)
bp = self.get_hardware_breakpoint(tid, address)
if bp.get_trigger() != self.BP_BREAK_ON_ACCESS or \
bp.get_watch() != sizeFlag:
self.erase_hardware_breakpoint(tid, address)
self.define_hardware_breakpoint(tid, address,
self.BP_BREAK_ON_ACCESS, sizeFlag, True, action)
bp = self.get_hardware_breakpoint(tid, address)
else:
self.define_hardware_breakpoint(tid, address,
self.BP_BREAK_ON_ACCESS, sizeFlag, True, action)
bp = self.get_hardware_breakpoint(tid, address)
return bp | 0.004852 |
def status_gps_send(self, csFails, gpsQuality, msgsType, posStatus, magVar, magDir, modeInd, force_mavlink1=False):
'''
This contains the status of the GPS readings
csFails : Number of times checksum has failed (uint16_t)
gpsQuality : The quality indicator, 0=fix not available or invalid, 1=GPS fix, 2=C/A differential GPS, 6=Dead reckoning mode, 7=Manual input mode (fixed position), 8=Simulator mode, 9= WAAS a (uint8_t)
msgsType : Indicates if GN, GL or GP messages are being received (uint8_t)
posStatus : A = data valid, V = data invalid (uint8_t)
magVar : Magnetic variation, degrees (float)
magDir : Magnetic variation direction E/W. Easterly variation (E) subtracts from True course and Westerly variation (W) adds to True course (int8_t)
modeInd : Positioning system mode indicator. A - Autonomous;D-Differential; E-Estimated (dead reckoning) mode;M-Manual input; N-Data not valid (uint8_t)
'''
return self.send(self.status_gps_encode(csFails, gpsQuality, msgsType, posStatus, magVar, magDir, modeInd), force_mavlink1=force_mavlink1) | 0.007446 |
def sync_required(func):
"""Decorate methods when synchronizing repository is required."""
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self._keepSynchronized:
r = func(self, *args, **kwargs)
else:
state = self._load_state()
#print("-----------> ",state, self.state)
if state is None:
r = func(self, *args, **kwargs)
elif state == self.state:
r = func(self, *args, **kwargs)
else:
warnings.warn("Repository at '%s' is out of date. Need to load it again to avoid conflict."%self.path)
r = None
return r
return wrapper | 0.005682 |
def sleep_for_rate_limit(self):
"""The fetching process sleeps until the rate limit is restored or
raises a RateLimitError exception if sleep_for_rate flag is disabled.
"""
if self.rate_limit is not None and self.rate_limit <= self.min_rate_to_sleep:
seconds_to_reset = self.calculate_time_to_reset()
if seconds_to_reset < 0:
logger.warning("Value of sleep for rate limit is negative, reset it to 0")
seconds_to_reset = 0
cause = "Rate limit exhausted."
if self.sleep_for_rate:
logger.info("%s Waiting %i secs for rate limit reset.", cause, seconds_to_reset)
time.sleep(seconds_to_reset)
else:
raise RateLimitError(cause=cause, seconds_to_reset=seconds_to_reset) | 0.008343 |
def windows_df(self):
"""Get Windows (W) W-row, W-col and W-index of windows e.g. loaded with :meth:`block_windows` as a dataframe.
Returns:
[dataframe] -- A dataframe with the window information and indices (row, col, index).
"""
import pandas as pd
if self.windows is None:
raise Exception("You need to call the block_windows or windows before.")
df_wins = []
for row, col, win in zip(self.windows_row, self.windows_col, self.windows):
df_wins.append(pd.DataFrame({"row":[row], "col":[col], "Window":[win]}))
df_wins = pd.concat(df_wins).set_index(["row", "col"])
df_wins["window_index"] = range(df_wins.shape[0])
df_wins = df_wins.sort_index()
return df_wins | 0.012706 |
def _get_cron_info():
'''
Returns the proper owner, group owner and path to the cron directory
'''
owner = 'root'
if __grains__['os'] == 'FreeBSD':
group = 'wheel'
crontab_dir = '/var/cron/tabs'
elif __grains__['os'] == 'OpenBSD':
group = 'crontab'
crontab_dir = '/var/cron/tabs'
elif __grains__['os_family'] == 'Solaris':
group = 'root'
crontab_dir = '/var/spool/cron/crontabs'
elif __grains__['os'] == 'MacOS':
group = 'wheel'
crontab_dir = '/usr/lib/cron/tabs'
else:
group = 'root'
crontab_dir = '/var/spool/cron'
return owner, group, crontab_dir | 0.001511 |
def unresolve_filename(self, package_dir, filename):
"""Retrieves the probable source path from the output filename. Pass
in a .css path to get out a .scss path.
:param package_dir: the path of the package directory
:type package_dir: :class:`str`
:param filename: the css filename
:type filename: :class:`str`
:returns: the scss filename
:rtype: :class:`str`
"""
filename, _ = os.path.splitext(filename)
if self.strip_extension:
for ext in ('.scss', '.sass'):
test_path = os.path.join(
package_dir, self.sass_path, filename + ext,
)
if os.path.exists(test_path):
return filename + ext
else: # file not found, let it error with `.scss` extension
return filename + '.scss'
else:
return filename | 0.002146 |
def do_build(self):
"""
We need this hack, else 'self' would be replaced by __iter__.next().
"""
tmp = self.explicit
self.explicit = True
b = super(KeyShareEntry, self).do_build()
self.explicit = tmp
return b | 0.007353 |
def get_ref(profile, ref):
"""Fetch a ref.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to fetch, e.g., ``heads/my-feature-branch``.
Returns
A dict with data about the ref.
"""
resource = "/refs/" + ref
data = api.get_request(profile, resource)
return prepare(data) | 0.001923 |
def _detect(self):
""" Detect uninitialized storage variables
Recursively visit the calls
Returns:
dict: [contract name] = set(storage variable uninitialized)
"""
results = []
self.results = []
self.visited_all_paths = {}
for contract in self.slither.contracts:
for function in contract.functions:
if function.is_implemented:
uninitialized_storage_variables = [v for v in function.local_variables if v.is_storage and v.uninitialized]
function.entry_point.context[self.key] = uninitialized_storage_variables
self._detect_uninitialized(function, function.entry_point, [])
for(function, uninitialized_storage_variable) in self.results:
var_name = uninitialized_storage_variable.name
info = "{} in {}.{} ({}) is a storage variable never initialiazed\n"
info = info.format(var_name, function.contract.name, function.name, uninitialized_storage_variable.source_mapping_str)
json = self.generate_json_result(info)
self.add_variable_to_json(uninitialized_storage_variable, json)
self.add_function_to_json(function, json)
results.append(json)
return results | 0.006061 |
def id_by_index(index, resources):
"""Helper method to fetch the id or address of a resource by its index
Args:
resources (list of objects): The resources to be paginated
index (integer): The index of the target resource
Returns:
str: The address or header_signature of the resource,
returns an empty string if not found
"""
if index < 0 or index >= len(resources):
return ''
try:
return resources[index].header_signature
except AttributeError:
return resources[index].address | 0.00321 |
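A small usage sketch with stand-in resource objects, assuming the helper above is in scope; the namedtuple is hypothetical and only mimics the header_signature attribute.
from collections import namedtuple

Block = namedtuple('Block', 'header_signature')
blocks = [Block('aaa'), Block('bbb')]

print(id_by_index(1, blocks))   # 'bbb'
print(id_by_index(5, blocks))   # '' (out of range)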
def drop_alembic_version_table():
"""Drop alembic_version table."""
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
alembic_version = _db.Table('alembic_version', _db.metadata,
autoload_with=_db.engine)
alembic_version.drop(bind=_db.engine) | 0.003165 |
def get_user_settings_module(project_root: str):
"""Return project-specific user settings module, if it exists.
:param project_root: Absolute path to project root directory.
A project settings file is a file named `YSETTINGS_FILE` found at the top
level of the project root dir.
Return `None` if project root dir is not specified,
or if no such file is found.
Raise an exception if a file is found, but not importable.
The YSettings file can define 2 special module-level functions that
interact with the YABT CLI & config system:
1. `extend_cli`, if defined, takes the YABT `parser` object and may extend
it, to add custom command-line flags for the project.
(careful not to collide with YABT flags...)
2. `extend_config`, if defined, takes the YABT `config` object and the
parsed `args` object (returned by the the parser), and may extend the
config - should be used to reflect custom project CLI flags in the
config object.
Beyond that, the settings module is available in YBuild files under
`conf.settings` (except for the 2 special functions that are removed).
"""
if project_root:
project_settings_file = os.path.join(project_root, YSETTINGS_FILE)
if os.path.isfile(project_settings_file):
settings_loader = SourceFileLoader(
'settings', project_settings_file)
return settings_loader.load_module() | 0.000687 |
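A hypothetical YSettings file sketching the two optional hooks described above; the flag name is made up and an argparse-style parser is assumed.
# Contents of a project's YSettings file (illustrative only).

def extend_cli(parser):
    # Add a project-specific command-line flag; avoid colliding with YABT's own flags.
    parser.add_argument('--deploy-env', default='staging')

def extend_config(config, args):
    # Reflect the custom CLI flag in the YABT config object.
    config.deploy_env = args.deploy_env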
def build(self):
'''
Returns
-------
Corpus
'''
constructor_kwargs = self._get_build_kwargs()
if type(self.raw_texts) == list:
constructor_kwargs['raw_texts'] = np.array(self.raw_texts)
else:
constructor_kwargs['raw_texts'] = self.raw_texts
return Corpus(**constructor_kwargs) | 0.043333 |
def fields(self):
"""Returns the list of field names of the model."""
return (self.attributes.values() + self.lists.values()
+ self.references.values()) | 0.01087 |
def water(target, temperature='pore.temperature', salinity='pore.salinity'):
r"""
Calculates density of pure water or seawater at atmospheric pressure
using Eq. (8) given by Sharqawy et al. [1]. Values at temperatures higher
than the normal boiling temperature are calculated at the saturation
pressure.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
temperature : string
The dictionary key containing the temperature values. Temperature must
be in Kelvin for this empirical equation to work
salinity : string
The dictionary key containing the salinity values. Salinity must be
expressed in g of salt per kg of solution (ppt).
Returns
-------
The density of water/seawater in [kg/m3]
Notes
-----
T must be in K, and S in g of salt per kg of phase, or ppt (parts per
thousand)
VALIDITY: 273 < T < 453 K; 0 < S < 160 g/kg;
ACCURACY: 0.1 %
References
----------
[1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and
Water Treatment, 2010.
"""
T = target[temperature]
if salinity in target.keys():
S = target[salinity]
else:
S = 0
a1 = 9.9992293295E+02
a2 = 2.0341179217E-02
a3 = -6.1624591598E-03
a4 = 2.2614664708E-05
a5 = -4.6570659168E-08
b1 = 8.0200240891E-01
b2 = -2.0005183488E-03
b3 = 1.6771024982E-05
b4 = -3.0600536746E-08
b5 = -1.6132224742E-11
TC = T-273.15
rho_w = a1 + a2*TC + a3*TC**2 + a4*TC**3 + a5*TC**4
d_rho = b1*S + b2*S*TC + b3*S*(TC**2) + b4*S*(TC**3) + b5*(S**2)*(TC**2)
rho_sw = rho_w + d_rho
value = rho_sw
return value | 0.000529 |
def log_url (self, url_data):
"""Write one node."""
node = self.get_node(url_data)
if node is not None:
self.writeln(u' "%s" [' % dotquote(node["label"]))
if self.has_part("realurl"):
self.writeln(u' href="%s",' % dotquote(node["url"]))
if node["dltime"] >= 0 and self.has_part("dltime"):
self.writeln(u" dltime=%d," % node["dltime"])
if node["size"] >= 0 and self.has_part("dlsize"):
self.writeln(u" size=%d," % node["size"])
if node["checktime"] and self.has_part("checktime"):
self.writeln(u" checktime=%d," % node["checktime"])
if self.has_part("extern"):
self.writeln(u" extern=%d," % node["extern"])
self.writeln(u" ];") | 0.00361 |
def docutils_sucks(spec):
"""
Yeah.
It doesn't allow using a class because it does stupid stuff like try to set
attributes on the callable object rather than just keeping a dict.
"""
base_url = VALIDATION_SPEC
ref_url = "https://json-schema.org/draft-04/json-schema-core.html#rfc.section.4.1"
schema_url = "https://json-schema.org/draft-04/json-schema-core.html#rfc.section.6"
def validator(name, raw_text, text, lineno, inliner):
"""
Link to the JSON Schema documentation for a validator.
Arguments:
name (str):
the name of the role in the document
raw_source (str):
the raw text (role with argument)
text (str):
the argument given to the role
lineno (int):
the line number
inliner (docutils.parsers.rst.states.Inliner):
the inliner
Returns:
tuple:
a 2-tuple of nodes to insert into the document and an
iterable of system messages, both possibly empty
"""
if text == "$ref":
return [nodes.reference(raw_text, text, refuri=ref_url)], []
elif text == "$schema":
return [nodes.reference(raw_text, text, refuri=schema_url)], []
# find the header in the validation spec containing matching text
header = spec.xpath("//h1[contains(text(), '{0}')]".format(text))
if len(header) == 0:
inliner.reporter.warning(
"Didn't find a target for {0}".format(text),
)
uri = base_url
else:
if len(header) > 1:
inliner.reporter.info(
"Found multiple targets for {0}".format(text),
)
# get the href from link in the header
uri = base_url + header[0].find('a').attrib["href"]
reference = nodes.reference(raw_text, text, refuri=uri)
return [reference], []
return validator | 0.001451 |
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result | 0.001757 |
def main(filename):
"""
Creates a PDF by embedding the first page from the given image and
writes some text to it.
@param[in] filename
The source filename of the image to embed.
"""
# Prepare font.
font_family = 'arial'
font = Font(font_family, bold=True)
if not font:
raise RuntimeError('No font found for %r' % font_family)
# Initialize PDF document on a stream.
with Document('output.pdf') as document:
# Initialize a new page and begin its context.
with document.Page() as ctx:
# Open the image to embed.
with Image(filename) as embed:
# Set the media box for the page to the same as the
# image to embed.
ctx.box = embed.box
# Embed the image.
ctx.embed(embed)
# Write some text.
ctx.add(Text('Hello World', font, size=14, x=100, y=60)) | 0.001049 |
def move(self, i, lat, lng, change_time=True):
'''move a fence point'''
if i < 0 or i >= self.count():
    print("Invalid fence point number %u" % i)
    return
self.points[i].lat = lat
self.points[i].lng = lng
# ensure we close the polygon
if i == 1:
self.points[self.count()-1].lat = lat
self.points[self.count()-1].lng = lng
if i == self.count() - 1:
self.points[1].lat = lat
self.points[1].lng = lng
if change_time:
self.last_change = time.time() | 0.006814 |
def get_maximum_index(indices):
"""Internally used."""
def _maximum_idx_single(idx):
if isinstance(idx, slice):
start = -1
stop = 0
if idx.start is not None:
start = idx.start.__index__()
if idx.stop is not None:
stop = idx.stop.__index__()
return max(start, stop - 1)
else:
return idx.__index__()
if isinstance(indices, tuple):
return max((_maximum_idx_single(i) for i in indices), default=-1)
else:
return _maximum_idx_single(indices) | 0.001701 |
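A few illustrative calls, assuming the helper above is in scope; note that a slice contributes max(start, stop - 1) and a missing start counts as -1.
print(get_maximum_index(5))                     # 5
print(get_maximum_index(slice(2, 10)))          # 9
print(get_maximum_index((slice(None, 4), 7)))   # 7
print(get_maximum_index(()))                    # -1 (empty tuple -> default)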
def timeline_hashtag(self, hashtag, local=False, max_id=None, min_id=None, since_id=None, limit=None, only_media=False):
"""
Fetch a timeline of toots with a given hashtag. The hashtag parameter
should not contain the leading #.
Set `local` to True to retrieve only instance-local tagged posts.
Set `only_media` to True to retrieve only statuses with media attachments.
Returns a list of `toot dicts`_.
"""
if hashtag.startswith("#"):
raise MastodonIllegalArgumentError("Hashtag parameter should omit leading #")
if max_id != None:
max_id = self.__unpack_id(max_id)
if min_id != None:
min_id = self.__unpack_id(min_id)
if since_id != None:
since_id = self.__unpack_id(since_id)
params_initial = locals()
if local == False:
del params_initial['local']
if only_media == False:
del params_initial['only_media']
url = '/api/v1/timelines/tag/{0}'.format(hashtag)
params = self.__generate_params(params_initial, ['hashtag'])
return self.__api_request('GET', url, params) | 0.017054 |
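A hedged usage sketch, assuming an authenticated Mastodon.py client named api; this performs a real network request and the hashtag is arbitrary.
toots = api.timeline_hashtag('python', local=True, limit=5)
for toot in toots:
    print(toot['created_at'], toot['url'])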
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False | 0.00349 |
def list_(saltenv='base', test=None):
'''
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
'''
sevent = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
master_key = salt.utils.master.get_master_key('root', __opts__)
__jid_event__.fire_event({'key': master_key}, 'salt/reactors/manage/list')
results = sevent.get_event(wait=30, tag='salt/reactors/manage/list-results')
reactors = results['reactors']
return reactors | 0.00314 |
def get_profile(session):
"""Get complete profile."""
try:
profile = session.get(PROFILE_URL).json()
if 'errorCode' in profile and profile['errorCode'] == '403':
raise MoparError("not logged in")
return profile
except JSONDecodeError:
raise MoparError("not logged in") | 0.003086 |
def demonize(self):
"""
do the double fork magic
"""
# check if a process is already running
if access(self.pid_file_name, F_OK):
# read the pid file
pid = self.read_pid()
try:
kill(pid, 0) # check if process is running
self.stderr.write("process is already running\n")
return False
except OSError as e:
if e.errno == errno.ESRCH:
# process is dead
self.delete_pid(force_del=True)
else:
self.stderr.write("demonize failed, something went wrong: %d (%s)\n" % (e.errno, e.strerror))
return False
try:
pid = fork()
if pid > 0:
# Exit from the first parent
timeout = time() + 60
while self.read_pid() is None:
self.stderr.write("waiting for pid..\n")
sleep(0.5)
if time() > timeout:
break
self.stderr.write("pid is %d\n" % self.read_pid())
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 1. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
# os.chdir("/")
setsid()
umask(0)
# Do the Second fork
try:
pid = fork()
if pid > 0:
# Exit from the second parent
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 2. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Redirect standard file descriptors
# sys.stdout.flush()
# sys.stderr.flush()
# si = file(self.stdin, 'r')
# so = file(self.stdout, 'a+')
# se = file(self.stderr, 'a+',
# os.dup2(si.fileno(), sys.stdin.fileno())
# os.dup2(so.fileno(), sys.stdout.fileno())
# os.dup2(se.fileno(), sys.stderr.fileno())
# Write the PID file
#atexit.register(self.delete_pid)
self.write_pid()
return True | 0.002668 |
def _prepare_results(self, results):
"""
Sort results by score if not done before (faster, if we have no values to
retrieve, or slice)
"""
# if we want a result sorted by a score, and if we have a full result
# (no slice), we can do it know, by creating keys for each values with
# the sorted set score, and sort on them
if self._sort_by_sortedset_after and (len(results) > 1 or self._values):
conn = self.cls.get_connection()
sort_params = {}
base_tmp_key, tmp_keys = self._prepare_sort_by_score(results, sort_params)
# compose the set to sort
final_set = '%s_final_set' % base_tmp_key
conn.sadd(final_set, *results)
# apply the sort
results = conn.sort(final_set, **sort_params)
# finally delete all temporary keys
conn.delete(*(tmp_keys + [final_set, base_tmp_key]))
if self._store:
# if store, redis doesn't return result, so don't return anything here
return
if self._values and self._values['mode'] != 'flat':
results = self._to_values(results)
return super(ExtendedCollectionManager, self)._prepare_results(results) | 0.004713 |
def rfl2norm2(xf, xs, axis=(0, 1)):
r"""
Compute the squared :math:`\ell_2` norm in the DFT domain, taking
into account the unnormalised DFT scaling, i.e. given the DFT of a
multi-dimensional array computed via :func:`rfftn`, return the
squared :math:`\ell_2` norm of the original array.
Parameters
----------
xf : array_like
Input array
xs : sequence of ints
Shape of original array to which :func:`rfftn` was applied to
obtain the input array
axis : sequence of ints, optional (default (0,1))
Axes on which the input is in the frequency domain
Returns
-------
x : float
:math:`\|\mathbf{x}\|_2^2` where the input array is the result of
applying :func:`rfftn` to the specified axes of multi-dimensional
array :math:`\mathbf{x}`
"""
scl = 1.0 / np.prod(np.array([xs[k] for k in axis]))
slc0 = (slice(None),) * axis[-1]
nrm0 = np.linalg.norm(xf[slc0 + (0,)])
idx1 = (xs[axis[-1]] + 1) // 2
nrm1 = np.linalg.norm(xf[slc0 + (slice(1, idx1),)])
if xs[axis[-1]] % 2 == 0:
nrm2 = np.linalg.norm(xf[slc0 + (slice(-1, None),)])
else:
nrm2 = 0.0
return scl*(nrm0**2 + 2.0*nrm1**2 + nrm2**2) | 0.000813 |
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets) | 0.006309 |
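The grouping step relies on itertools.groupby, which only merges adjacent keys; a standalone illustration with made-up tuples, already ordered by screen name as parse_json is assumed to yield them.
from itertools import groupby

pairs = [('alice', 'hi'), ('alice', 'there'), ('bob', 'yo')]
for name, grp in groupby(pairs, key=lambda x: x[0]):
    print(name, ' '.join(t[1] for t in grp))
# alice hi there
# bob yo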
def input_validate_nonce(nonce, name='nonce', pad = False):
""" Input validation for nonces. """
if type(nonce) is not str:
raise pyhsm.exception.YHSM_WrongInputType( \
name, str, type(nonce))
if len(nonce) > pyhsm.defines.YSM_AEAD_NONCE_SIZE:
raise pyhsm.exception.YHSM_InputTooLong(
name, pyhsm.defines.YSM_AEAD_NONCE_SIZE, len(nonce))
if pad:
return nonce.ljust(pyhsm.defines.YSM_AEAD_NONCE_SIZE, chr(0x0))
else:
return nonce | 0.007937 |
def make_fileitem_sizeinbytes(filesize, condition='is', negate=False):
"""
Create a node for FileItem/SizeInBytes
:return: An IndicatorItem represented as an Element node
"""
document = 'FileItem'
search = 'FileItem/SizeInBytes'
content_type = 'int'
content = filesize
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate)
return ii_node | 0.006224 |
def mtf_range(mesh, dim, dtype, name=None):
"""Create a 1d mesh tensor with a range from [0, dim.size).
Call externally as mtf.range()
Args:
mesh: a Mesh
dim: a Dimension
dtype: a tf.DType
name: an optional string
Returns:
a Tensor
"""
dim = convert_to_dimension(dim)
with tf.variable_scope(name, default_name="range"):
if dtype == tf.bfloat16:
# tf.range(dtype=bfloat16) gives the wrong shape.
# TODO(noam): report the bug.
tf_range = tf.cast(tf.range(dim.size), tf.bfloat16)
else:
tf_range = tf.range(dim.size, dtype=dtype)
return import_tf_tensor(mesh, tf_range, shape=Shape([dim])) | 0.012158 |
def from_short_lines_text(self, text: str):
"""
Example from Völsupá 28
>>> stanza = "Ein sat hon úti,\\nþá er inn aldni kom\\nyggjungr ása\\nok í augu leit.\\nHvers fregnið mik?\\nHví freistið mín?\\nAllt veit ek, Óðinn,\\nhvar þú auga falt,\\ní inum mæra\\nMímisbrunni.\\nDrekkr mjöð Mímir\\nmorgun hverjan\\naf veði Valföðrs.\\nVituð ér enn - eða hvat?"
>>> us = UnspecifiedStanza()
>>> us.from_short_lines_text(stanza)
>>> [sl.text for sl in us.short_lines]
['Ein sat hon úti,', 'þá er inn aldni kom', 'yggjungr ása', 'ok í augu leit.', 'Hvers fregnið mik?', 'Hví freistið mín?', 'Allt veit ek, Óðinn,', 'hvar þú auga falt,', 'í inum mæra', 'Mímisbrunni.', 'Drekkr mjöð Mímir', 'morgun hverjan', 'af veði Valföðrs.', 'Vituð ér enn - eða hvat?']
>>> us.long_lines
:param text:
:return:
"""
Metre.from_short_lines_text(self, text)
self.short_lines = [ShortLine(line) for line in text.split("\n") if line]
self.long_lines = None | 0.004798 |
def build_engine_session(connection, echo=False, autoflush=None, autocommit=None, expire_on_commit=None,
scopefunc=None):
"""Build an engine and a session.
:param str connection: An RFC-1738 database connection string
:param bool echo: Turn on echoing SQL
:param Optional[bool] autoflush: Defaults to True if not specified in kwargs or configuration.
:param Optional[bool] autocommit: Defaults to False if not specified in kwargs or configuration.
:param Optional[bool] expire_on_commit: Defaults to True if not specified in kwargs or configuration.
:param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session`
:rtype: tuple[Engine,Session]
From the Flask-SQLAlchemy documentation:
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function. If it's not provided, Flask's app
context stack identity is used. This will ensure that sessions are
created and removed with the request/response cycle, and should be fine
in most cases.
"""
if connection is None:
raise ValueError('can not build engine when connection is None')
engine = create_engine(connection, echo=echo)
autoflush = autoflush if autoflush is not None else False
autocommit = autocommit if autocommit is not None else False
expire_on_commit = expire_on_commit if expire_on_commit is not None else True
log.debug('auto flush: %s, auto commit: %s, expire on commit: %s', autoflush, autocommit, expire_on_commit)
#: A SQLAlchemy session maker
session_maker = sessionmaker(
bind=engine,
autoflush=autoflush,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
)
#: A SQLAlchemy session object
session = scoped_session(
session_maker,
scopefunc=scopefunc
)
return engine, session | 0.004188 |
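A minimal usage sketch with an in-memory SQLite database, assuming the function above is importable; 'sqlite://' is an RFC-1738 connection string as noted in the docstring.
engine, session = build_engine_session('sqlite://', echo=False)
print(engine.dialect.name)   # 'sqlite'
session.remove()             # release the scoped session when done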
def get_attrname_by_colname(instance, name):
""" Get value from SQLAlchemy instance by column name
:Parameters:
- `instance`: SQLAlchemy model instance.
- `name`: Column name
:Examples:
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> class MPTTPages(Base):
... __tablename__ = "mptt_pages"
... id = Column(Integer, primary_key=True)
... left = Column("lft", Integer, nullable=False)
>>> get_attrname_by_colname(MPTTPages(), 'lft')
'left'
"""
for attr, column in list(sqlalchemy.inspect(instance.__class__).c.items()):
if column.name == name:
return attr | 0.001333 |
def _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag):
"""Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein name.
:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein name
"""
if 'name' in headerInfo:
proteinName = headerInfo['name']
else:
proteinName = headerInfo['id']
if isDecoy:
proteinName = ''.join((decoyTag, proteinName))
return proteinName | 0.00299 |
def _validate_iterable(iterable_type, value):
"""Convert the iterable to iterable_type, or raise a Configuration
exception.
"""
if isinstance(value, six.string_types):
msg = "Invalid iterable of type(%s): %s"
raise ValidationError(msg % (type(value), value))
try:
return iterable_type(value)
except TypeError:
raise ValidationError("Invalid iterable: %s" % (value)) | 0.00237 |
def mouseDrag(self, x, y, step=1):
""" Move the mouse point to position (x, y) in increments of step
"""
log.debug('mouseDrag %d,%d', x, y)
if x < self.x:
xsteps = [self.x - i for i in range(step, self.x - x + 1, step)]
else:
xsteps = range(self.x, x, step)
if y < self.y:
ysteps = [self.y - i for i in range(step, self.y - y + 1, step)]
else:
ysteps = range(self.y, y, step)
for ypos in ysteps:
time.sleep(.2)
self.mouseMove(self.x, ypos)
for xpos in xsteps:
time.sleep(.2)
self.mouseMove(xpos, self.y)
self.mouseMove(x, y)
return self | 0.002759 |
def get_extended_summary(self, s, base=None):
"""Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary"""
# Remove the summary and dedent
s = self._remove_summary(s)
ret = ''
if not self._all_sections_patt.match(s):
m = self._extended_summary_patt.match(s)
if m is not None:
ret = m.group().strip()
if base is not None:
self.params[base + '.summary_ext'] = ret
return ret | 0.00211 |
def get_switchable_as_iter(network, component, attr, snapshots, inds=None):
"""
Return an iterator over snapshots for a time-varying component
attribute with values for all non-time-varying components filled
in with the default values for the attribute.
Parameters
----------
network : pypsa.Network
component : string
Component object name, e.g. 'Generator' or 'Link'
attr : string
Attribute name
snapshots : pandas.Index
Restrict to these snapshots rather than network.snapshots.
inds : pandas.Index
Restrict to these items rather than all of network.{generators,..}.index
Returns
-------
pandas.DataFrame
Examples
--------
>>> get_switchable_as_iter(network, 'Generator', 'p_max_pu', snapshots)
"""
df = network.df(component)
pnl = network.pnl(component)
index = df.index
varying_i = pnl[attr].columns
fixed_i = df.index.difference(varying_i)
if inds is not None:
inds = pd.Index(inds)
index = inds.intersection(index)
varying_i = inds.intersection(varying_i)
fixed_i = inds.intersection(fixed_i)
# Short-circuit only fixed
if len(varying_i) == 0:
return repeat(df.loc[fixed_i, attr], len(snapshots))
def is_same_indices(i1, i2): return len(i1) == len(i2) and (i1 == i2).all()
if is_same_indices(fixed_i.append(varying_i), index):
def reindex_maybe(s): return s
else:
def reindex_maybe(s): return s.reindex(index)
return (
reindex_maybe(df.loc[fixed_i, attr].append(pnl[attr].loc[sn, varying_i]))
for sn in snapshots
) | 0.001813 |
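A minimal, self-contained sketch (component names made up) of the per-snapshot merge this iterator performs, using plain pandas instead of a pypsa network:
import pandas as pd

snapshots = pd.date_range("2024-01-01", periods=3)                   # daily snapshots
fixed = pd.Series({"gen_a": 1.0, "gen_b": 0.8})                      # static default values
varying = pd.DataFrame({"gen_c": [0.2, 0.5, 0.9]}, index=snapshots)  # time-varying values

# One Series per snapshot: static defaults plus that snapshot's varying values.
for sn in snapshots:
    row = pd.concat([fixed, varying.loc[sn]])
    print(sn, row.to_dict())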
def _translate_residue(self, selection, default_atomname='CA'):
"""Translate selection for a single res to make_ndx syntax."""
m = self.RESIDUE.match(selection)
if not m:
errmsg = "Selection {selection!r} is not valid.".format(**vars())
logger.error(errmsg)
raise ValueError(errmsg)
gmx_resid = self.gmx_resid(int(m.group('resid'))) # magic offset correction
residue = m.group('aa')
if len(residue) == 1:
gmx_resname = utilities.convert_aa_code(residue) # only works for AA
else:
gmx_resname = residue # use 3-letter for any resname
gmx_atomname = m.group('atom')
if gmx_atomname is None:
gmx_atomname = default_atomname
return {'resname':gmx_resname, 'resid':gmx_resid, 'atomname':gmx_atomname} | 0.011364 |
def load_firmware(self, path, vrf_management_name=None):
"""Update firmware version on device by loading provided image, performs following steps:
1. Copy bin file from remote tftp server.
2. Clear in run config boot system section.
3. Set downloaded bin file as boot file and then reboot device.
4. Check if firmware was successfully installed.
:param path: full path to firmware file on ftp/tftp location
:param vrf_management_name: VRF Name
:return: status / exception
"""
url = UrlParser.parse_url(path)
required_keys = [UrlParser.FILENAME, UrlParser.HOSTNAME, UrlParser.SCHEME]
if not url or not all(key in url for key in required_keys):
raise Exception(self.__class__.__name__, "Path is wrong or empty")
self.load_firmware_flow.execute_flow(path, vrf_management_name, self._timeout) | 0.005405 |
def create_startup_config(self):
""" Startup and shutdown commands config
Used by agent.py on the target
"""
cfg_path = "agent_startup_{}.cfg".format(self.host)
if os.path.isfile(cfg_path):
logger.info(
                'Found an agent startup config file in the working directory with the same name as the one created for host %s.\n'
                'Creating a new one via tempfile. This will affect predictable filenames for agent artefacts',
self.host)
handle, cfg_path = tempfile.mkstemp('.cfg', 'agent_')
os.close(handle)
try:
config = ConfigParser.RawConfigParser()
# FIXME incinerate such a string formatting inside a method call
# T_T
config.add_section('startup')
[
config.set('startup', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.startups)
]
config.add_section('shutdown')
[
config.set('shutdown', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.shutdowns)
]
config.add_section('source')
[
config.set('source', "file%s" % idx, path)
for idx, path in enumerate(self.sources)
]
with open(cfg_path, 'w') as fds:
config.write(fds)
except Exception as exc:
logger.error(
'Error trying to create monitoring startups config. Malformed? %s',
exc,
exc_info=True)
return cfg_path | 0.003088 |
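A small sketch of the file layout this method produces, written with Python 3's configparser (the commands and path are placeholders):
import configparser
import os
import tempfile

config = configparser.RawConfigParser()
config.add_section('startup')
config.set('startup', 'cmd0', 'echo start')         # placeholder startup command
config.add_section('shutdown')
config.set('shutdown', 'cmd0', 'echo stop')         # placeholder shutdown command
config.add_section('source')
config.set('source', 'file0', '/tmp/metrics.log')   # placeholder source path

handle, cfg_path = tempfile.mkstemp('.cfg', 'agent_')
os.close(handle)
with open(cfg_path, 'w') as fds:
    config.write(fds)
with open(cfg_path) as fds:
    print(fds.read())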
def main(command_line=True, **kwargs):
"""
NAME
iodp_jr6_magic.py
DESCRIPTION
converts shipboard .jr6 format files to magic_measurements format files
SYNTAX
iodp_jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
        -fsa FILE: specify er_samples.txt file for sample name lookup,
default is 'er_samples.txt'
-loc HOLE : specify hole name (U1456A)
-A: don't average replicate measurements
INPUT
JR6 .jr6 format file
"""
def fix_separation(filename, new_filename):
old_file = open(filename, 'r')
data = old_file.readlines()
new_data = []
for line in data:
new_line = line.replace('-', ' -')
            new_line = new_line.replace('  ', ' ')
new_data.append(new_line)
new_file = open(new_filename, 'w')
for s in new_data:
new_file.write(s)
old_file.close()
new_file.close()
return new_filename
def old_fix_separation(filename, new_filename):
old_file = open(filename, 'r')
data = old_file.readlines()
new_data = []
for line in data:
new_line = []
for i in line.split():
if '-' in i[1:]:
lead_char = '-' if i[0] == '-' else ''
if lead_char:
v = i[1:].split('-')
else:
v = i.split('-')
new_line.append(lead_char + v[0])
new_line.append('-' + v[1])
else:
new_line.append(i)
new_line = (' '.join(new_line)) + '\n'
new_data.append(new_line)
new_file = open(new_filename, 'w')
for s in new_data:
new_file.write(s)
new_file.close()
old_file.close()
return new_filename
# initialize some stuff
noave=0
volume=2.5**3 #default volume is a 2.5cm cube
inst=""
samp_con,Z='5',""
missing=1
demag="N"
er_location_name="unknown"
citation='This study'
args=sys.argv
meth_code="LP-NO"
version_num=pmag.get_version()
dir_path='.'
MagRecs=[]
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
mag_file = ''
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind=args.index("-F")
meas_file = args[ind+1]
if '-fsa' in args:
ind = args.index("-fsa")
samp_file = args[ind+1]
if samp_file[0]!='/':
samp_file = os.path.join(input_dir_path, samp_file)
try:
open(samp_file,'r')
ErSamps,file_type=pmag.magic_read(samp_file)
        except Exception:
print(samp_file,' not found: ')
print(' download csv file and import to MagIC with iodp_samples_magic.py')
if '-f' in args:
ind = args.index("-f")
mag_file= args[ind+1]
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-A" in args:
noave=1
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file', '')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
specnum = kwargs.get('specnum', 1)
samp_con = kwargs.get('samp_con', '1')
if len(str(samp_con)) > 1:
samp_con, Z = samp_con.split('-')
else:
Z = ''
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
# format variables
meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
meth_code=meth_code.strip(":")
if mag_file:
mag_file = os.path.join(input_dir_path, mag_file)
samp_file = os.path.join(input_dir_path, samp_file)
meas_file = os.path.join(output_dir_path, meas_file)
# validate variables
if not mag_file:
print("You must provide an IODP_jr6 format file")
return False, "You must provide an IODP_jr6 format file"
if not os.path.exists(mag_file):
print('The input file you provided: {} does not exist.\nMake sure you have specified the correct filename AND correct input directory name.'.format(mag_file))
return False, 'The input file you provided: {} does not exist.\nMake sure you have specified the correct filename AND correct input directory name.'.format(mag_file)
if not os.path.exists(samp_file):
print("Your input directory:\n{}\nmust contain an er_samples.txt file, or you must explicitly provide one".format(input_dir_path))
return False, "Your input directory:\n{}\nmust contain an er_samples.txt file, or you must explicitly provide one".format(input_dir_path)
# parse data
temp = os.path.join(output_dir_path, 'temp.txt')
fix_separation(mag_file, temp)
samples, filetype = pmag.magic_read(samp_file)
with open(temp, 'r') as finput:
lines = finput.readlines()
os.remove(temp)
for line in lines:
MagRec = {}
line = line.split()
spec_text_id = line[0].split('_')[1]
SampRecs=pmag.get_dictitem(samples,'er_sample_alternatives',spec_text_id,'has')
if len(SampRecs)>0: # found one
MagRec['er_specimen_name']=SampRecs[0]['er_sample_name']
MagRec['er_sample_name']=MagRec['er_specimen_name']
MagRec['er_site_name']=MagRec['er_specimen_name']
MagRec["er_citation_names"]="This study"
MagRec['er_location_name']=er_location_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["treatment_ac_field"]='0'
volume=float(SampRecs[0]['sample_volume'])
x = float(line[4])
y = float(line[3])
negz = float(line[2])
cart=np.array([x,y,-negz]).transpose()
direction = pmag.cart2dir(cart).transpose()
expon = float(line[5])
magn_volume = direction[2] * (10.0**expon)
moment = magn_volume * volume
MagRec["measurement_magn_moment"]=str(moment)
MagRec["measurement_magn_volume"]=str(magn_volume)#str(direction[2] * (10.0 ** expon))
MagRec["measurement_dec"]='%7.1f'%(direction[0])
MagRec["measurement_inc"]='%7.1f'%(direction[1])
step = line[1]
if step == 'NRM':
meas_type="LT-NO"
elif step[0:2] == 'AD':
meas_type="LT-AF-Z"
treat=float(step[2:])
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
elif step[0:2] == 'TD':
meas_type="LT-T-Z"
treat=float(step[2:])
MagRec["treatment_temp"]='%8.3e' % (treat+273.) # temp in kelvin
elif step[0:3]=='ARM': #
meas_type="LT-AF-I"
                treat=float(step[3:])
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
MagRec["treatment_dc_field"]='%8.3e' %(50e-6) # assume 50uT DC field
MagRec["measurement_description"]='Assumed DC field - actual unknown'
elif step[0:3]=='IRM': #
meas_type="LT-IRM"
treat=float(step[3:])
MagRec["treatment_dc_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
else:
                print('unknown treatment type for ', step)
                return False, 'unknown treatment type for {}'.format(step)
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec.copy())
else:
            print('sample name not found: ', spec_text_id)
MagOuts=pmag.measurements_methods(MagRecs,noave)
file_created, error_message = pmag.magic_write(meas_file,MagOuts,'magic_measurements')
if file_created:
return True, meas_file
else:
return False, 'Results not written to file' | 0.0129 |
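The numerical core of the parser is the conversion of the (x, y, -z) Cartesian moment into declination, inclination and intensity. A standalone numpy sketch of that conversion (equivalent in spirit to pmag.cart2dir, not a drop-in replacement):
import numpy as np

def cart2dir_sketch(cart):
    # Cartesian (x, y, z) -> (declination deg, inclination deg, intensity)
    x, y, z = cart
    R = np.sqrt(x**2 + y**2 + z**2)
    dec = np.degrees(np.arctan2(y, x)) % 360.0
    inc = np.degrees(np.arcsin(z / R))
    return dec, inc, R

print(cart2dir_sketch(np.array([1.0, 1.0, -1.0])))  # -> (45.0, -35.26..., 1.73...)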
def create_packet(header, data):
"""Creates an IncomingPacket object from header and data
This method is for testing purposes
"""
packet = IncomingPacket()
packet.header = header
packet.data = data
if len(header) == HeronProtocol.HEADER_SIZE:
packet.is_header_read = True
if len(data) == packet.get_datasize():
packet.is_complete = True
return packet | 0.007407 |
def check_board(self):
"""Check the board status and give feedback."""
num_mines = np.sum(self.info_map == 12)
num_undiscovered = np.sum(self.info_map == 11)
num_questioned = np.sum(self.info_map == 10)
if num_mines > 0:
return 0
elif np.array_equal(self.info_map == 9, self.mine_map):
return 1
elif num_undiscovered > 0 or num_questioned > 0:
return 2 | 0.004494 |
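A toy check under the apparent cell encoding (9 = flagged mine, 11 = undiscovered, 12 = exploded mine); the status codes mirror the method: 0 = lost, 1 = won, 2 = still playing:
import numpy as np

mine_map = np.array([[True, False],
                     [False, False]])
# Every mine flagged (9) and every safe cell revealed: a won board.
info_map = np.array([[9, 1],
                     [1, 0]])
if np.sum(info_map == 12) > 0:
    status = 0
elif np.array_equal(info_map == 9, mine_map):
    status = 1
else:
    status = 2
print(status)  # -> 1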
def show_item_v3(h):
"""Show any RAR3 record.
"""
st = rar3_type(h.type)
xprint("%s: hdrlen=%d datlen=%d", st, h.header_size, h.add_size)
if h.type in (rf.RAR_BLOCK_FILE, rf.RAR_BLOCK_SUB):
if h.host_os == rf.RAR_OS_UNIX:
s_mode = "0%o" % h.mode
else:
s_mode = "0x%x" % h.mode
xprint(" flags=0x%04x:%s", h.flags, get_file_flags(h.flags))
        if 0 <= h.host_os < len(os_list):
s_os = os_list[h.host_os]
else:
s_os = "?"
xprint(" os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d",
h.host_os, s_os,
h.extract_version, s_mode, h.compress_type,
h.compress_size, h.file_size, h.volume)
ucrc = (h.CRC + (1 << 32)) & ((1 << 32) - 1)
xprint(" crc=0x%08x (%d) date_time=%s", ucrc, h.CRC, fmt_time(h.date_time))
xprint(" name=%s", h.filename)
if h.mtime:
xprint(" mtime=%s", fmt_time(h.mtime))
if h.ctime:
xprint(" ctime=%s", fmt_time(h.ctime))
if h.atime:
xprint(" atime=%s", fmt_time(h.atime))
if h.arctime:
xprint(" arctime=%s", fmt_time(h.arctime))
elif h.type == rf.RAR_BLOCK_MAIN:
xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, main_bits))
elif h.type == rf.RAR_BLOCK_ENDARC:
xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, endarc_bits))
elif h.type == rf.RAR_BLOCK_MARK:
xprint(" flags=0x%04x:", h.flags)
else:
xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, generic_bits))
if h.comment is not None:
cm = repr(h.comment)
if cm[0] == 'u':
cm = cm[1:]
xprint(" comment=%s", cm) | 0.002243 |
def report_message(report):
"""Report message."""
body = 'Error: return code != 0\n\n'
body += 'Archive: {}\n\n'.format(report['archive'])
body += 'Docker image: {}\n\n'.format(report['image'])
body += 'Docker container: {}\n\n'.format(report['container_id'])
return body | 0.00339 |
def run_work(self):
"""Run attacks and defenses"""
if os.path.exists(LOCAL_EVAL_ROOT_DIR):
sudo_remove_dirtree(LOCAL_EVAL_ROOT_DIR)
self.run_attacks()
self.run_defenses() | 0.010417 |
def getPreprocessor(self, service_request):
"""
Gets a preprocessor callable based on the service_request. This is
granular, looking at the service method first, then at the service
level and finally to see if there is a global preprocessor function
for the gateway. Returns C{None} if one could not be found.
"""
preproc = service_request.service.getPreprocessor(service_request)
if preproc is None:
return self.preprocessor
return preproc | 0.003802 |
def check_python_matlab_architecture(bits, lib_dir):
"""Make sure we can find corresponding installation of Python and MATLAB."""
if not os.path.isdir(lib_dir):
raise RuntimeError("It seem that you are using {bits} version of Python, but there's no matching MATLAB installation in {lib_dir}.".format(bits=bits, lib_dir=lib_dir)) | 0.008721 |
def _GetResourceTimestamps(self, pefile_object):
"""Retrieves timestamps from resource directory entries, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[int]: resource timestamps.
"""
timestamps = []
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_RESOURCE'):
return timestamps
for entrydata in pefile_object.DIRECTORY_ENTRY_RESOURCE.entries:
directory = entrydata.directory
timestamp = getattr(directory, 'TimeDateStamp', 0)
if timestamp:
timestamps.append(timestamp)
return timestamps | 0.008446 |
def get_ntgos_sorted(self, hdrgos):
"""Return sorted Grouper namedtuples if there are user GO IDs underneath."""
go2nt = self.grprobj.go2nt
return sorted([go2nt[go] for go in hdrgos if go in go2nt], key=self.fncsortnt) | 0.016529 |
def cross_entropy_neighbors_in_rep(adata, use_rep, n_points=3):
"""Compare neighborhood graph of representation based on cross entropy.
`n_points` denotes the number of points to add as highlight annotation.
Returns
-------
The cross entropy and the geodesic-distance-weighted cross entropy as
``entropy, geo_entropy_d, geo_entropy_o``.
Adds the most overlapping or disconnected points as annotation to `adata`.
"""
# see below why we need this
if 'X_diffmap' not in adata.obsm.keys():
raise ValueError('Run `tl.diffmap` on `adata`, first.')
adata_ref = adata # simple renaming, don't need copy here
adata_cmp = adata.copy()
n_neighbors = adata_ref.uns['neighbors']['params']['n_neighbors']
from .neighbors import neighbors
neighbors(adata_cmp, n_neighbors=n_neighbors, use_rep=use_rep)
from .tools.diffmap import diffmap
diffmap(adata_cmp)
graph_ref = adata_ref.uns['neighbors']['connectivities']
graph_cmp = adata_cmp.uns['neighbors']['connectivities']
graph_ref = graph_ref.tocoo() # makes a copy
graph_cmp = graph_cmp.tocoo()
edgeset_ref = {e for e in zip(graph_ref.row, graph_ref.col)}
edgeset_cmp = {e for e in zip(graph_cmp.row, graph_cmp.col)}
edgeset_union = list(edgeset_ref.union(edgeset_cmp))
edgeset_union_indices = tuple(zip(*edgeset_union))
edgeset_union_indices = (np.array(edgeset_union_indices[0]), np.array(edgeset_union_indices[1]))
n_edges_ref = len(graph_ref.nonzero()[0])
n_edges_cmp = len(graph_cmp.nonzero()[0])
n_edges_union = len(edgeset_union)
logg.msg(
'... n_edges_ref', n_edges_ref,
'n_edges_cmp', n_edges_cmp,
'n_edges_union', n_edges_union)
graph_ref = graph_ref.tocsr() # need a copy of the csr graph anyways
graph_cmp = graph_cmp.tocsr()
p_ref = graph_ref[edgeset_union_indices].A1
p_cmp = graph_cmp[edgeset_union_indices].A1
    # the following is how one compares it to log_loss from sklearn
# p_ref[p_ref.nonzero()] = 1
# from sklearn.metrics import log_loss
# print(log_loss(p_ref, p_cmp))
p_cmp = np.clip(p_cmp, EPS, 1-EPS)
ratio = np.clip(p_ref / p_cmp, EPS, None)
ratio_1m = np.clip((1 - p_ref) / (1 - p_cmp), EPS, None)
entropy = np.sum(p_ref * np.log(ratio) + (1-p_ref) * np.log(ratio_1m))
n_edges_fully_connected = (graph_ref.shape[0]**2 - graph_ref.shape[0])
entropy /= n_edges_fully_connected
fraction_edges = n_edges_ref / n_edges_fully_connected
naive_entropy = (fraction_edges * np.log(1./fraction_edges)
+ (1-fraction_edges) * np.log(1./(1-fraction_edges)))
logg.msg('cross entropy of naive sparse prediction {:.3e}'.format(naive_entropy))
logg.msg('cross entropy of random prediction {:.3e}'.format(-np.log(0.5)))
logg.info('cross entropy {:.3e}'.format(entropy))
# for manifold analysis, restrict to largest connected component in
# reference
# now that we clip at a quite high value below, this might not even be
# necessary
n_components, labels = scipy.sparse.csgraph.connected_components(graph_ref)
largest_component = np.arange(graph_ref.shape[0], dtype=int)
if n_components > 1:
component_sizes = np.bincount(labels)
logg.msg('largest component has size', component_sizes.max())
largest_component = np.where(
component_sizes == component_sizes.max())[0][0]
graph_ref_red = graph_ref.tocsr()[labels == largest_component, :]
graph_ref_red = graph_ref_red.tocsc()[:, labels == largest_component]
graph_ref_red = graph_ref_red.tocoo()
graph_cmp_red = graph_cmp.tocsr()[labels == largest_component, :]
graph_cmp_red = graph_cmp_red.tocsc()[:, labels == largest_component]
graph_cmp_red = graph_cmp_red.tocoo()
edgeset_ref_red = {e for e in zip(graph_ref_red.row, graph_ref_red.col)}
edgeset_cmp_red = {e for e in zip(graph_cmp_red.row, graph_cmp_red.col)}
edgeset_union_red = edgeset_ref_red.union(edgeset_cmp_red)
map_indices = np.where(labels == largest_component)[0]
edgeset_union_red = {
(map_indices[i], map_indices[j]) for (i, j) in edgeset_union_red}
from .neighbors import Neighbors
neigh_ref = Neighbors(adata_ref)
dist_ref = neigh_ref.distances_dpt # we expect 'X_diffmap' to be already present
neigh_cmp = Neighbors(adata_cmp)
dist_cmp = neigh_cmp.distances_dpt
d_cmp = np.zeros_like(p_ref)
d_ref = np.zeros_like(p_ref)
for i, e in enumerate(edgeset_union):
# skip contributions that are not in the largest component
if n_components > 1 and e not in edgeset_union_red:
continue
d_cmp[i] = dist_cmp[e]
d_ref[i] = dist_ref[e]
MAX_DIST = 1000
d_cmp = np.clip(d_cmp, 0.1, MAX_DIST) # we don't want to measure collapsing clusters
d_ref = np.clip(d_ref, 0.1, MAX_DIST)
weights = np.array(d_cmp / d_ref) # disconnected regions
weights_overlap = np.array(d_ref / d_cmp) # overlapping regions
# the following is just for annotation of figures
if 'highlights' not in adata_ref.uns:
adata_ref.uns['highlights'] = {}
else:
# remove old disconnected and overlapping points
new_highlights = {}
for k, v in adata_ref.uns['highlights'].items():
if v != 'O' and v not in {'D0', 'D1', 'D2', 'D3', 'D4'}:
new_highlights[k] = v
adata_ref.uns['highlights'] = new_highlights
# points that are maximally disconnected
max_weights = np.argpartition(weights, kth=-n_points)[-n_points:]
points = list(edgeset_union_indices[0][max_weights])
points2 = list(edgeset_union_indices[1][max_weights])
found_disconnected_points = False
for ip, p in enumerate(points):
if d_cmp[max_weights][ip] == MAX_DIST:
adata_ref.uns['highlights'][p] = 'D' + str(ip)
adata_ref.uns['highlights'][points2[ip]] = 'D' + str(ip)
found_disconnected_points = True
if found_disconnected_points:
logg.msg('most disconnected points', points)
logg.msg(' with weights', weights[max_weights].round(1))
max_weights = np.argpartition(
weights_overlap, kth=-n_points)[-n_points:]
points = list(edgeset_union_indices[0][max_weights])
for p in points:
adata_ref.uns['highlights'][p] = 'O'
logg.msg('most overlapping points', points)
logg.msg(' with weights', weights_overlap[max_weights].round(1))
logg.msg(' with d_rep', d_cmp[max_weights].round(1))
logg.msg(' with d_ref', d_ref[max_weights].round(1))
geo_entropy_d = np.sum(weights * p_ref * np.log(ratio))
geo_entropy_o = np.sum(weights_overlap * (1-p_ref) * np.log(ratio_1m))
geo_entropy_d /= n_edges_fully_connected
geo_entropy_o /= n_edges_fully_connected
logg.info('geodesic cross entropy {:.3e}'.format(geo_entropy_d + geo_entropy_o))
return entropy, geo_entropy_d, geo_entropy_o | 0.001137 |
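The core comparison reduces to a clipped binary cross entropy between the two connectivity values on the union of edges. A stripped-down numpy sketch (EPS is assumed to be 1e-12 here; the real value comes from the module):
import numpy as np

EPS = 1e-12  # assumption for this sketch
p_ref = np.array([0.9, 0.0, 0.4])   # reference connectivities on the edge union
p_cmp = np.array([0.8, 0.3, 0.0])   # connectivities in the compared representation

p_cmp = np.clip(p_cmp, EPS, 1 - EPS)
ratio = np.clip(p_ref / p_cmp, EPS, None)
ratio_1m = np.clip((1 - p_ref) / (1 - p_cmp), EPS, None)
entropy = np.sum(p_ref * np.log(ratio) + (1 - p_ref) * np.log(ratio_1m))
print(entropy)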
def check(self):
"""
Returns #True if the timeout is exceeded.
"""
if self.value is None:
return False
return (time.clock() - self.start) >= self.value | 0.011236 |
def max_command_length():
"""
get the maximum length of the command line, in bytes, defaulting
to a conservative number if not set
http://www.in-ulm.de/~mascheck/various/argmax/
"""
DEFAULT_MAX_LENGTH = 150000 # lowest seen so far is 200k
try:
arg_max = os.sysconf('SC_ARG_MAX')
env_lines = len(os.environ) * 4
env_chars = sum([len(x) + len(y) for x, y in os.environ.items()])
        arg_length = arg_max - env_lines - env_chars - 2048  # leave headroom for the environment plus some slack
except ValueError:
arg_length = DEFAULT_MAX_LENGTH
return arg_length if arg_length > 0 else DEFAULT_MAX_LENGTH | 0.003317 |
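A typical use of the returned limit is xargs-style batching so each command invocation stays under it; a minimal, illustrative sketch:
def chunk_args(args, limit):
    # Yield lists of args whose joined length (with separating spaces) stays below limit.
    batch, size = [], 0
    for arg in args:
        if batch and size + len(arg) + 1 > limit:
            yield batch
            batch, size = [], 0
        batch.append(arg)
        size += len(arg) + 1
    if batch:
        yield batch

for batch in chunk_args(['file%d' % i for i in range(10)], limit=20):
    print(' '.join(batch))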
def suspend(rq, ctx, duration):
"Suspends all workers."
ctx.invoke(
rq_cli.suspend,
duration=duration,
**shared_options(rq)
) | 0.006211 |
def auth_pair(self, force_console=False):
"""Return username/password tuple, possibly prompting the user for them."""
if not self.auth_valid():
self._get_auth(force_console)
return (self.user, self.password) | 0.012346 |
def valid_targets(self):
"""Return a :class:`QuerySet` of valid targets for moving a page
into the tree.
"""
exclude_list = [self.pk]
for p in self.get_descendants():
exclude_list.append(p.id)
return Page.objects.exclude(id__in=exclude_list) | 0.005391 |
def _get_hypocentral_depth_term(self, C, rup):
"""
Returns the hypocentral depth scaling term defined in equations 21 - 23
"""
if rup.hypo_depth <= 7.0:
fhyp_h = 0.0
elif rup.hypo_depth > 20.0:
fhyp_h = 13.0
else:
fhyp_h = rup.hypo_depth - 7.0
if rup.mag <= 5.5:
fhyp_m = C["c17"]
elif rup.mag > 6.5:
fhyp_m = C["c18"]
else:
fhyp_m = C["c17"] + ((C["c18"] - C["c17"]) * (rup.mag - 5.5))
return fhyp_h * fhyp_m | 0.003559 |
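A standalone sketch of the same piecewise term with illustrative (not published) coefficients, showing how the depth and magnitude factors combine:
def hypo_depth_term(c17, c18, hypo_depth, mag):
    # f_hyp = f_hyp,H(depth) * f_hyp,M(mag), mirroring the method above.
    if hypo_depth <= 7.0:
        f_h = 0.0
    elif hypo_depth > 20.0:
        f_h = 13.0
    else:
        f_h = hypo_depth - 7.0
    if mag <= 5.5:
        f_m = c17
    elif mag > 6.5:
        f_m = c18
    else:
        f_m = c17 + (c18 - c17) * (mag - 5.5)
    return f_h * f_m

print(hypo_depth_term(c17=0.2, c18=-0.1, hypo_depth=12.0, mag=6.0))  # -> 0.25 with these made-up coefficients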