text (string, 78 to 104k characters) | score (float64, 0 to 0.18) |
---|---|
def propagate_average_up(self, cols=['lat', 'lon'],
target_df_name='sites', source_df_name='samples'):
"""
Propagate average values from a lower table to a higher one.
For example, propagate average lats/lons from samples to sites.
Pre-existing values will not be overwritten.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
source_df_name : str
name of table to propagate values from
Returns
-------
target_df : MagicDataFrame or None
returns table with propagated data,
or None if no propagation could be done
"""
# make sure target/source table are appropriate
target_ind = self.ancestry.index(target_df_name)
source_ind = self.ancestry.index(source_df_name)
if target_ind - source_ind != 1:
print('-W- propagate_average_up only works with tables that are spaced one apart, i.e. sites and samples.')
print(' Source table must be lower in the hierarchy than the target table.')
print(' You have provided "{}" as the target table and "{}" as the source table.'.format(target_df_name, source_df_name))
return None
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
if source_df_name not in self.tables:
self.add_magic_table(source_df_name)
if source_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(source_df_name))
return
# get tables
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# step 1: make sure columns exist in target_df
for col in cols:
if col not in target_df.df.columns:
target_df.df[col] = None
# step 2: propagate target_df columns forward & back
target_df.front_and_backfill(cols)
# step 3: see if any column values are missing
values = [not_null(val) for val in target_df.df[cols].values.ravel()]
if all(values):
print('-I- {} table already has {} filled column(s)'.format(target_df_name, cols))
self.tables[target_df_name] = target_df
return target_df
# step 4: make sure columns are in source table, also target name
if target_name not in source_df.df.columns:
print("-W- can't propagate from {} to {} table".format(source_df_name, target_df_name))
print(" Missing {} column in {} table".format(target_name, source_df_name))
self.tables[target_df_name] = target_df
return target_df
for col in cols:
if col not in target_df.df.columns:
target_df.df[col] = None
# step 5: if needed, average from source table and apply to target table
for col in cols:
if col not in source_df.df.columns:
source_df.df[col] = np.nan
else:
# make sure is numeric
source_df.df[col] = pd.to_numeric(source_df.df[col], errors='coerce')
grouped = source_df.df[cols + [target_name]].groupby(target_name)
grouped = grouped[cols].apply(np.mean)
for col in cols:
target_df.df['new_' + col] = grouped[col]
# use custom not_null
mask = [not_null(val) for val in target_df.df[col]]
target_df.df[col] = np.where(mask, #target_df.df[col].notnull(),
target_df.df[col],
target_df.df['new_' + col])
target_df.df.drop(['new_' + col], inplace=True, axis=1)
# round column to 5 decimal points
try:
target_df.df[col] = target_df.df[col].astype(float)
target_df.df = target_df.df.round({col: 5})
except ValueError: # if there are sneaky strings...
pass
self.tables[target_df_name] = target_df
return target_df | 0.003135 |
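A minimal pandas-only sketch of the averaging step above (toy 'samples'/'sites' frames, not the MagIC API): group the source rows by the parent name, take the mean, and fill only the target values that are still missing.

import numpy as np
import pandas as pd

samples = pd.DataFrame({'site': ['s1', 's1', 's2'],
                        'lat': [10.0, 12.0, 40.0],
                        'lon': [20.0, 22.0, 50.0]})
sites = pd.DataFrame({'site': ['s1', 's2'],
                      'lat': [np.nan, 39.5],   # s2 already has a lat; it is kept
                      'lon': [np.nan, np.nan]}).set_index('site')

# average per parent, then fill only the missing target cells
means = samples.groupby('site')[['lat', 'lon']].mean()
sites = sites.fillna(means).round(5)
print(sites)   # s1 gets (11.0, 21.0); s2 keeps lat 39.5 and gets lon 50.0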
def _execActions(self, type, msg):
""" Execute Registered Actions """
for action in self.ACTIONS:
action(type, msg) | 0.013986 |
def all(cls):
"""
Returns a list of all configured endpoints the server is listening on. For each endpoint,
the list of allowed databases is returned too if set.
The result is a JSON hash which has the endpoints as keys, and the list of
mapped database names as values for each endpoint.
If a list of mapped databases is empty, it means that all databases can be accessed via the endpoint.
If a list of mapped databases contains more than one database name, this means that any of the
databases might be accessed via the endpoint, and the first database in the list will be treated
as the default database for the endpoint. The default database will be used when an incoming request
does not specify a database name in the request explicitly.
*Note*: retrieving the list of all endpoints is allowed in the system database only.
Calling this action in any other database will make the server return an error.
"""
api = Client.instance().api
endpoint_list = api.endpoint.get()
return endpoint_list | 0.008525 |
def p2wpkh_input_and_witness(outpoint, sig, pubkey, sequence=0xFFFFFFFE):
'''
Outpoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed witness TxIn and InputWitness from a p2wpkh prevout
'''
return tb.make_witness_input_and_witness(
outpoint=outpoint,
sequence=sequence,
stack=[bytes.fromhex(sig), bytes.fromhex(pubkey)]) | 0.002591 |
def DEFINE_point(name, default, help): # pylint: disable=invalid-name,redefined-builtin
"""Registers a flag whose value parses as a point."""
flags.DEFINE(PointParser(), name, default, help) | 0.020513 |
def set_priors(self, priors=None, fixed=None, random=None,
match_derived_names=True):
'''Set priors for one or more existing terms.
Args:
priors (dict): Dict of priors to update. Keys are names of terms
to update; values are the new priors (either a Prior instance,
or an int or float that scales the default priors). Note that
a tuple can be passed as the key, in which case the same prior
will be applied to all terms named in the tuple.
fixed (Prior, int, float, str): a prior specification to apply to
all fixed terms currently included in the model.
random (Prior, int, float, str): a prior specification to apply to
all random terms currently included in the model.
match_derived_names (bool): if True, the specified prior(s) will be
applied not only to terms that match the keyword exactly,
but to the levels of random effects that were derived from
the original specification with the passed name. For example,
`priors={'condition|subject':0.5}` would apply the prior
to the terms with names '1|subject', 'condition[T.1]|subject',
and so on. If False, an exact match is required for the
prior to be applied.
'''
# save arguments to pass to _set_priors() at build time
kwargs = dict(zip(
['priors', 'fixed', 'random', 'match_derived_names'],
[priors, fixed, random, match_derived_names]))
self._added_priors.update(kwargs)
self.built = False | 0.00176 |
def browserify_file(entry_point, output_file, babelify=False, export_as=None):
"""
Browserify a single javascript entry point plus non-external
dependencies into a single javascript file. Generates source maps
in debug mode. Minifies the output in release mode.
By default, it is not possible to ``require()`` any exports from the entry
point or included files. If ``export_as`` is specified, any module exports
in the specified entry point are exposed for ``require()`` with the
name specified by ``export_as``.
"""
from .modules import browserify
if not isinstance(entry_point, str):
raise RuntimeError('Browserify File compiler takes a single entry point as input.')
return {
'dependencies_fn': browserify.browserify_deps_file,
'compiler_fn': browserify.browserify_compile_file,
'input': entry_point,
'output': output_file,
'kwargs': {
'babelify': babelify,
'export_as': export_as,
},
} | 0.001953 |
async def get_profile(self, *tags):
'''Get a profile object using tag(s)'''
url = '{0.BASE}/profile/{1}'.format(self, ','.join(tags))
data = await self.request(url)
if isinstance(data, list):
return [Profile(self, c) for c in data]
else:
return Profile(self, data) | 0.008671 |
def get_stops_in_polygon(
feed: "Feed", polygon: Polygon, geo_stops=None
) -> DataFrame:
"""
Return the slice of ``feed.stops`` that contains all stops that lie
within the given Shapely Polygon object that is specified in
WGS84 coordinates.
Parameters
----------
feed : Feed
polygon : Shapely Polygon
Specified in WGS84 coordinates
geo_stops : Geopandas GeoDataFrame
A geographic version of ``feed.stops`` which will be computed
if not given.
Specify this parameter in batch jobs to avoid unnecessary
computation.
Returns
-------
DataFrame
Subset of ``feed.stops``
Notes
-----
- Requires GeoPandas
- Assume the following feed attributes are not ``None``:
* ``feed.stops``, if ``geo_stops`` is not given
"""
if geo_stops is not None:
f = geo_stops.copy()
else:
f = geometrize_stops(feed.stops)
cols = f.columns
f["hit"] = f["geometry"].within(polygon)
f = f[f["hit"]][cols]
return ungeometrize_stops(f) | 0.00093 |
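A minimal shapely-only sketch of the containment test behind the slice above (toy coordinates, no GTFS feed or GeoPandas involved):

from shapely.geometry import Point, Polygon

square = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
stops = {'A': Point(0.5, 0.5), 'B': Point(2.0, 2.0)}

# keep only the stops whose geometry lies within the polygon
inside = [name for name, pt in stops.items() if pt.within(square)]
print(inside)   # ['A']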
def readAlignedString(self, align = 4):
"""
Reads an ASCII string aligned to the next align-bytes boundary.
@type align: int
@param align: (Optional) The value we want the ASCII string to be aligned.
@rtype: str
@return: A 4-bytes aligned (default) ASCII string.
"""
s = self.readString()
r = align - len(s) % align
while r:
s += self.data[self.offset]
self.offset += 1
r -= 1
return s.rstrip("\x00") | 0.014733 |
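A tiny worked example of the padding count used above (standalone arithmetic, not tied to the reader class): r is the number of bytes consumed after the string so the read offset lands on the next align-byte boundary.

align = 4
for length in (5, 7, 8):
    r = align - length % align
    print(length, '->', r)   # 5 -> 3, 7 -> 1, 8 -> 4 (a full word is skipped when already aligned)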
def request(self, method, url, **kwargs):
"""Build remote url request. Constructs necessary auth."""
user_token = kwargs.pop('token', self.token)
token, secret, expires_at = self.parse_raw_token(user_token)
if token is not None:
params = kwargs.get('params', {})
params['access_token'] = token
kwargs['params'] = params
return super(OAuth2Provider, self).request(method, url, **kwargs) | 0.004338 |
def get_filter(cls, mimetype):
"""
Returns a filter string for the file dialog. The filter is based
on the mime type.
:param mimetype: path from which the filter must be derived.
:return: Filter string
"""
filters = ' '.join(
['*%s' % ext for ext in mimetypes.guess_all_extensions(mimetype)])
return '%s (%s)' % (mimetype, filters) | 0.004902 |
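A small standalone check of the mimetypes lookup used above (the exact extension list depends on the platform's mime database):

import mimetypes

exts = mimetypes.guess_all_extensions('application/json')
print(' '.join('*%s' % ext for ext in exts))   # e.g. *.json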
def freeze(self):
"""Make the SchemaElement's connections immutable."""
self.in_connections = frozenset(self.in_connections)
self.out_connections = frozenset(self.out_connections) | 0.009852 |
def write(self):
""" Write the current text to self.file, and flush it.
This can be overridden to handle custom writes.
"""
if self._text is not None:
with self.lock:
self.file.write(str(self._text).encode())
self.file.flush()
sleep(self.nice_delay) | 0.005935 |
def info(self):
'''Return a nested dictionary of information related to the actor
status and performance. The dictionary contains the following entries:
* ``actor`` a dictionary containing information regarding the type of
actor and its status.
* ``events`` a dictionary of information about the
:ref:`event loop <asyncio-event-loop>` running the actor.
* ``extra`` the :attr:`extra` attribute (you can use it to add stuff).
* ``system`` system info.
This method is invoked when you run the
:ref:`info command <actor_info_command>` from another actor.
'''
if not self.started():
return
isp = self.is_process()
actor = {'name': self.name,
'state': self.info_state,
'actor_id': self.aid,
'uptime': self._loop.time() - self._started,
'thread_id': self.tid,
'process_id': self.pid,
'is_process': isp,
'age': self.concurrency.age}
data = {'actor': actor,
'extra': self.extra}
if isp:
data['system'] = system.process_info(self.pid)
self.event('on_info').fire(data=data)
return data | 0.001556 |
def tweets_for_user(screen_name, limit=1e10):
""" Collect the most recent 3200 tweets for this user, sleeping to deal with rate limits."""
qu = Queue()
p = Thread(target=_tweets_for_user, args=(qu, screen_name, limit))
p.start()
p.join(910)
if p.is_alive():
sys.stderr.write('no results after 15 minutes for %s. Aborting.' % screen_name)
return []
else:
return qu.get() | 0.007126 |
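A self-contained sketch of the worker-thread-with-timeout pattern used above, with the rate-limited Twitter call replaced by a hypothetical stand-in job:

import sys
import time
from queue import Queue
from threading import Thread

def _slow_job(qu, n):
    time.sleep(0.1)            # stand-in for the slow, rate-limited API calls
    qu.put(list(range(n)))

def run_with_timeout(n, timeout=2):
    qu = Queue()
    p = Thread(target=_slow_job, args=(qu, n))
    p.start()
    p.join(timeout)            # wait at most `timeout` seconds
    if p.is_alive():
        sys.stderr.write('no results after %s seconds. Aborting.\n' % timeout)
        return []
    return qu.get()

print(run_with_timeout(5))     # [0, 1, 2, 3, 4]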
def runExperiment(args):
"""
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param featureNoise (float) Noise level to add to the features
during inference. Default: None
@param locationNoise (float) Noise level to add to the locations
during inference. Default: None
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param networkType (string)The type of network to use. Options are:
"MultipleL4L2Columns",
"MultipleL4L2ColumnsWithTopology" and
"MultipleL4L2ColumnsWithRandomTopology".
Default: "MultipleL4L2Columns"
@param longDistanceConnections (float) The probability that a column will
connect to a distant column. Only relevant when
using the random topology network type.
If > 1, will instead be taken as desired number
of long-distance connections per column.
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
@param enableFeedback (bool) If True, enable feedback, default is True
@param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
locations will present during inference if this
parameter is set to be a positive number
The method returns the args dict updated with multiple additional keys
representing accuracy metrics.
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
networkType = args.get("networkType", "MultipleL4L2Columns")
longDistanceConnections = args.get("longDistanceConnections", 0)
locationNoise = args.get("locationNoise", 0.0)
featureNoise = args.get("featureNoise", 0.0)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
enableFeedback = args.get("enableFeedback", True)
numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
numInferenceRpts = args.get("numInferenceRpts", 1)
l2Params = args.get("l2Params", None)
l4Params = args.get("l4Params", None)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=150,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
r = objects.objectConfusion()
print "Average common pairs in objects=", r[0],
print ", locations=",r[1],", features=",r[2]
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network
name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
L2Overrides=l2Params,
L4Overrides=l4Params,
networkType = networkType,
longDistanceConnections=longDistanceConnections,
inputSize=150,
externalInputSize=2400,
numInputBits=20,
seed=trialNum,
enableFeedback=enableFeedback,
)
exp.learnObjects(objects.provideObjectsToLearn())
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
numCorrectClassifications=0
classificationPerSensation = numpy.zeros(settlingTime*numPoints)
for objectId in objects:
exp.sendReset()
obj = objects[objectId]
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if numColumns > 1:
# Create sequence of random sensations for this object for all columns At
# any point in time, ensure each column touches a unique loc,feature pair
# on the object. It is ok for a given column to sense a loc,feature pair
# more than once. The total number of sensations is equal to the number of
# points on the object.
for sensationNumber in range(len(obj)):
# Randomly shuffle points for each sensation
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for c in range(numColumns):
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[c].append(objectCopy[c])
else:
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for pair in objectCopy:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"noiseLevel": featureNoise,
"locationNoise": locationNoise,
"includeRandomLocation": includeRandomLocation,
"numAmbiguousLocations": numAmbiguousLocations,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId, reset=False)
classificationPerSensation += numpy.array(
exp.statistics[objectId]["Correct classification"])
if exp.isObjectClassified(objectId, minOverlap=30):
numCorrectClassifications += 1
if plotInferenceStats:
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
experimentID=objectId,
onePlot=False,
)
convergencePoint, accuracy = exp.averageConvergencePoint("L2 Representation",
30, 40, settlingTime)
classificationAccuracy = float(numCorrectClassifications) / numObjects
classificationPerSensation = classificationPerSensation / numObjects
print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format(
numObjects, numFeatures, numLocations, numColumns, trialNum, networkType)
print "Average convergence point=",convergencePoint
print "Classification accuracy=",classificationAccuracy
print
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"classificationAccuracy":classificationAccuracy})
args.update({"classificationPerSensation":classificationPerSensation.tolist()})
# Can't pickle experiment so can't return it for batch multiprocessing runs.
# However this is very useful for debugging when running in a single thread.
if plotInferenceStats:
args.update({"experiment": exp})
return args | 0.011167 |
def next(self) -> mx.io.DataBatch:
"""
Returns the next batch from the data iterator.
"""
if not self.iter_next():
raise StopIteration
i, j = self.batch_indices[self.curr_batch_index]
self.curr_batch_index += 1
batch_size = self.bucket_batch_sizes[i].batch_size
source = self.data.source[i][j:j + batch_size]
target = self.data.target[i][j:j + batch_size]
data = [source, target]
label = [self.data.label[i][j:j + batch_size]]
provide_data = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR) for n, x in
zip(self.data_names, data)]
provide_label = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR) for n, x in
zip(self.label_names, label)]
# TODO: num pad examples is not set here if fillup policy would be padding
return mx.io.DataBatch(data, label,
pad=0, index=None, bucket_key=self.buckets[i],
provide_data=provide_data, provide_label=provide_label) | 0.005348 |
def setRegisterNumbersForTemporaries(ast, start):
"""Assign register numbers for temporary registers, keeping track of
aliases and handling immediate operands.
"""
seen = 0
signature = ''
aliases = []
for node in ast.postorderWalk():
if node.astType == 'alias':
aliases.append(node)
node = node.value
if node.reg.immediate:
node.reg.n = node.value
continue
reg = node.reg
if reg.n is None:
reg.n = start + seen
seen += 1
signature += reg.node.typecode()
for node in aliases:
node.reg = node.value.reg
return start + seen, signature | 0.001447 |
def to_python(self, value, resource):
"""Converts to unicode if `self.encoding != None`, otherwise returns input without attempting to decode"""
if value is None:
return self._transform(value)
if isinstance(value, six.text_type):
return self._transform(value)
if self.encoding is None and isinstance(value, (six.text_type, six.binary_type)):
return self._transform(value)
if self.encoding is not None and isinstance(value, six.binary_type):
return self._transform(value.decode(self.encoding))
return self._transform(six.text_type(value)) | 0.00627 |
def create(self, **kwargs):
"""Create a new instance of this resource type.
As a general rule, the identifier should have been provided, but in
some subclasses the identifier is server-side-generated. Those classes
have to overload this method to deal with that scenario.
"""
if self.primary_key in kwargs:
del kwargs[self.primary_key]
data = self._generate_input_dict(**kwargs)
self.load(self.client.post('/'.join(self.url.split('/')[:-1]) + 's', data=data))
return self | 0.005386 |
def _set_get_vnetwork_dvpgs(self, v, load=False):
"""
Setter method for get_vnetwork_dvpgs, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_dvpgs (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_dvpgs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_dvpgs() directly.
YANG Description: Shows discovered distributed virtual port-groups
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_dvpgs.get_vnetwork_dvpgs, is_leaf=True, yang_name="get-vnetwork-dvpgs", rest_name="get-vnetwork-dvpgs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dvpg-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_dvpgs must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_dvpgs.get_vnetwork_dvpgs, is_leaf=True, yang_name="get-vnetwork-dvpgs", rest_name="get-vnetwork-dvpgs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dvpg-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_dvpgs = t
if hasattr(self, '_set'):
self._set() | 0.005672 |
def load_variables(layout, types=None, levels=None, skip_empty=True,
dataset=None, scope='all', **kwargs):
''' A convenience wrapper for one or more load_*_variables() calls.
Args:
layout (BIDSLayout): BIDSLayout containing variable files.
types (str, list): Types of variables to retrieve. All valid values
reflect the filename stipulated in the BIDS spec for each kind of
variable. Valid values include: 'events', 'physio', 'stim',
'scans', 'participants', 'sessions', and 'regressors'.
levels (str, list): Optional level(s) of variables to load. Valid
values are 'run', 'session', 'subject', or 'dataset'. This is
simply a shorthand way to specify types--e.g., 'run' will be
converted to types=['events', 'physio', 'stim', 'regressors'].
skip_empty (bool): Whether or not to skip empty Variables (i.e.,
where there are no rows/records in a file after applying any
filtering operations like dropping NaNs).
dataset (NodeIndex): An existing NodeIndex container to store the
loaded data in. Can be used to iteratively construct a dataset
that contains otherwise heterogeneous sets of variables. If None,
a new NodeIndex is used.
scope (str, list): The scope of the space to search for variables. See
docstring for BIDSLayout for details and valid predefined values.
kwargs: Optional keyword arguments to pass onto the individual
load_*_variables() calls.
Returns:
A NodeIndex instance.
Example:
>>> load_variables(layout, ['events', 'physio'], subject='01')
# returns all variables stored in _events.tsv and _physio.tsv.gz files
# for runs that belong to subject with id '01'.
'''
TYPES = ['events', 'physio', 'stim', 'scans', 'participants', 'sessions',
'regressors']
types = listify(types)
if types is None:
if levels is not None:
types = []
lev_map = {
'run': ['events', 'physio', 'stim', 'regressors'],
'session': ['scans'],
'subject': ['sessions'],
'dataset': ['participants']
}
[types.extend(lev_map[l.lower()]) for l in listify(levels)]
else:
types = TYPES
bad_types = set(types) - set(TYPES)
if bad_types:
raise ValueError("Invalid variable types: %s" % bad_types)
dataset = dataset or NodeIndex()
run_types = list({'events', 'physio', 'stim', 'regressors'} - set(types))
type_flags = {t: False for t in run_types}
if len(type_flags) < 4:
_kwargs = kwargs.copy()
_kwargs.update(type_flags)
dataset = _load_time_variables(layout, dataset, scope=scope, **_kwargs)
for t in ({'scans', 'sessions', 'participants'} & set(types)):
kwargs.pop('suffix', None) # suffix is always one of values aboves
dataset = _load_tsv_variables(layout, t, dataset, scope=scope,
**kwargs)
return dataset | 0.000948 |
def set_header(self, header, value):
""" Set header value """
# requests>=2.11 only accepts `str` or `bytes` header values
# raising an exception here, instead of leaving it to `requests` makes
# it easy to know where we passed a wrong header type in the code.
if not isinstance(value, (str, bytes)):
raise TypeError("header values must be str or bytes, but %s value has type %s" % (header, type(value)))
self._headers[header] = value | 0.006073 |
def execute_on_all_members(self, task):
"""
Executes a task on all of the known cluster members.
:param task: (Task), the task executed on the all of the members.
:return: (Map), :class:`~hazelcast.future.Future` tuples representing pending completion of the task on each member.
"""
return self.execute_on_members(self._client.cluster.get_member_list(), task) | 0.00978 |
def adapter_update_nio_binding(self, adapter_number, nio):
"""
Update a port NIO binding.
:param adapter_number: adapter number
:param nio: NIO instance to add to the adapter
"""
if self.is_running():
try:
yield from self.update_ubridge_udp_connection(
"VBOX-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
except IndexError:
raise VirtualBoxError('Adapter {adapter_number} does not exist on VirtualBox VM "{name}"'.format(
name=self._name,
adapter_number=adapter_number
)) | 0.004071 |
def collect_variables(self, selections) -> None:
"""Apply method |ChangeItem.collect_variables| of the base class
|ChangeItem| and also apply method |ExchangeItem.insert_variables|
of class |ExchangeItem| to collect the relevant base variables
handled by the devices of the given |Selections| object.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy import AddItem
>>> item = AddItem(
... 'alpha', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
>>> item.collect_variables(pub.selections)
>>> land_dill = hp.elements.land_dill
>>> control = land_dill.model.parameters.control
>>> item.device2target[land_dill] is control.sfcf
True
>>> item.device2base[land_dill] is control.rfcf
True
>>> for device in sorted(item.device2base, key=lambda x: x.name):
... print(device)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
"""
super().collect_variables(selections)
self.insert_variables(self.device2base, self.basespecs, selections) | 0.001663 |
def import_data(self, file_name='*', folder_name='.', head_row=0, index_col=0,
convert_col=True, concat_files=False, save_file=True):
""" Imports csv file(s) and stores the result in self.imported_data.
Note
----
1. If folder exists out of current directory, folder_name should contain correct regex
2. Assuming there's no file called "\*.csv"
Parameters
----------
file_name : str
CSV file to be imported. Defaults to '\*' - all csv files in the folder.
folder_name : str
Folder where file resides. Defaults to '.' - current directory.
head_row : int
Skips all rows from 0 to head_row-1
index_col : int
Skips all columns from 0 to index_col-1
convert_col : bool
Convert columns to numeric type
concat_files : bool
Appends data from files to result dataframe
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing imported data.
"""
# Create instance and import the data
import_data_obj = Import_Data()
import_data_obj.import_csv(file_name=file_name, folder_name=folder_name,
head_row=head_row, index_col=index_col,
convert_col=convert_col, concat_files=concat_files)
# Store imported data in wrapper class
self.imported_data = import_data_obj.data
# Logging
self.result['Import'] = {
'File Name': file_name,
'Folder Name': folder_name,
'Head Row': head_row,
'Index Col': index_col,
'Convert Col': convert_col,
'Concat Files': concat_files,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/imported_data-' + str(self.get_global_count()) + '.csv'
self.imported_data.to_csv(f)
self.result['Import']['Saved File'] = f
else:
self.result['Import']['Saved File'] = ''
return self.imported_data | 0.007799 |
def get_name(self, use_alias=True):
"""
Gets the name to reference the sorted field
:return: the name to reference the sorted field
:rtype: str
"""
if self.desc:
direction = 'DESC'
else:
direction = 'ASC'
if use_alias:
return '{0} {1}'.format(self.field.get_identifier(), direction)
return '{0} {1}'.format(self.field.get_select_sql(), direction) | 0.004386 |
def sync_account(self, sync_message):
"""同步账户
Arguments:
sync_message {[type]} -- [description]
"""
self.init_hold = sync_message['hold_available']
self.init_cash = sync_message['cash_available']
self.sell_available = copy.deepcopy(self.init_hold)
self.history = []
self.cash = [self.init_cash]
self.cash_available = self.cash[-1] | 0.004796 |
def load(self, filename, **kwargs):
"""
Parse a file specified with the filename and return an numpy array
Parameters
----------
filename : string
A path of a file
Returns
-------
ndarray
An instance of numpy array
"""
with open(filename, 'r') as f:
return self.parse(f, **kwargs) | 0.004914 |
def Create(name,template,group_id,network_id,cpu=None,memory=None,alias=None,password=None,ip_address=None,
storage_type="standard",type="standard",primary_dns=None,secondary_dns=None,
additional_disks=[],custom_fields=[],ttl=None,managed_os=False,description=None,
source_server_password=None,cpu_autoscale_policy_id=None,anti_affinity_policy_id=None,
packages=[],configuration_id=None,session=None):
"""Creates a new server.
https://www.centurylinkcloud.com/api-docs/v2/#servers-create-server
cpu and memory are optional and if not provided we pull from the default server size values associated with
the provided group_id.
Set ttl as number of seconds before server is to be terminated. Must be >3600
>>> d = clc.v2.Datacenter()
>>> clc.v2.Server.Create(name="api2",cpu=1,memory=1,
group_id=d.Groups().Get("Default Group").id,
template=d.Templates().Search("centos-6-64")[0].id,
network_id=d.Networks().networks[0].id).WaitUntilComplete()
0
"""
if not alias: alias = clc.v2.Account.GetAlias(session=session)
if not description: description = name
if type.lower() != "baremetal":
if not cpu or not memory:
group = clc.v2.Group(id=group_id,alias=alias,session=session)
if not cpu and group.Defaults("cpu"):
cpu = group.Defaults("cpu")
elif not cpu:
raise(clc.CLCException("No default CPU defined"))
if not memory and group.Defaults("memory"):
memory = group.Defaults("memory")
elif not memory:
raise(clc.CLCException("No default Memory defined"))
if type.lower() == "standard" and storage_type.lower() not in ("standard","premium"):
raise(clc.CLCException("Invalid type/storage_type combo"))
if type.lower() == "hyperscale" and storage_type.lower() != "hyperscale":
raise(clc.CLCException("Invalid type/storage_type combo"))
if type.lower() == "baremetal":
type = "bareMetal"
if ttl and ttl<=3600: raise(clc.CLCException("ttl must be greater than 3600 seconds"))
if ttl: ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time())+ttl)
# TODO - validate custom_fields as a list of dicts with an id and a value key
# TODO - validate template exists
# TODO - validate additional_disks as a list of dicts with a path, sizeGB, and type (partitioned,raw) keys
# TODO - validate addition_disks path not in template reserved paths
# TODO - validate antiaffinity policy id set only with type=hyperscale
payload = {
'name': name, 'description': description, 'groupId': group_id, 'primaryDNS': primary_dns, 'secondaryDNS': secondary_dns,
'networkId': network_id, 'password': password, 'type': type, 'customFields': custom_fields
}
if type == 'bareMetal':
payload.update({'configurationId': configuration_id, 'osType': template})
else:
payload.update({'sourceServerId': template, 'isManagedOS': managed_os, 'ipAddress': ip_address,
'sourceServerPassword': source_server_password, 'cpu': cpu, 'cpuAutoscalePolicyId': cpu_autoscale_policy_id,
'memoryGB': memory, 'storageType': storage_type, 'antiAffinityPolicyId': anti_affinity_policy_id,
'additionalDisks': additional_disks, 'ttl': ttl, 'packages': packages})
return clc.v2.Requests(clc.v2.API.Call('POST','servers/%s' % (alias), json.dumps(payload), session=session),
alias=alias,
session=session) | 0.03501 |
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
"""
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle and return value are tuples of (x,y,w,h); bounds is the (height,width) of the containing area.
"""
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h) | 0.054734 |
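Quick usage check with hypothetical numbers, assuming padRect above is in scope: a symmetric 5-pixel pad inside 100x100 bounds, and a rect whose padding would run past the top-left corner (the clip snaps x,y back to 0 without shrinking the width/height).

print(padRect((10, 10, 20, 20), 5, 5, 5, 5, (100, 100), True))   # (5, 5, 30, 30)
print(padRect((2, 2, 10, 10), 5, 5, 5, 5, (100, 100), True))     # (0, 0, 20, 20)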
def get_type(bind):
""" Detect the ideal type for the data, either using the explicit type
definition or the format (for date, date-time, not supported by JSON). """
types = bind.types + [bind.schema.get('format')]
for type_name in ('date-time', 'date', 'decimal', 'integer', 'boolean',
'number', 'string'):
if type_name in types:
return type_name
return 'string' | 0.002353 |
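A standalone sketch of the priority rule above: the first name in the fixed order that appears among the detected types wins, with 'string' as the fallback.

PRIORITY = ('date-time', 'date', 'decimal', 'integer', 'boolean', 'number', 'string')

def pick(types):
    return next((t for t in PRIORITY if t in types), 'string')

print(pick(['string', 'integer']))   # 'integer'
print(pick(['text']))                # 'string' (fallback)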
def set_of_vars(arg_plot):
"""Build set of needed variables.
Args:
arg_plot (str): string with variable names separated with ``,``.
Returns:
set of str: set of variables.
"""
return set(var for var in arg_plot.split(',') if var in phyvars.PLATES) | 0.003534 |
def read_vcf(vcf_file, ref_file):
"""
Reads in a vcf/vcf.gz file and associated
reference sequence fasta (to which the VCF file is mapped).
Parses mutations, insertions, and deletions and stores them in a nested dict,
see 'returns' for the dict structure.
Calls with heterozygous values 0/1, 0/2, etc and no-calls (./.) are
replaced with Ns at the associated sites.
Positions are stored to correspond the location in the reference sequence
in Python (numbering is transformed to start at 0)
Parameters
----------
vcf_file : string
Path to the vcf or vcf.gz file to be read in
ref_file : string
Path to the fasta reference file to be read in
Returns
--------
compress_seq : nested dict
In the format: ::
{
'reference':'AGCTCGA..A',
'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
'insertions': { 'seq1':{4:'ATT'}, 'seq3':{1:'TT', 10:'CAG'} },
'positions': [1,4,7,10,100...]
}
reference : string
String of the reference sequence read from the Fasta, to which
the variable sites are mapped
sequences : nested dict
Dict containing sequence names as keys which map to dicts
that have position as key and the single-base mutation (or deletion)
as values
insertions : nested dict
Dict in the same format as the above, which stores insertions and their
locations. The first base of the insertion is the same as whatever is
currently in that position (Ref if no mutation, mutation in 'sequences'
otherwise), so the current base can be directly replaced by the bases held here.
positions : list
Python list of all positions with a mutation, insertion, or deletion.
"""
#Programming Note:
# Note on VCF Format
# -------------------
# 'Insertion where there are also deletions' (special handling)
# Ex:
# REF ALT Seq1 Seq2
# GC GCC,G 1/1 2/2
# Insertions formatted differently - don't know how many bp match
# the Ref (unlike simple insert below). Could be mutations, also.
# 'Deletion'
# Ex:
# REF ALT
# GC G
# Alt does not have to be 1 bp - any length shorter than Ref.
# 'Insertion'
# Ex:
# REF ALT
# A ATT
# First base always matches Ref.
# 'No indel'
# Ex:
# REF ALT
# A G
#define here, so that all sub-functions can access them
sequences = defaultdict(dict)
insertions = defaultdict(dict) #Currently not used, but kept in case of future use.
#TreeTime handles 2-3 base ambig codes, this will allow that.
def getAmbigCode(bp1, bp2, bp3=""):
bps = [bp1,bp2,bp3]
bps.sort()
key = "".join(bps)
return {
'CT': 'Y',
'AG': 'R',
'AT': 'W',
'CG': 'S',
'GT': 'K',
'AC': 'M',
'AGT': 'D',
'ACG': 'V',
'ACT': 'H',
'CGT': 'B'
}[key]
#Parses a 'normal' (not hetero or no-call) call depending if insertion+deletion, insertion,
#deletion, or single bp substitution
def parseCall(snps, ins, pos, ref, alt):
#Insertion where there are also deletions (special handling)
if len(ref) > 1 and len(alt)>len(ref):
for i in range(len(ref)):
#if the pos doesn't match, store in sequences
if ref[i] != alt[i]:
snps[pos+i] = alt[i] if alt[i] != '.' else 'N' #'.' = no-call
#if about to run out of ref, store rest:
if (i+1) >= len(ref):
ins[pos+i] = alt[i:]
#Deletion
elif len(ref) > 1:
for i in range(len(ref)):
#if ref is longer than alt, these are deletion positions
if i+1 > len(alt):
snps[pos+i] = '-'
#if not, there may be mutations
else:
if ref[i] != alt[i]:
snps[pos+i] = alt[i] if alt[i] != '.' else 'N' #'.' = no-call
#Insertion
elif len(alt) > 1:
ins[pos] = alt
#No indel
else:
snps[pos] = alt
#Parses a 'bad' (hetero or no-call) call depending on what it is
def parseBadCall(snps, ins, pos, ref, ALT):
#Deletion
# REF ALT Seq1 Seq2 Seq3
# GCC G 1/1 0/1 ./.
# Seq1 (processed by parseCall, above) will become 'G--'
# Seq2 will become 'GNN'
# Seq3 will become 'GNN'
if len(ref) > 1:
#Deleted part becomes Ns
if gen[0] == '0' or gen[0] == '.':
if gen[0] == '0': #if het, get first bp
alt = str(ALT[int(gen[2])-1])
else: #if no-call, there is no alt, so just put Ns after 1st ref base
alt = ref[0]
for i in range(len(ref)):
#if ref is longer than alt, these are deletion positions
if i+1 > len(alt):
snps[pos+i] = 'N'
#if not, there may be mutations
else:
if ref[i] != alt[i]:
snps[pos+i] = alt[i] if alt[i] != '.' else 'N' #'.' = no-call
#If not deletion, need to know call type
#if het, see if proposed alt is 1bp mutation
elif gen[0] == '0':
alt = str(ALT[int(gen[2])-1])
if len(alt)==1:
#alt = getAmbigCode(ref,alt) #if want to allow ambig
alt = 'N' #if you want to disregard ambig
snps[pos] = alt
#else a het-call insertion, so ignore.
#else it's a no-call; see if all alts have a length of 1
#(meaning a simple 1bp mutation)
elif len(ALT)==len("".join(ALT)):
alt = 'N'
snps[pos] = alt
#else a no-call insertion, so ignore.
#House code is *much* faster than pyvcf because we don't care about all info
#about coverage, quality, counts, etc, which pyvcf goes to effort to parse
#(and it's not easy as there's no standard ordering). Custom code can completely
#ignore all of this.
import gzip
from Bio import SeqIO
import numpy as np
nsamp = 0
posLoc = 0
refLoc = 0
altLoc = 0
sampLoc = 9
#Use different openers depending on whether compressed
opn = gzip.open if vcf_file.endswith(('.gz', '.GZ')) else open
with opn(vcf_file, mode='rt') as f:
for line in f:
if line[0] != '#':
#actual data - most common so first in 'if-list'!
line = line.strip()
dat = line.split('\t')
POS = int(dat[posLoc])
REF = dat[refLoc]
ALT = dat[altLoc].split(',')
calls = np.array(dat[sampLoc:])
#get samples that differ from Ref at this site
recCalls = {}
for sname, sa in zip(samps, calls):
if ':' in sa: #if proper VCF file (followed by quality/coverage info)
gt = sa.split(':')[0]
else: #if 'pseudo' VCF file (nextstrain output, or otherwise stripped)
gt = sa
if gt == '0' or gt == '1': #for haploid calls in VCF
gt = '0/0' if gt == '0' else '1/1'
#ignore if ref call: '.' or '0/0', depending on VCF
if ('/' in gt and gt != '0/0') or ('|' in gt and gt != '0|0'):
recCalls[sname] = gt
#store the position and the alt
for seq, gen in recCalls.items():
ref = REF
pos = POS-1 #VCF numbering starts from 1, but Reference seq numbering
#will be from 0 because it's python!
#Accepts only calls that are 1/1, 2/2 etc. Rejects hets and no-calls
if gen[0] != '0' and gen[2] != '0' and gen[0] != '.' and gen[2] != '.':
alt = str(ALT[int(gen[0])-1]) #get the index of the alternate
if seq not in sequences.keys():
sequences[seq] = {}
parseCall(sequences[seq],insertions[seq], pos, ref, alt)
#If is heterozygote call (0/1) or no call (./.)
else:
#alt will differ here depending on het or no-call, must pass original
parseBadCall(sequences[seq],insertions[seq], pos, ref, ALT)
elif line[0] == '#' and line[1] == 'C':
#header line, get all the information
header = line.strip().split('\t')
posLoc = header.index("POS")
refLoc = header.index('REF')
altLoc = header.index('ALT')
sampLoc = header.index('FORMAT')+1
samps = header[sampLoc:]
samps = [ x.strip() for x in samps ] #ensure no leading/trailing spaces
nsamp = len(samps)
#else you are a comment line, ignore.
#Gather all variable positions
positions = set()
for seq, muts in sequences.items():
positions.update(muts.keys())
#One or more seqs are same as ref! (No non-ref calls) So haven't been 'seen' yet
if nsamp > len(sequences):
missings = set(samps).difference(sequences.keys())
for s in missings:
sequences[s] = {}
refSeq = SeqIO.read(ref_file, format='fasta')
refSeq = refSeq.upper() #convert to uppercase to avoid unknown chars later
refSeqStr = str(refSeq.seq)
compress_seq = {'reference':refSeqStr,
'sequences': sequences,
'insertions': insertions,
'positions': sorted(positions)}
return compress_seq | 0.010369 |
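A toy illustration of how a homozygous genotype call picks its alternate allele in the parser above (hypothetical values, not a full VCF line):

REF, ALT = 'A', ['G', 'T']
gt = '2/2'                      # homozygous call for the second alternate
alt = ALT[int(gt[0]) - 1]       # genotype index 2 -> ALT[1]
print(alt)                      # 'T'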
def place_svg_dict(self, x, y, svg_dict, layer_id, group=None):
"""Same as :meth:`place` but with a dictionary as :paramref:`svg_dict`.
:param dict svg_dict: a dictionary returned by `xmltodict.parse()
<https://github.com/martinblech/xmltodict>`__
:param dict group: a dictionary of values to add to the group the
:paramref:`svg_dict` will be added to or :obj:`None` if nothing
should be added
"""
if group is None:
group = {}
group_ = {
"@transform": "translate({},{})".format(x, y),
"g": list(svg_dict.values())
}
group_.update(group)
layer = self._get_layer(layer_id)
layer["g"].append(group_) | 0.002692 |
def is_valid_timestamp(date, unit='millis'):
"""
Checks that a number representing a date in milliseconds (or seconds) is plausible.
"""
assert isinstance(date, int), "Input is not instance of int"
if unit == 'millis':
return is_positive(date) and len(str(date)) == 13
elif unit == 'seconds':
return is_positive(date) and len(str(date)) == 10
else:
raise ValueError('Unknown unit "%s"' % unit) | 0.002304 |
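A quick sanity check of the digit-length heuristic, assuming the function and its is_positive helper are in scope (hypothetical epoch timestamps):

print(is_valid_timestamp(1609459200000))                # True  (13 digits, milliseconds)
print(is_valid_timestamp(1609459200, unit='seconds'))   # True  (10 digits)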
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError('Invalid current offset value less than zero.')
# The SleuthKit is not POSIX compliant in its read behavior. Therefore
# pytsk3 will raise an IOError if the read offset is beyond the data size.
if self._current_offset >= self._size:
return b''
if size is None or self._current_offset + size > self._size:
size = self._size - self._current_offset
if self._tsk_attribute:
data = self._tsk_file.read_random(
self._current_offset, size, self._tsk_attribute.info.type,
self._tsk_attribute.info.id)
else:
data = self._tsk_file.read_random(self._current_offset, size)
# It is possible the that returned data size is not the same as the
# requested data size. At this layer we don't care and this discrepancy
# should be dealt with on a higher layer if necessary.
self._current_offset += len(data)
return data | 0.004755 |
def _generate_nodes(self,
name,
command,
parent=None,
show_nested=False,
commands=None):
"""Generate the relevant Sphinx nodes.
Format a `click.Group` or `click.Command`.
:param name: Name of command, as used on the command line
:param command: Instance of `click.Group` or `click.Command`
:param parent: Instance of `click.Context`, or None
:param show_nested: Whether subcommands should be included in output
:param commands: Display only listed commands or skip the section if
empty
:returns: A list of nested docutil nodes
"""
ctx = click.Context(command, info_name=name, parent=parent)
# Title
section = nodes.section(
'',
nodes.title(text=name),
ids=[nodes.make_id(ctx.command_path)],
names=[nodes.fully_normalize_name(ctx.command_path)])
# Summary
source_name = ctx.command_path
result = statemachine.ViewList()
lines = _format_command(ctx, show_nested, commands)
for line in lines:
LOG.debug(line)
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
# Subcommands
if show_nested:
commands = _filter_commands(ctx, commands)
for command in commands:
section.extend(
self._generate_nodes(command.name, command, ctx,
show_nested))
return [section] | 0.004217 |
def collect(self):
"""Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats.
"""
if str_to_bool(self.config['use_sudo']):
if not os.access(self.config['sudo_cmd'], os.X_OK):
self.log.error("Cannot find or exec %s"
% self.config['sudo_cmd'])
return None
command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0][:-1]
lines = p.split("\n")
else:
if not os.access(self.MOUNTSTATS, os.R_OK):
self.log.error("Cannot read path %s" % self.MOUNTSTATS)
return None
f = open(self.MOUNTSTATS)
lines = f.readlines()
f.close()
path = None
for line in lines:
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'device':
path = tokens[4]
skip = False
if self.exclude_reg:
skip = self.exclude_reg.match(path)
if self.include_reg:
skip = not self.include_reg.match(path)
if skip:
self.log.debug("Ignoring %s", path)
else:
self.log.debug("Keeping %s", path)
path = path.replace('.', '_')
path = path.replace('/', '_')
elif skip:
# If we are in a skip state, don't pay any attention to
# anything that isn't the next device line
continue
elif tokens[0] == 'events:':
for i in range(0, len(self.EVENTS_MAP)):
metric_name = "%s.events.%s" % (path, self.EVENTS_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'bytes:':
for i in range(0, len(self.BYTES_MAP)):
metric_name = "%s.bytes.%s" % (path, self.BYTES_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'xprt:':
proto = tokens[1]
if not self.XPRT_MAP[proto]:
self.log.error("Unknown protocol %s", proto)
continue
for i in range(0, len(self.XPRT_MAP[proto])):
metric_name = "%s.xprt.%s.%s" % (path, proto,
self.XPRT_MAP[proto][i])
metric_value = long(tokens[i + 2])
self.publish_counter(metric_name, metric_value)
elif tokens[0][:-1] in self.RPCS_MAP:
rpc = tokens[0][:-1]
ops = long(tokens[1])
rtt = long(tokens[7])
exe = long(tokens[8])
metric_fmt = "%s.rpc.%s.%s"
ops_name = metric_fmt % (path, rpc.lower(), 'ops')
rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')
exe_name = metric_fmt % (path, rpc.lower(), 'exe')
self.publish_counter(ops_name, ops)
self.publish_counter(rtt_name, rtt)
self.publish_counter(exe_name, exe) | 0.00056 |
def pformat(self, prefix=()):
'''
Makes a pretty ASCII format of the data, suitable for
displaying in a console or saving to a text file.
Returns a list of lines.
'''
nan = float("nan")
def sformat(segment, stat):
FMT = "n={0}, mean={1}, p50/95={2}/{3}, max={4}"
line_segs = [segment]
for s in [stat]:
p = s.get_percentiles()
p50, p95 = p.get(0.50, nan), p.get(0.95, nan)
line_segs.append(FMT.format(s.n, s.mean, p50, p95, s.max))
return '{0}: {1}'.format(*line_segs)
lines = []
for path in sorted(self.path_stats.keys()):
lines.append('=====================')
for seg, stat in zip(path, self.path_stats[path]):
lines.append(sformat(seg, stat))
return lines | 0.00223 |
def get_pullrequest(self, project, repository, pull_request_id):
"""
Retrieve a pull request.
The authenticated user must have REPO_READ permission
for the repository that this pull request targets to call this resource.
:param project:
:param repository:
:param pull_request_id: the ID of the pull request within the repository
:return:
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/pull-requests/{pullRequestId}'.format(project=project,
repository=repository,
pullRequestId=pull_request_id)
return self.get(url) | 0.008485 |
def encipher(self,string):
"""Encipher string using Polybius square cipher according to initialised key.
Example::
ciphertext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string. The ciphertext will be twice the length of the plaintext.
"""
string = self.remove_punctuation(string)#,filter='[^'+self.key+']')
ret = ''
for c in range(0,len(string)):
ret += self.encipher_char(string[c])
return ret | 0.018333 |
def predict(self, X, b=0.5, pos_label=1, return_probs=False):
"""Return numpy array of class predictions for X
based on predicted marginal probabilities.
:param X: Input data.
:param b: Decision boundary *for binary setting only*.
:type b: float
:param pos_label: Positive class index *for binary setting only*. Default: 1
:type pos_label: int
:param return_probs: If True, return predict probability. Default: False
:type return_probs: bool
"""
if self._check_input(X):
X = self._preprocess_data(X)
Y_prob = self.marginals(X)
if self.cardinality > 2:
Y_pred = Y_prob.argmax(axis=1) + 1
if return_probs:
return Y_pred, Y_prob
else:
return Y_pred
if pos_label not in [1, 2]:
raise ValueError("pos_label must have values in {1,2}.")
self.logger.info(f"Using positive label class {pos_label} with threshold {b}")
Y_pred = np.array(
[pos_label if p[pos_label - 1] > b else 3 - pos_label for p in Y_prob]
)
if return_probs:
return Y_pred, Y_prob
else:
return Y_pred | 0.004815 |
def batch_get_item(self, batch_list):
"""
Return a set of attributes for a multiple items in
multiple tables using their primary keys.
:type batch_list: :class:`boto.dynamodb.batch.BatchList`
:param batch_list: A BatchList object which consists of a
list of :class:`boto.dynamoddb.batch.Batch` objects.
Each Batch object contains the information about one
batch of objects that you wish to retrieve in this
request.
"""
request_items = self.dynamize_request_items(batch_list)
return self.layer1.batch_get_item(request_items,
object_hook=item_object_hook) | 0.002821 |
def get_time():
'''
Get the current system time.
:return: The current time in 24 hour format
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' timezone.get_time
'''
ret = salt.utils.mac_utils.execute_return_result('systemsetup -gettime')
return salt.utils.mac_utils.parse_return(ret) | 0.002976 |
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_ifindex(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_ifindex = ET.SubElement(fcoe_intf_list, "fcoe-intf-ifindex")
fcoe_intf_ifindex.text = kwargs.pop('fcoe_intf_ifindex')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003793 |
def verify_selenium_server_is_running(self):
"""
Start the Selenium standalone server, if it isn't already running.
Returns a tuple of two elements:
* A boolean which is True if the server is now running
* The Popen object representing the process so it can be terminated
later; if the server was already running, this value is "None"
"""
selenium_jar = settings.SELENIUM_JAR_PATH
if len(selenium_jar) < 5:
self.stdout.write('You need to configure SELENIUM_JAR_PATH')
return False, None
_jar_dir, jar_name = os.path.split(selenium_jar)
# Is it already running?
process = Popen(['ps -e | grep "%s"' % jar_name[:-4]],
shell=True, stdout=PIPE)
(grep_output, _grep_error) = process.communicate()
lines = grep_output.split('\n')
for line in lines:
if jar_name in line:
self.stdout.write('Selenium standalone server is already running')
return True, None
self.stdout.write('Starting the Selenium standalone server')
output = OutputMonitor()
selenium_process = Popen(['java', '-jar', selenium_jar],
stdout=open(os.devnull, 'w'),
stderr=output.stream.input)
ready_log_line = 'Selenium Server is up and running'
if not output.wait_for(ready_log_line, 10):
self.stdout.write('Timeout starting the Selenium server:\n')
self.stdout.write('\n'.join(output.lines))
return False, None
return True, selenium_process | 0.001803 |
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2017-07-01: :mod:`v2017_07_01.models<azure.mgmt.containerservice.v2017_07_01.models>`
* 2018-03-31: :mod:`v2018_03_31.models<azure.mgmt.containerservice.v2018_03_31.models>`
* 2018-08-01-preview: :mod:`v2018_08_01_preview.models<azure.mgmt.containerservice.v2018_08_01_preview.models>`
* 2018-09-30-preview: :mod:`v2018_09_30_preview.models<azure.mgmt.containerservice.v2018_09_30_preview.models>`
* 2019-02-01: :mod:`v2019_02_01.models<azure.mgmt.containerservice.v2019_02_01.models>`
"""
if api_version == '2017-07-01':
from .v2017_07_01 import models
return models
elif api_version == '2018-03-31':
from .v2018_03_31 import models
return models
elif api_version == '2018-08-01-preview':
from .v2018_08_01_preview import models
return models
elif api_version == '2018-09-30-preview':
from .v2018_09_30_preview import models
return models
elif api_version == '2019-02-01':
from .v2019_02_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | 0.00602 |
def pipe_urlbuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A url module that builds a url. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : {
'PARAM': [
{'key': {'value': <'order'>}, 'value': {'value': <'desc'>}},
{'key': {'value': <'page'>}, 'value': {'value': <'2'>}}
]
'PATH': {'type': 'text', 'value': <''>},
'BASE': {'type': 'text', 'value': <'http://site.com/feed.xml'>},
}
Yields
------
_OUTPUT : url
"""
pkwargs = cdicts(opts, kwargs)
get_params = get_funcs(conf.get('PARAM', []), **kwargs)[0]
get_paths = get_funcs(conf.get('PATH', []), **pkwargs)[0]
get_base = get_funcs(conf['BASE'], listize=False, **pkwargs)[0]
parse_params = utils.parse_params
splits = get_splits(_INPUT, funcs=[get_params, get_paths, get_base])
parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', parse_params))
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT | 0.000917 |
def plot_estimates(positions, estimates):
"""
Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position).
Returns
-------
matplotlib.figure.Figure
The plotted figure.
"""
x = list(positions)
fig = plt.figure(figsize=(SUBPLOT_WIDTH * len(estimates), FIGURE_HEIGHT))
for i, (title, y) in enumerate(zip(ESTIMATE_TITLES, estimates)):
ax = fig.add_subplot(1, len(estimates), i + 1)
ax.plot(x, y, linewidth=LINE_WIDTH, c=LINE_COLOR)
ax.title.set_text(title)
ax.set_xlim(0, 1)
ax.set_xlabel("position")
ax.set_ylabel("$\\hat P$")
ax.grid()
return fig | 0.003135 |
def create_branch(profile, name, branch_off):
"""Create a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the new branch.
branch_off
The name of a branch to create the new branch off of.
Returns:
A dict with data about the new branch.
"""
branch_off_sha = get_branch_sha(profile, branch_off)
ref = "heads/" + name
data = refs.create_ref(profile, ref, branch_off_sha)
return data | 0.001497 |
def infer_x(self, y):
"""Infer probable x from input y
@param y the desired output for infered x.
@return a list of probable x
"""
OptimizedInverseModel.infer_x(self, y)
if self.fmodel.size() == 0:
return self._random_x()
x_guesses = [self._guess_x_simple(y)[0]]
result = []
for xg in x_guesses:
res = cma.fmin(self._error, xg, self.cmaes_sigma,
options={'bounds':[self.lower, self.upper],
'verb_log':0,
'verb_disp':False,
'maxfevals':self.maxfevals,
'seed': self.seed})
result.append((res[1], res[0]))
return [xi for fi, xi in sorted(result)] | 0.0125 |
def run(ctx, commandline):
"""Run command with environment variables present."""
file = ctx.obj['FILE']
dotenv_as_dict = dotenv_values(file)
if not commandline:
click.echo('No command given.')
exit(1)
ret = run_command(commandline, dotenv_as_dict)
exit(ret) | 0.003367 |
def get_scope_list(self) -> list:
"""
        Return the list of all enclosing scopes, from the local scope up to the global scope
"""
# by default only return scoped name
lstparent = [self]
p = self.get_parent()
while p is not None:
lstparent.append(p)
p = p.get_parent()
return lstparent | 0.005814 |
def SvcStop(self) -> None:
"""
Called when the service is being shut down.
"""
# tell the SCM we're shutting down
# noinspection PyUnresolvedReferences
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# fire the stop event
win32event.SetEvent(self.h_stop_event) | 0.005952 |
def _maybe_match_name(a, b):
"""
Try to find a name to attach to the result of an operation between
a and b. If only one of these has a `name` attribute, return that
    name. Otherwise return a consensus name if they match, or None if
they have different names.
Parameters
----------
a : object
b : object
Returns
-------
name : str or None
See Also
--------
pandas.core.common.consensus_name_attr
"""
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
# TODO: what if they both have np.nan for their names?
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None | 0.001244 |
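For illustration, here is a minimal, self-contained sketch of the consensus-name rule implemented above; the Named class and consensus_name helper are hypothetical stand-ins, not part of the original pandas code.
# Illustrative sketch only: mirrors the consensus-name rule with plain stand-in objects.
class Named(object):
    def __init__(self, name=None):
        if name is not None:
            self.name = name

def consensus_name(a, b):
    a_has = hasattr(a, 'name')
    b_has = hasattr(b, 'name')
    if a_has and b_has:
        return a.name if a.name == b.name else None
    if a_has:
        return a.name
    if b_has:
        return b.name
    return None

print(consensus_name(Named('x'), Named('x')))  # x
print(consensus_name(Named('x'), Named('y')))  # None
print(consensus_name(Named('x'), Named()))     # x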
def count_with_multiplier(groups, multiplier):
""" Update group counts with multiplier
This is for handling atom counts on groups like (OH)2
:param groups: iterable of Group/Element
:param multiplier: the number to multiply by
"""
counts = collections.defaultdict(float)
for group in groups:
for element, count in group.count().items():
counts[element] += count*multiplier
return counts | 0.002268 |
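As a rough illustration of the accumulation pattern above, the sketch below uses plain dicts as stand-ins for Group/Element objects; the data and names are made up for the demo.
import collections

# Hypothetical stand-in data: each "group" is a plain dict of element -> count,
# e.g. the formula fragment (OH)2 becomes groups=[{'O': 1}, {'H': 1}], multiplier=2.
groups = [{'O': 1}, {'H': 1}]
multiplier = 2

counts = collections.defaultdict(float)
for group in groups:
    for element, count in group.items():
        counts[element] += count * multiplier

print(dict(counts))  # {'O': 2.0, 'H': 2.0}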
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError, KeyError):
return _no_value
return self.parse(value) | 0.008403 |
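A standard-library-only sketch of the same missing-value-as-sentinel pattern is shown below; get_config_value and _no_value here are illustrative names, not luigi's actual API.
import configparser

_no_value = object()  # sentinel that distinguishes "option missing" from "option empty"

def get_config_value(conf, section, name):
    try:
        return conf.get(section, name)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return _no_value

conf = configparser.ConfigParser()
conf.read_string("[core]\nworkers = 4\n")
print(get_config_value(conf, 'core', 'workers'))               # 4
print(get_config_value(conf, 'core', 'missing') is _no_value)  # True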
def set(self, data_type, values):
"""Update/Create a new attribute and set its value(s).
Args::
data_type : attribute data type (see constants SDC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to SDC.CHAR8 and 'values' to the corresponding
string
Returns::
None
C library equivalent : SDsetattr
Attributes can also be written like ordinary python attributes,
using the dot notation. See "High level attribute access".
"""
try:
n_values = len(values)
except:
n_values = 1
values = [values]
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.SDsetattr(self._obj._id, self._name,
data_type, n_values, buf)
_checkErr('set', status, 'illegal attribute')
# Init index following attribute creation.
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute') | 0.001284 |
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
"""
log.DEV('In Fungible.transfer')
if ctx.accounts[ctx.msg_sender] >= _value:
ctx.accounts[ctx.msg_sender] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(ctx.msg_sender, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS | 0.003953 |
def normalize(es, esnull):
"""normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
"""
nEnrichmentScores =np.zeros(es.shape)
nEnrichmentNulls=np.zeros(esnull.shape)
esnull_pos = (esnull * (esnull >= 0)).mean(axis=1)
esnull_neg = (esnull * (esnull < 0)).mean(axis=1)
# calculate nESnulls
for i in range(esnull.shape[0]):
# NES
if es[i] >= 0:
nEnrichmentScores[i] = es[i] / esnull_pos[i]
else:
nEnrichmentScores[i] = - es[i] / esnull_neg[i]
# NESnull
for j in range(esnull.shape[1]):
if esnull[i,j] >= 0:
nEnrichmentNulls[i,j] = esnull[i,j] / esnull_pos[i]
else:
nEnrichmentNulls[i,j] = - esnull[i,j] / esnull_neg[i]
return nEnrichmentScores, nEnrichmentNulls | 0.009514 |
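To make the rescaling concrete, here is a small numpy-only sketch that divides positive and negative scores by the corresponding null means; the numbers are toy values, not real GSEA output.
import numpy as np

es = np.array([0.6, -0.4])                      # observed enrichment scores
esnull = np.array([[0.2, -0.1, 0.3, -0.2],      # permutation nulls, one row per gene set
                   [0.1, -0.3, 0.2, -0.5]])

pos_mean = (esnull * (esnull >= 0)).mean(axis=1)
neg_mean = (esnull * (esnull < 0)).mean(axis=1)

# Positive scores are scaled by the positive null mean, negative ones by the negative null mean.
nes = np.where(es >= 0, es / pos_mean, -es / neg_mean)
print(nes)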
def get_fields_by_class(cls, field_class):
""" Return a list of field names matching a field class
:param field_class: field class object
:return: list
"""
ret = []
for key, val in getattr(cls, '_fields').items():
if isinstance(val, field_class):
ret.append(key)
return ret | 0.005556 |
def clear_boxes(self):
"""
Clear all boxes
"""
self.tmin_box.Clear()
self.tmin_box.SetStringSelection("")
if self.current_fit:
self.tmin_box.SetItems(self.T_list)
self.tmin_box.SetSelection(-1)
self.tmax_box.Clear()
self.tmax_box.SetStringSelection("")
if self.current_fit:
self.tmax_box.SetItems(self.T_list)
self.tmax_box.SetSelection(-1)
self.fit_box.Clear()
self.fit_box.SetStringSelection("")
if self.s in self.pmag_results_data['specimens'] and self.pmag_results_data['specimens'][self.s]:
self.fit_box.SetItems(
list([x.name for x in self.pmag_results_data['specimens'][self.s]]))
for parameter in ['dec', 'inc', 'n', 'mad', 'dang', 'alpha95']:
COMMAND = "self.s%s_window.SetValue('')" % parameter
exec(COMMAND)
COMMAND = "self.s%s_window.SetBackgroundColour(wx.Colour('grey'))" % parameter
exec(COMMAND) | 0.004798 |
def read_ipv6_frag(self, length, extension):
"""Read Fragment Header for IPv6.
Structure of IPv6-Frag header [RFC 8200]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Reserved | Fragment Offset |Res|M|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 frag.next Next Header
1 8 - Reserved
2 16 frag.offset Fragment Offset
3 29 - Reserved
3 31 frag.mf More Flag
4 32 frag.id Identification
"""
if length is None:
length = len(self)
_next = self._read_protos(1)
_temp = self._read_fileng(1)
_offm = self._read_binary(2)
_ipid = self._read_unpack(4)
ipv6_frag = dict(
next=_next,
length=8,
offset=int(_offm[:13], base=2),
mf=True if int(_offm[15], base=2) else False,
id=_ipid,
)
length -= ipv6_frag['length']
ipv6_frag['packet'] = self._read_packet(header=8, payload=length)
if extension:
self._protos = None
return ipv6_frag
return self._decode_next_layer(ipv6_frag, _next, length) | 0.001179 |
def benchmark_method(f):
"decorator to turn f into a factory of benchmarks"
@wraps(f)
def inner(name, *args, **kwargs):
return Benchmark(name, f, args, kwargs)
return inner | 0.005102 |
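A runnable sketch of the same decorator-as-factory idea follows; the minimal Benchmark class is a hypothetical stand-in for the library's real one.
from functools import wraps

class Benchmark:
    # Hypothetical stand-in: stores a name plus the callable and its arguments.
    def __init__(self, name, func, args, kwargs):
        self.name, self.func, self.args, self.kwargs = name, func, args, kwargs
    def run(self):
        return self.func(*self.args, **self.kwargs)

def benchmark_method(f):
    "Turn f into a factory of Benchmark objects instead of calling it directly."
    @wraps(f)
    def inner(name, *args, **kwargs):
        return Benchmark(name, f, args, kwargs)
    return inner

@benchmark_method
def add(a, b):
    return a + b

bench = add("addition", 2, 3)   # builds a Benchmark instead of computing 2 + 3
print(bench.name, bench.run())  # addition 5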
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggTheora(filething)
filething.fileobj.seek(0)
t.delete(filething) | 0.003891 |
def get(self, key, default=None):
"""
Returns the contents of the named key.
**key** is a :ref:`type-string`, and the returned values will
either be ``list`` of key contents or ``None`` if no key was
found. ::
>>> font.lib["public.glyphOrder"]
["A", "B", "C"]
It is important to understand that any changes to the returned key
contents will not be reflected in the Lib object. If one wants to
make a change to the key contents, one should do the following::
>>> lib = font.lib["public.glyphOrder"]
>>> lib.remove("A")
>>> font.lib["public.glyphOrder"] = lib
"""
return super(BaseLib, self).get(key, default) | 0.002667 |
def _find_bounds_1d(data, x):
"""
Find the index of the lower bound where ``x`` should be inserted
    into ``data`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
"""
idx = np.searchsorted(data, x)
if idx == 0:
idx0 = 0
elif idx == len(data): # pragma: no cover
idx0 = idx - 2
else:
idx0 = idx - 1
return idx0 | 0.002581 |
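A short usage sketch of the same searchsorted-and-clamp logic on toy data; the helper below simply mirrors the function above.
import numpy as np

data = np.array([0.0, 1.0, 2.0, 3.0])

def lower_bound_index(data, x):
    # Same clamping rule: keep both bound indices inside the array.
    idx = np.searchsorted(data, x)
    if idx == 0:
        return 0
    if idx == len(data):
        return idx - 2
    return idx - 1

for x in (-0.5, 1.5, 10.0):
    print(x, lower_bound_index(data, x))  # -> 0, 1, 2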
def lambda_handler(event, context):
"""Run the script."""
body = event.get('body', dict())
events = body.get('events', list())
source_ip = str(event.get('source_ip', ''))
if len(events) == 0:
return {'success': False, 'message': "No events sent in"}
status = process_events(events, source_ip)
msg = "Wrote {} events to the cloud".format(len(events))
return {'success': True, 'message': msg} | 0.002326 |
def _calcEnergyStretchTwist(self, diff, es, which):
r"""Calculate energy for ``estype='ST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
See ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
"""
if which not in self.enGlobalTypes[:5]:
            raise ValueError('{0} is not a supported energy keyword.\n Use any of the following: \n {1}'.format(
which, self.enGlobalTypes[:5]))
energy = None
if which == 'full':
temp = np.matrix(diff)
energy = 0.5 * ((temp * es) * temp.T)
energy = energy[0,0]
if which == 'diag':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1]))
if which == 'stretch':
energy = 0.5 * (diff[0] ** 2 * es[0][0])
if which == 'twist':
energy = 0.5 * (diff[1] ** 2 * es[1][1])
if which == 'st_coupling':
energy = 0.5 * (diff[0] * diff[1] * es[0][1])
return energy | 0.003984 |
def dict(value,
allow_empty = False,
json_serializer = None,
**kwargs):
"""Validate that ``value`` is a :class:`dict <python:dict>`.
.. hint::
If ``value`` is a string, this validator will assume it is a JSON
object and try to convert it into a :class:`dict <python:dict>`
You can override the JSON serializer used by passing it to the
``json_serializer`` property. By default, will utilize the Python
:class:`json <json>` encoder/decoder.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param json_serializer: The JSON encoder/decoder to use to deserialize a
string passed in ``value``. If not supplied, will default to the Python
:class:`json <python:json>` encoder/decoder.
:type json_serializer: callable
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`dict <python:dict>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` cannot be coerced to a
:class:`dict <python:dict>`
:raises NotADictError: if ``value`` is not a :class:`dict <python:dict>`
"""
original_value = value
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if json_serializer is None:
json_serializer = json_
if isinstance(value, str):
try:
value = json_serializer.loads(value)
except Exception:
raise errors.CannotCoerceError(
'value (%s) cannot be coerced to a dict' % original_value
)
value = dict(value,
json_serializer = json_serializer)
if not isinstance(value, dict_):
raise errors.NotADictError('value (%s) is not a dict' % original_value)
return value | 0.003635 |
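The following stripped-down sketch shows the same coerce-JSON-string-to-dict behavior using only the standard library; it deliberately omits the library's custom error classes and extra keyword options.
import json

def to_dict(value, allow_empty=False):
    # Minimal sketch: accept dicts, coerce JSON strings, reject everything else.
    if not value:
        if allow_empty:
            return None
        raise ValueError('value (%s) was empty' % value)
    if isinstance(value, str):
        try:
            value = json.loads(value)
        except Exception:
            raise ValueError('value (%s) cannot be coerced to a dict' % value)
    if not isinstance(value, dict):
        raise ValueError('value (%s) is not a dict' % value)
    return value

print(to_dict('{"a": 1}'))   # {'a': 1}
print(to_dict({'a': 1}))     # {'a': 1}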
def read(self, sources, cache_duration=None):
"""
Queues the config sources to be read later (when config is accessed), or reads immediately if config has already been
accessed.
:param file/str/list sources: Config source URL (http/https), source string, file name, or file pointer, or list
of the other sources. If file source does not exist, it is ignored.
        :param int cache_duration: Default cache duration for URL sources only. Optionally cache the URL content
                                   for the given duration (seconds) to avoid downloading too often.
                                   This sets the default for this read and all subsequent reads.
:return: True if all sources were successfully read or will be read, otherwise False
"""
if cache_duration is not None:
self._cache_duration = cache_duration
return super(RemoteConfig, self).read(sources) | 0.008911 |
def run(self):
'''
Main loop of the ConCache, starts updates in intervals and
answers requests from the MWorkers
'''
context = zmq.Context()
# the socket for incoming cache requests
creq_in = context.socket(zmq.REP)
creq_in.setsockopt(zmq.LINGER, 100)
creq_in.bind('ipc://' + self.cache_sock)
# the socket for incoming cache-updates from workers
cupd_in = context.socket(zmq.SUB)
cupd_in.setsockopt(zmq.SUBSCRIBE, b'')
cupd_in.setsockopt(zmq.LINGER, 100)
cupd_in.bind('ipc://' + self.update_sock)
# the socket for the timer-event
timer_in = context.socket(zmq.SUB)
timer_in.setsockopt(zmq.SUBSCRIBE, b'')
timer_in.setsockopt(zmq.LINGER, 100)
timer_in.connect('ipc://' + self.upd_t_sock)
poller = zmq.Poller()
poller.register(creq_in, zmq.POLLIN)
poller.register(cupd_in, zmq.POLLIN)
poller.register(timer_in, zmq.POLLIN)
# our serializer
serial = salt.payload.Serial(self.opts.get('serial', ''))
# register a signal handler
signal.signal(signal.SIGINT, self.signal_handler)
# secure the sockets from the world
self.secure()
log.info('ConCache started')
while self.running:
# we check for new events with the poller
try:
socks = dict(poller.poll(1))
except KeyboardInterrupt:
self.stop()
except zmq.ZMQError as zmq_err:
log.error('ConCache ZeroMQ-Error occurred')
log.exception(zmq_err)
self.stop()
# check for next cache-request
if socks.get(creq_in) == zmq.POLLIN:
msg = serial.loads(creq_in.recv())
log.debug('ConCache Received request: %s', msg)
# requests to the minion list are send as str's
if isinstance(msg, six.string_types):
if msg == 'minions':
# Send reply back to client
reply = serial.dumps(self.minions)
creq_in.send(reply)
# check for next cache-update from workers
if socks.get(cupd_in) == zmq.POLLIN:
new_c_data = serial.loads(cupd_in.recv())
# tell the worker to exit
#cupd_in.send(serial.dumps('ACK'))
# check if the returned data is usable
if not isinstance(new_c_data, list):
log.error('ConCache Worker returned unusable result')
del new_c_data
continue
# the cache will receive lists of minions
# 1. if the list only has 1 item, its from an MWorker, we append it
# 2. if the list contains another list, its from a CacheWorker and
# the currently cached minions are replaced with that list
# 3. anything else is considered malformed
try:
if not new_c_data:
log.debug('ConCache Got empty update from worker')
continue
data = new_c_data[0]
if isinstance(data, six.string_types):
if data not in self.minions:
log.debug('ConCache Adding minion %s to cache',
new_c_data[0])
self.minions.append(data)
elif isinstance(data, list):
log.debug('ConCache Replacing minion list from worker')
self.minions = data
except IndexError:
log.debug('ConCache Got malformed result dict from worker')
del new_c_data
log.info('ConCache %s entries in cache', len(self.minions))
# check for next timer-event to start new jobs
if socks.get(timer_in) == zmq.POLLIN:
sec_event = serial.loads(timer_in.recv())
# update the list every 30 seconds
if int(sec_event % 30) == 0:
cw = CacheWorker(self.opts)
cw.start()
self.stop()
creq_in.close()
cupd_in.close()
timer_in.close()
context.term()
log.debug('ConCache Shutting down') | 0.001115 |
def get_acs(self):
"""
Returns an instance of the Asset Control Service.
"""
import predix.security.acs
acs = predix.security.acs.AccessControl()
return acs | 0.009804 |
def pmt_angles(self):
"""A list of PMT directions sorted by PMT channel, on DU-1, floor-1"""
if self._pmt_angles == []:
mask = (self.pmts.du == 1) & (self.pmts.floor == 1)
self._pmt_angles = self.pmts.dir[mask]
return self._pmt_angles | 0.007092 |
def sanitize_filename(filename):
"""preserve the file ending, but replace the name with a random token """
# TODO: fix broken splitext (it reveals everything of the filename after the first `.` - doh!)
token = generate_drop_id()
name, extension = splitext(filename)
if extension:
return '%s%s' % (token, extension)
else:
return token | 0.005362 |
def home_mode_set_state(self, state, **kwargs):
"""Set the state of Home Mode"""
# It appears that surveillance station needs lowercase text
# true/false for the on switch
if state not in (HOME_MODE_ON, HOME_MODE_OFF):
raise ValueError('Invalid home mode state')
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'Switch',
'version': api['version'],
'on': state,
'_sid': self._sid,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
if response['success']:
return True
return False | 0.002857 |
def _get_users(self, user_base):
""""Get users from LDAP"""
results = self._search(
getattr(self, '_%s_user_base' % user_base),
'(objectClass=*)',
['*'],
scope=ldap.SCOPE_ONELEVEL
)
for dn, attrs in results:
uid = attrs.get('uid')[0].decode('utf-8', 'ignore')
getattr(self, '_%s_users' % user_base)[uid] = FreeIPAUser(dn, attrs)
# print(attrs)
log.debug('%s users: %s' % (user_base.capitalize(), len(getattr(self, '_%s_users' % user_base)))) | 0.00708 |
def setRecordSet( self, recordSet ):
"""
Sets the record set instance that this widget will use.
:param recordSet | <orb.RecordSet>
"""
if ( recordSet ):
self.setQuery( recordSet.query() )
self.setGroupBy( recordSet.groupBy() )
self.setPageSize( recordSet.pageSize() )
self.setSortBy( recordSet.order() )
self.uiPagedCHK.setChecked( recordSet.isPaged() )
else:
self.setQuery(Q())
self.setGroupBy('')
self.setPageSize(100)
self.setSortBy('')
self.uiPagedCHK.setChecked( False ) | 0.029289 |
def _check_equal_shape(name,
static_shape,
dynamic_shape,
static_target_shape,
dynamic_target_shape=None):
"""Check that source and target shape match, statically if possible."""
static_target_shape = tf.TensorShape(static_target_shape)
if tensorshape_util.is_fully_defined(
static_shape) and tensorshape_util.is_fully_defined(static_target_shape):
if static_shape != static_target_shape:
raise ValueError("{}: required shape {} but found {}".
format(name, static_target_shape, static_shape))
return None
else:
if dynamic_target_shape is None:
if tensorshape_util.is_fully_defined(static_target_shape):
dynamic_target_shape = tensorshape_util.as_list(static_target_shape)
else:
raise ValueError("{}: cannot infer target shape: no dynamic shape "
"specified and static shape {} is not fully defined".
format(name, static_target_shape))
return assert_util.assert_equal(
dynamic_shape,
dynamic_target_shape,
message=("{}: required shape {}".format(name, static_target_shape))) | 0.007335 |
def prefix(prefix):
"""Returns a dictionary of all environment variables starting with
the given prefix, lower cased and stripped.
"""
d = {}
e = lower_dict(environ.copy())
prefix = prefix.lower()
for k, v in e.items():
try:
if k.startswith(prefix):
k = k[len(prefix):]
d[k] = v
except AttributeError:
pass
return d | 0.002364 |
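A self-contained sketch of the same prefix-filtering idea against os.environ is shown below; the MYAPP_* variables are made up and set only for the demo.
import os

def env_with_prefix(prefix):
    # Lower-case the environment, keep keys starting with the prefix, strip the prefix.
    prefix = prefix.lower()
    env = {k.lower(): v for k, v in os.environ.items()}
    return {k[len(prefix):]: v for k, v in env.items() if k.startswith(prefix)}

os.environ['MYAPP_DEBUG'] = '1'        # hypothetical variables, set just for the demo
os.environ['MYAPP_DB_URL'] = 'sqlite://'
print(env_with_prefix('MYAPP_'))       # {'debug': '1', 'db_url': 'sqlite://'}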
def collectSingleContribs(self, measure='LFP'):
"""
Collect single cell data and save them to HDF5 file.
The function will also return signals generated by all cells
Parameters
----------
measure : str
{'LFP', 'CSD'}: Either 'LFP' or 'CSD'.
Returns
-------
numpy.ndarray
            output of all neurons in the population; axis 0 corresponds to the neuron index
"""
try:
assert(self.recordSingleContribFrac <= 1 and
self.recordSingleContribFrac >= 0)
        except AssertionError:
            raise AssertionError('recordSingleContribFrac {} not in [0, 1]'.format(
                self.recordSingleContribFrac))
if not self.recordSingleContribFrac:
return
else:
#reconstruct RANK_CELLINDICES of all RANKs for controlling
#communication
if self.recordSingleContribFrac == 1.:
SAMPLESIZE = self.POPULATION_SIZE
RANK_CELLINDICES = []
for i in range(SIZE):
RANK_CELLINDICES += [self.CELLINDICES[
self.CELLINDICES % SIZE == i]]
else:
SAMPLESIZE = int(self.recordSingleContribFrac *
self.POPULATION_SIZE)
RANK_CELLINDICES = []
for i in range(SIZE):
ids = self.CELLINDICES[self.CELLINDICES % SIZE == i]
RANK_CELLINDICES += [ids[ids < SAMPLESIZE]]
#gather data on this RANK
if RANK_CELLINDICES[RANK].size > 0:
for i, cellindex in enumerate(RANK_CELLINDICES[RANK]):
if i == 0:
data_temp = np.zeros([RANK_CELLINDICES[RANK].size] +
list(self.output[cellindex
][measure].shape),
dtype=np.float32)
data_temp[i, ] = self.output[cellindex][measure]
if RANK == 0:
#container of all output
data = np.zeros([SAMPLESIZE] +
list(self.output[cellindex][measure].shape),
dtype=np.float32)
#fill in values from this RANK
if RANK_CELLINDICES[0].size > 0:
for j, k in enumerate(RANK_CELLINDICES[0]):
data[k, ] = data_temp[j, ]
#iterate over all other RANKs
for i in range(1, len(RANK_CELLINDICES)):
if RANK_CELLINDICES[i].size > 0:
#receive on RANK 0 from all other RANK
data_temp = np.zeros([RANK_CELLINDICES[i].size] +
list(self.output[cellindex
][measure].shape),
dtype=np.float32)
COMM.Recv([data_temp, MPI.FLOAT], source=i, tag=13)
#fill in values
for j, k in enumerate(RANK_CELLINDICES[i]):
data[k, ] = data_temp[j, ]
else:
data = None
if RANK_CELLINDICES[RANK].size > 0:
#send to RANK 0
COMM.Send([data_temp, MPI.FLOAT], dest=0, tag=13)
if RANK == 0:
#save all single-cell data to file
fname = os.path.join(self.populations_path,
'%s_%ss.h5' % (self.y, measure))
f = h5py.File(fname, 'w')
f.create_dataset('data', data=data, compression=4)
f['srate'] = self.output[0]['srate']
f.close()
assert(os.path.isfile(fname))
print('file %s_%ss.h5 ok' % (self.y, measure))
COMM.Barrier()
return data | 0.004896 |
def request_callback_answer(
self,
chat_id: Union[int, str],
message_id: int,
callback_data: bytes
):
"""Use this method to request a callback answer from bots.
This is the equivalent of clicking an inline button containing callback data.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
The message id the inline keyboard is attached on.
callback_data (``bytes``):
Callback data associated with the inline button you want to get the answer from.
Returns:
The answer containing info useful for clients to display a notification at the top of the chat screen
or as an alert.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``TimeoutError`` if the bot fails to answer within 10 seconds.
"""
return self.send(
functions.messages.GetBotCallbackAnswer(
peer=self.resolve_peer(chat_id),
msg_id=message_id,
data=callback_data
),
retries=0,
timeout=10
) | 0.006085 |
def update_alarm(self, entity, alarm, criteria=None, disabled=False,
label=None, name=None, metadata=None):
"""
Updates an existing alarm on the given entity.
"""
return entity.update_alarm(alarm, criteria=criteria, disabled=disabled,
label=label, name=name, metadata=metadata) | 0.012012 |
def replace_anc(dataset, parent_dataset):
"""Replace *dataset* the *parent_dataset*'s `ancillary_variables` field."""
if parent_dataset is None:
return
current_dsid = DatasetID.from_dict(dataset.attrs)
for idx, ds in enumerate(parent_dataset.attrs['ancillary_variables']):
if current_dsid == DatasetID.from_dict(ds.attrs):
parent_dataset.attrs['ancillary_variables'][idx] = dataset
return | 0.002252 |
def add_records(self, domain, records):
"""
Adds the records to this domain. Each record should be a dict with the
following keys:
- type (required)
- name (required)
- data (required)
- ttl (optional)
- comment (optional)
- priority (required for MX and SRV records; forbidden otherwise)
"""
if isinstance(records, dict):
# Single record passed
records = [records]
dom_id = utils.get_id(domain)
uri = "/domains/%s/records" % dom_id
body = {"records": records}
resp, resp_body = self._async_call(uri, method="POST", body=body,
error_class=exc.DomainRecordAdditionFailed, has_response=False)
records = resp_body.get("response", {}).get("records", [])
for record in records:
record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
for record in records if record] | 0.002938 |
def hybrid_forward(self, F, samples, valid_length, outputs, scores, beam_alive_mask, states):
"""
Parameters
----------
F
samples : NDArray or Symbol
The current samples generated by beam search. Shape (batch_size, beam_size, L)
valid_length : NDArray or Symbol
The current valid lengths of the samples
outputs: NDArray or Symbol
Decoder output (unnormalized) scores of the current step.
Shape (batch_size * beam_size, V)
scores : NDArray or Symbol
The previous scores. Shape (batch_size, beam_size)
beam_alive_mask : NDArray or Symbol
Shape (batch_size, beam_size)
states : nested structure of NDArrays/Symbols
Inner NDArrays have shape (batch_size * beam_size, ...)
Returns
-------
new_samples : NDArray or Symbol
The updated samples. Shape (batch_size, beam_size, L + 1)
new_valid_length : NDArray or Symbol
Valid lengths of the samples. Shape (batch_size, beam_size)
new_scores : NDArray or Symbol
Shape (batch_size, beam_size)
chosen_word_ids : NDArray or Symbol
The chosen word ids of the step. Shape (batch_size, beam_size). If it's negative,
no word will be appended to the beam.
beam_alive_mask : NDArray or Symbol
Shape (batch_size, beam_size)
new_states : nested structure of NDArrays/Symbols
Inner NDArrays have shape (batch_size * beam_size, ...)
"""
beam_size = self._beam_size
# outputs: (batch_size, beam_size, vocab_size)
outputs = outputs.reshape(shape=(-4, -1, beam_size, 0))
smoothed_probs = (outputs / self._temperature).softmax(axis=2)
log_probs = F.log_softmax(outputs, axis=2).reshape(-3, -1)
# (batch_size, beam_size)
chosen_word_ids = F.sample_multinomial(smoothed_probs, dtype=np.int32)
chosen_word_ids = F.where(beam_alive_mask,
chosen_word_ids,
-1*F.ones_like(beam_alive_mask))
chosen_word_log_probs = log_probs[mx.nd.arange(log_probs.shape[0]),
chosen_word_ids.reshape(-1)].reshape(-4, -1, beam_size)
# Don't update for finished beams
new_scores = scores + F.where(beam_alive_mask,
chosen_word_log_probs,
F.zeros_like(chosen_word_log_probs))
new_valid_length = valid_length + beam_alive_mask
# Update the samples and vaild_length
new_samples = F.concat(samples, chosen_word_ids.expand_dims(2), dim=2)
# Update the states
new_states = states
# Update the alive mask.
beam_alive_mask = beam_alive_mask * (chosen_word_ids != self._eos_id)
return new_samples, new_valid_length, new_scores,\
chosen_word_ids, beam_alive_mask, new_states | 0.002302 |
def handle_ref(attr, language=DEFAULT_LANG):
"""
Receives something like:
{
"$ref": "#/files/description/1"
},
Or:
{
"$ref": "#/files/fix/39"
}
And returns the contents of the description or fix file.
:param attr: A dict containing a reference
:param language: The user's language (en, es, etc.)
:return: Markdown referenced by the attr
"""
ref = attr.get('$ref', None)
if ref is None:
raise NotFoundException('No $ref in attribute')
_, files, _type, _id = ref.split('/')
if 'files' != files:
raise NotFoundException('Mandatory "files" path was not found in $ref')
if _type not in ('fix', 'description'):
raise NotFoundException('Mandatory fix or description not found in $ref')
if not _id.isdigit():
raise NotFoundException('Mandatory integer ID not found in $ref')
file_path = os.path.join(DBVuln.get_json_path(language=language),
_type,
'%s.md' % _id)
if not os.path.exists(file_path):
raise NotFoundException('$ref points to a non existing file')
    return open(file_path).read()
def delete(self, table, condition):
""".. :py:method::
Usage::
>>> delete('hospital', {'id': '12de3wrv'})
delete from hospital where id='12de3wrv';
"""
sql = "delete from {}".format(table)
sql += self.parse_condition(condition) + ";"
super(PGWrapper, self).execute(sql, result=False) | 0.005587 |
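As a hedged alternative sketch (not the class's actual parse_condition logic), the helper below builds the same kind of DELETE statement with driver-style %s placeholders and a parameter tuple, leaving value quoting to the database driver:
def build_delete(table, condition):
    # Build "delete from t where k = %s and ..." plus a matching parameter tuple.
    keys = list(condition)
    where = ' and '.join('{} = %s'.format(k) for k in keys)
    sql = 'delete from {} where {};'.format(table, where)
    params = tuple(condition[k] for k in keys)
    return sql, params

print(build_delete('hospital', {'id': '12de3wrv'}))
# ('delete from hospital where id = %s;', ('12de3wrv',))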
def update(self, resource, force=False, timeout=-1):
"""
Updates the Deployment Server resource. The properties that are omitted (not included as part
of the request body) are ignored.
Args:
resource (dict): Object to update.
force:
If set to true, the operation completes despite any problems with network connectivity or errors on
the resource itself. The default is false.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
Updated resource.
"""
return self._client.update(resource, timeout=timeout, force=force) | 0.006242 |
def parse_from_dict(json_dict):
"""
Given a Unified Uploader message, parse the contents and return a
MarketHistoryList instance.
:param dict json_dict: A Unified Uploader message as a dict.
    :rtype: MarketHistoryList
    :returns: An instance of MarketHistoryList, containing the history
        entries within.
"""
history_columns = json_dict['columns']
history_list = MarketHistoryList(
upload_keys=json_dict['uploadKeys'],
history_generator=json_dict['generator'],
)
for rowset in json_dict['rowsets']:
generated_at = parse_datetime(rowset['generatedAt'])
region_id = rowset['regionID']
type_id = rowset['typeID']
history_list.set_empty_region(region_id, type_id, generated_at)
for row in rowset['rows']:
history_kwargs = _columns_to_kwargs(
SPEC_TO_KWARG_CONVERSION, history_columns, row)
historical_date = parse_datetime(history_kwargs['historical_date'])
history_kwargs.update({
'type_id': type_id,
'region_id': region_id,
'historical_date': historical_date,
'generated_at': generated_at,
})
history_list.add_entry(MarketHistoryEntry(**history_kwargs))
return history_list | 0.000762 |
def _set_options(self, qobj_config=None, backend_options=None):
"""Set the backend options for all experiments in a qobj"""
# Reset default options
self._initial_unitary = self.DEFAULT_OPTIONS["initial_unitary"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
if backend_options is None:
backend_options = {}
# Check for custom initial statevector in backend_options first,
# then config second
if 'initial_unitary' in backend_options:
self._initial_unitary = np.array(backend_options['initial_unitary'],
dtype=complex)
elif hasattr(qobj_config, 'initial_unitary'):
self._initial_unitary = np.array(qobj_config.initial_unitary,
dtype=complex)
if self._initial_unitary is not None:
# Check the initial unitary is actually unitary
shape = np.shape(self._initial_unitary)
if len(shape) != 2 or shape[0] != shape[1]:
raise BasicAerError("initial unitary is not a square matrix")
iden = np.eye(len(self._initial_unitary))
u_dagger_u = np.dot(self._initial_unitary.T.conj(),
self._initial_unitary)
norm = np.linalg.norm(u_dagger_u - iden)
if round(norm, 10) != 0:
raise BasicAerError("initial unitary is not unitary")
# Check the initial statevector is normalized
# Check for custom chop threshold
# Replace with custom options
if 'chop_threshold' in backend_options:
self._chop_threshold = backend_options['chop_threshold']
elif hasattr(qobj_config, 'chop_threshold'):
self._chop_threshold = qobj_config.chop_threshold | 0.001621 |
def autoreg(self, data: ['SASdata', str] = None,
by: [str, list] = None,
cls: [str, list] = None,
hetero: str = None,
model: str = None,
nloptions: str = None,
output: [str, bool, 'SASdata'] = None,
restrict: str = None,
test: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the AUTOREG procedure
Documentation link:
:param data: SASdata object or string. This parameter is required.
:parm by: The by variable can be a string or list type.
:parm cls: The cls variable can be a string or list type. It refers to the categorical, or nominal variables.
:parm hetero: The hetero variable can only be a string type.
:parm model: The model variable can only be a string type.
:parm nloptions: The nloptions variable can only be a string type.
:parm output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:parm restrict: The restrict variable can only be a string type.
:parm test: The test variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | 0.011009 |
def p_file_cr_value_1(self, p):
"""file_cr_value : TEXT"""
if six.PY2:
p[0] = p[1].decode(encoding='utf-8')
else:
p[0] = p[1] | 0.011561 |
def nlerp_quat(from_quat, to_quat, percent):
"""Return normalized linear interpolation of two quaternions.
Less computationally expensive than slerp (which not implemented in this
lib yet), but does not maintain a constant velocity like slerp.
"""
result = lerp_quat(from_quat, to_quat, percent)
result.normalize()
return result | 0.002793 |
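A numpy-only sketch of normalized linear interpolation on quaternions stored as plain 4-vectors; the (x, y, z, w) array layout is an assumption and the original library's quaternion type is not used.
import numpy as np

def nlerp(q0, q1, t):
    # Linear interpolation followed by renormalization (nlerp).
    q = (1.0 - t) * np.asarray(q0, dtype=float) + t * np.asarray(q1, dtype=float)
    return q / np.linalg.norm(q)

identity = np.array([0.0, 0.0, 0.0, 1.0])                        # x, y, z, w
rot90_z = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)])
print(nlerp(identity, rot90_z, 0.5))                             # unit-length halfway rotation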
def _sync_io(self):
"""Update the stream with changes to the file object contents."""
if self._file_epoch == self.file_object.epoch:
return
if self._io.binary:
contents = self.file_object.byte_contents
else:
contents = self.file_object.contents
self._set_stream_contents(contents)
self._file_epoch = self.file_object.epoch | 0.004902 |
def result_to_dict(raw_result):
"""
Parse raw result from fetcher into readable dictionary
Args:
raw_result (dict) - raw data from `fetcher`
Returns:
dict - readable dictionary
"""
result = {}
for channel_index, channel in enumerate(raw_result):
channel_id, channel_name = channel[0], channel[1]
channel_result = {
'id': channel_id,
'name': channel_name,
'movies': []
}
for movie in channel[2]:
channel_result['movies'].append({
'title': movie[1],
'start_time': datetime.fromtimestamp(movie[2]),
'end_time': datetime.fromtimestamp(movie[2] + movie[3]),
'inf': True if movie[3] else False,
})
result[channel_id] = channel_result
return result | 0.001166 |
def _getEndpoint(self, add_tags=None):
"""
Override Build Loggly's RESTful API endpoint
"""
return 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format(
self.custom_token,
self._implodeTags(add_tags=add_tags)
) | 0.007246 |