| text (string, 78–104k chars) | score (float64, 0–0.18) |
|---|---|
def _chooseBestSegmentPerColumn(cls, connections, matchingCells,
allMatchingSegments, potentialOverlaps,
cellsPerColumn):
"""
For all the columns covered by 'matchingCells', choose the column's matching
segment with the largest number of active potential synapses. When there's a
tie, the first segment wins.
@param connections (SparseMatrixConnections)
@param matchingCells (numpy array)
@param allMatchingSegments (numpy array)
@param potentialOverlaps (numpy array)
"""
candidateSegments = connections.filterSegmentsByCell(allMatchingSegments,
matchingCells)
# Narrow it down to one segment per column.
cellScores = potentialOverlaps[candidateSegments]
columnsForCandidates = (connections.mapSegmentsToCells(candidateSegments) /
cellsPerColumn)
onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates)
learningSegments = candidateSegments[onePerColumnFilter]
return learningSegments | 0.003581 |
def intersection(line1, line2):
"""
Return the coordinates of a point of intersection given two lines.
Return None if the lines are parallel, but non-collinear.
Return an arbitrary point of intersection if the lines are collinear.
Parameters:
line1 and line2: lines each given by 4 coordinates (x0,y0,x1,y1).
"""
x1, y1, x2, y2 = line1
u1, v1, u2, v2 = line2
(a, b), (c, d) = (x2 - x1, u1 - u2), (y2 - y1, v1 - v2)
e, f = u1 - x1, v1 - y1
# Solve ((a,b), (c,d)) * (t,s) = (e,f)
denom = float(a * d - b * c)
if _near(denom, 0):
# parallel
# If collinear, the equation is solvable with t = 0.
# When t=0, s would have to equal e/b and f/d
if b == 0 or d == 0:
return None
if _near(e / b, f / d):
# collinear
px = x1
py = y1
else:
return None
else:
t = (e * d - b * f) / denom
# s = (a*f - e*c)/denom
px = x1 + t * (x2 - x1)
py = y1 + t * (y2 - y1)
return px, py | 0.000945 |
def get_placeholder_data(self, request, obj=None):
"""
Return the data of the placeholder fields.
"""
# Return all placeholder fields in the model.
if not hasattr(self.model, '_meta_placeholder_fields'):
return []
data = []
for name, field in self.model._meta_placeholder_fields.items():
assert isinstance(field, PlaceholderField)
data.append(PlaceholderData(
slot=field.slot,
title=field.verbose_name.capitalize(),
fallback_language=None, # Information can't be known by "render_placeholder" in the template.
))
return data | 0.004367 |
def _reset_corpus_iterator(self):
"""
create an iterator over all documents in the file (i.e. all
<text> elements).
Once you have iterated over all documents, call this method again
if you want to iterate over them again.
"""
self.__context = etree.iterparse(self.urml_file, events=('end',),
tag='document', recover=False) | 0.004762 |
def compute(mechanism, subsystem, purviews, cause_purviews,
effect_purviews):
"""Compute a |Concept| for a mechanism, in this |Subsystem| with the
provided purviews.
"""
concept = subsystem.concept(mechanism,
purviews=purviews,
cause_purviews=cause_purviews,
effect_purviews=effect_purviews)
# Don't serialize the subsystem.
# This is replaced on the other side of the queue, and ensures
# that all concepts in the CES reference the same subsystem.
concept.subsystem = None
return concept | 0.004386 |
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary | 0.00789 |
def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs) | 0.007722 |
def add(self, response, label=None):
"""add one response object to the list
"""
if not isinstance(response, sip_response.sip_response):
raise Exception(
'can only add sip_response.sip_response objects'
)
self.objects.append(response)
if label is None:
self.labels.append('na')
else:
self.labels.append(label) | 0.004762 |
def close_hover(self, element, use_js=False):
"""
Close hover by moving to a set offset "away" from the element being hovered.
:param element: element that triggered the hover to open
:param use_js: use javascript to close hover
:return: None
"""
try:
if use_js:
self._js_hover('mouseout', element)
else:
actions = ActionChains(self.driver)
actions.move_to_element_with_offset(element, -100, -100)
actions.reset_actions()
except (StaleElementReferenceException, MoveTargetOutOfBoundsException):
return True | 0.005961 |
def create_size_estimators():
"""Create a dict of name to a function that returns an estimated size for a given target.
The estimated size is used to build the largest targets first (subject to dependency constraints).
Choose 'random' to choose random sizes for each target, which may be useful for distributed
builds.
:returns: Dict of a name to a function that returns an estimated size.
"""
def line_count(filename):
with open(filename, 'rb') as fh:
return sum(1 for line in fh)
return {
'linecount': lambda srcs: sum(line_count(src) for src in srcs),
'filecount': lambda srcs: len(srcs),
'filesize': lambda srcs: sum(os.path.getsize(src) for src in srcs),
'nosize': lambda srcs: 0,
'random': lambda srcs: random.randint(0, 10000),
} | 0.010191 |
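A hedged usage sketch (assuming the module-level os and random imports the function relies on): the returned dict maps estimator names to callables over a list of source paths.

estimators = create_size_estimators()
sources = ['a.py', 'b.py']                    # hypothetical source paths
print(estimators['filecount'](sources))       # -> 2
print(estimators['nosize'](sources))          # -> 0
# 'linecount' and 'filesize' open the files, so those require the paths to exist.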
def parse_text_to_table(txt):
"""
takes a blob of text and finds a delimiter, or guesses
the column positions, to parse it into a table.
input: txt = blob of text, lines separated by \n
output: res = table of text
"""
res = [] # resulting table
delim = identify_delim(txt)
print('txt to parse = ', txt, '\ndelim=',delim)
if delim == '' or delim == ' ':
fixed_split = identify_col_pos(txt)
if fixed_split == []:
res = []
else:
res = parse_text_by_col_pos(txt, fixed_split)
else:
res = parse_text_by_delim(txt, delim)
return res | 0.035897 |
def read_data(file_path):
"""
Reads a file and returns a json encoded representation of the file.
"""
if not is_valid(file_path):
write_data(file_path, {})
db = open_file_for_reading(file_path)
content = db.read()
obj = decode(content)
db.close()
return obj | 0.003268 |
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load) | 0.001919 |
def main():
"""
NAME
plotxy_magic.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
Any MagIC formatted file
SYNTAX
plotxy_magic.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on the command line
-c col1 col2 specify column names to plot
-sym SYM SIZE specify symbol and size to plot: default is red dots
-S don't plot symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-b xmin xmax ymin ymax, sets bounds
# -b [key:max:min,key:max:min,etc.] leave or min blank for no cutoff
"""
col1,col2=0,1
sym,size = 'ro',20
xlab,ylab='',''
lines=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
print('-f option is a required field')
print(main.__doc__)
sys.exit()
if '-c' in sys.argv:
ind=sys.argv.index('-c')
col1=sys.argv[ind+1]
col2=sys.argv[ind+2]
else:
print('Column headers are a required field')
print(main.__doc__)
sys.exit()
if '-xlab' in sys.argv:
ind=sys.argv.index('-xlab')
xlab=sys.argv[ind+1]
if '-ylab' in sys.argv:
ind=sys.argv.index('-ylab')
ylab=sys.argv[ind+1]
# if '-b' in sys.argv:
# ind=sys.argv.index('-b')
# bounds=sys.argv[ind+1].split(',')
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
if '-sym' in sys.argv:
ind=sys.argv.index('-sym')
sym=sys.argv[ind+1]
size=int(sys.argv[ind+2])
if '-l' in sys.argv: lines=1
if '-S' in sys.argv: sym=''
X,Y=[],[]
data,file_type=pmag.magic_read(file)
print(file_type)
for rec in data:
if col1 not in list(rec.keys()) or col2 not in list(rec.keys()):
print(col1,' and/or ',col2, ' not in file headers')
print('try again')
sys.exit()
if rec[col1]!='' and rec[col2]!='':
skip=0
if '-crit' in sys.argv:
for crit in bounds:
crits=crit.split(':')
crit_key=crits[0]
crit_min=crits[1]
crit_max=crits[2]
if rec[crit_key]=="":
skip=1
else:
if crit_min!="" and float(rec[crit_key])<float(crit_min):skip=1
if crit_max!="" and float(rec[crit_key])>float(crit_max):skip=1
if skip==0:
X.append(float(rec[col1]))
Y.append(float(rec[col2]))
if len(X)==0:
print(col1,' and/or ',col2, ' have no data ')
print('try again')
sys.exit()
else:
print(len(X),' data points')
if sym!='':pylab.scatter(X,Y,c=sym[0],marker=sym[1],s=size)
if xlab!='':pylab.xlabel(xlab)
if ylab!='':pylab.ylabel(ylab)
if lines==1:pylab.plot(X,Y,'k-')
if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax])
pylab.draw()
ans=input("Press return to quit ")
sys.exit() | 0.027586 |
def ostree_path(self):
""" ostree repository -- content """
if self._ostree_path is None:
self._ostree_path = os.path.join(self.tmpdir, "ostree-repo")
subprocess.check_call(["ostree", "init", "--mode", "bare-user-only",
"--repo", self._ostree_path])
return self._ostree_path | 0.008403 |
def code_description(self, column=None, value=None, **kwargs):
"""
The Permit Compliance System (PCS) records milestones, events, and many
other parameters in code format. To provide text descriptions that
explain the code meanings, the PCS_CODE_DESC provides complete
information on all types of codes, and for each type, the text
description of each possible code value.
>>> PCS().code_description('code', 110)
"""
return self._resolve_call('PCS_CODE_DESC', column, value, **kwargs) | 0.003584 |
def find_peakset(dataset, basecolumn=-1, method='', where=None):
"""
Find peakset from the dataset
Parameters
-----------
dataset : list
A list of data
basecolumn : int
An index of column for finding peaks
method : str
A method name of numpy for finding peaks
where : function
A function which receives ``data`` and returns a numpy indexing list
Returns
-------
list
A list of peaks of each axis (list)
"""
peakset = []
where_i = None
for data in dataset:
base = data[basecolumn]
base = maidenhair.statistics.average(base)
# limit data points
if where:
adata = [maidenhair.statistics.average(x) for x in data]
where_i = np.where(where(adata))
base = base[where_i]
# find peak index
index = getattr(np, method, np.argmax)(base)
# create peakset
for a, axis in enumerate(data):
if len(peakset) <= a:
peakset.append([])
if where_i:
axis = axis[where_i]
peakset[a].append(axis[index])
peakset = np.array(peakset)
return peakset | 0.000835 |
def persist_upstream_diagram(self, filepath):
"""Creates upstream steps diagram and persists it to disk as png file.
Pydot graph is created and persisted to disk as png file under the filepath directory.
Args:
filepath (str): filepath to which the png with steps visualization should
be persisted
"""
assert isinstance(filepath, str),\
'Step {} error, filepath must be str. Got {} instead'.format(self.name, type(filepath))
persist_as_png(self.upstream_structure, filepath) | 0.008881 |
def get_vlan_brief_output_vlan_vlan_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id = ET.SubElement(vlan, "vlan-id")
vlan_id.text = kwargs.pop('vlan_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003929 |
def to_google_str(self):
""" Convert to Google's bounds format: 'latMin,lonMin|latMax,lonMax' """
vb = self.convert_srs(4326)
return '%s,%s|%s,%s' % (vb.bottom, vb.left, vb.top, vb.right) | 0.014218 |
def modify_order(self, orderId, quantity=None, limit_price=None):
"""Modify quantity and/or limit price of an active order for the instrument
:Parameters:
orderId : int
the order id to modify
:Optional:
quantity : int
the required quantity of the modified order
limit_price : int
the new limit price of the modified order
"""
return self.parent.modify_order(self, orderId, quantity, limit_price) | 0.005758 |
def get_splits(self, id_num, unit='mi'):
"""Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'.
"""
url = self._build_url('my', 'activities', id_num, 'splits', unit)
return self._json(url) | 0.006192 |
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values) | 0.006557 |
def format(logger,
show_successful=True,
show_errors=True,
show_traceback=True):
"""
Prints a report of the actions that were logged by the given Logger.
The report contains a list of successful actions, as well as the full
error message on failed actions.
:type logger: Logger
:param logger: The logger that recorded what happened in the queue.
:rtype: string
:return: A string summarizing the status of every performed task.
"""
output = []
# Print failed actions.
errors = logger.get_aborted_actions()
if show_errors and errors:
output += _underline('Failed actions:')
for log in logger.get_aborted_logs():
if show_traceback:
output.append(log.get_name() + ':')
output.append(log.get_error())
else:
output.append(log.get_name() + ': ' + log.get_error(False))
output.append('')
# Print successful actions.
if show_successful:
output += _underline('Successful actions:')
for log in logger.get_succeeded_logs():
output.append(log.get_name())
output.append('')
return '\n'.join(output).strip() | 0.000815 |
def decode_chain_list(in_bytes):
"""Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return: the decoded list of strings"""
tot_strings = len(in_bytes) // mmtf.utils.constants.CHAIN_LEN
out_strings = []
for i in range(tot_strings):
out_s = in_bytes[i * mmtf.utils.constants.CHAIN_LEN:i * mmtf.utils.constants.CHAIN_LEN + mmtf.utils.constants.CHAIN_LEN]
out_strings.append(out_s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE))
return out_strings | 0.007067 |
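A standalone sketch of the same fixed-width decoding, assuming a chain length of 4 and null-byte padding (the real constants live in mmtf.utils.constants and are not imported here):

CHAIN_LEN = 4        # assumed value, for illustration only
NULL_BYTE = '\x00'

def decode_chain_list_demo(in_bytes):
    tot_strings = len(in_bytes) // CHAIN_LEN
    return [in_bytes[i * CHAIN_LEN:(i + 1) * CHAIN_LEN].decode("ascii").strip(NULL_BYTE)
            for i in range(tot_strings)]

print(decode_chain_list_demo(b'A\x00\x00\x00BB\x00\x00'))  # -> ['A', 'BB']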
def location_name(self, name):
""" location.name """
response = self._request(
'location.name',
input=name)
return _get_node(response, 'LocationList', 'StopLocation') | 0.009346 |
def foex(a, b):
"""Returns the factor of exceedance
"""
return (np.sum(a > b, dtype=float) / len(a) - 0.5) * 100 | 0.008065 |
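A quick usage sketch (relies on the foex function above and numpy imported as np): with exactly half of `a` exceeding `b`, the factor of exceedance is zero.

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([2.5, 2.5, 2.5, 2.5])
print(foex(a, b))  # (2/4 - 0.5) * 100 -> 0.0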
def next_session_label(self, session_label):
"""
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise | 0.002294 |
def getmodeldefinition(self, storageobject, required=False):
""" find modeldefinition for StorageTableModel or StorageTableQuery """
if isinstance(storageobject, StorageTableModel):
definitionlist = [definition for definition in self._modeldefinitions if definition['modelname'] == storageobject.__class__.__name__]
elif isinstance(storageobject, StorageTableQuery):
""" StorageTableQuery """
storagemodel = storageobject._storagemodel
definitionlist = [definition for definition in self._modeldefinitions if definition['modelname'] == storagemodel.__class__.__name__]
else:
raise Exception("Argument is not an StorageTableModel nor an StorageTableQuery")
# is there only one modeldefinition ?
# hopefully!
modeldefinition = None
if len(definitionlist) == 1:
modeldefinition = definitionlist[0]
elif len(definitionlist) > 1:
raise ModelRegisteredMoreThanOnceError(storageobject)
# is there a modeldefinition if required ?
if required and modeldefinition is None:
raise ModelNotRegisteredError(storageobject)
return modeldefinition | 0.005344 |
def times(self, factor):
"""Return a new set with each element's multiplicity multiplied with the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor == 0:
return self.__class__()
if factor < 0:
raise ValueError('The factor must not be negative.')
result = self.__copy__()
_elements = result._elements
for element in _elements:
_elements[element] *= factor
result._total *= factor
return result | 0.003226 |
def subproduct(self, ext):
"""Makes a subproduct filename by calling subproductPath(), then calls
subproductUpToDate() to determine if it is up-to-date.
Returns tuple of basename,path,uptodate
"""
fname, fpath = self.subproductPath(ext)
return fname, fpath, self.subproductUpToDate(fpath) | 0.005952 |
def parse_file(self, file_path, currency) -> List[PriceModel]:
""" Load and parse a .csv file """
# load file
# read csv into memory?
contents = self.load_file(file_path)
prices = []
# parse price elements
for line in contents:
price = self.parse_line(line)
assert isinstance(price, PriceModel)
price.currency = currency
prices.append(price)
return prices | 0.006289 |
def move_item(self, assessment_id, item_id, preceeding_item_id):
"""Moves an existing item to follow another item in an assessment.
arg: assessment_id (osid.id.Id): the ``Id`` of the
``Assessment``
arg: item_id (osid.id.Id): the ``Id`` of an ``Item``
arg: preceeding_item_id (osid.id.Id): the ``Id`` of a
preceeding ``Item`` in the sequence
raise: NotFound - ``assessment_id`` is not found, or
``item_id`` or ``preceeding_item_id`` not on
``assessment_id``
raise: NullArgument - ``assessment_id, item_id`` or
``preceeding_item_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
if assessment_id.get_identifier_namespace() != 'assessment.Assessment':
raise errors.InvalidArgument
self._part_item_design_session.move_item_behind(item_id, self._get_first_part_id(assessment_id), preceeding_item_id) | 0.002639 |
def read_struct_array(fd, endian, header):
"""Read a struct array.
Returns a dict with fields of the struct array.
"""
# read field name length (unused, as strings are null terminated)
field_name_length = read_elements(fd, endian, ['miINT32'])
if field_name_length > 32:
raise ParseError('Unexpected field name length: {}'.format(
field_name_length))
# read field names
fields = read_elements(fd, endian, ['miINT8'], is_name=True)
if isinstance(fields, basestring):
fields = [fields]
# read rows and columns of each field
empty = lambda: [list() for i in range(header['dims'][0])]
array = {}
for row in range(header['dims'][0]):
for col in range(header['dims'][1]):
for field in fields:
# read the matrix header and array
vheader, next_pos, fd_var = read_var_header(fd, endian)
data = read_var_array(fd_var, endian, vheader)
if field not in array:
array[field] = empty()
array[field][row].append(data)
# move on to next field
fd.seek(next_pos)
# pack the nested arrays
for field in fields:
rows = array[field]
for i in range(header['dims'][0]):
rows[i] = squeeze(rows[i])
array[field] = squeeze(array[field])
return array | 0.001412 |
def swap_across(idx, idy, mat_a, mat_r, perm):
"""Interchange row and column idy and idx."""
# Temporary permutation matrix for swapping 2 rows or columns.
size = mat_a.shape[0]
perm_new = numpy.eye(size, dtype=int)
# Modify the permutation matrix perm by swapping columns.
perm_row = 1.0*perm[:, idx]
perm[:, idx] = perm[:, idy]
perm[:, idy] = perm_row
# Modify the permutation matrix p by swapping rows (same as
# columns because p = pT).
row_p = 1.0 * perm_new[idx]
perm_new[idx] = perm_new[idy]
perm_new[idy] = row_p
# Permute mat_a and r (p = pT).
mat_a = numpy.dot(perm_new, numpy.dot(mat_a, perm_new))
mat_r = numpy.dot(mat_r, perm_new)
return mat_a, mat_r, perm | 0.001357 |
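A minimal sketch of the same symmetric swap expressed with an explicit permutation matrix P (P equals its own transpose), independent of the surrounding factorization code:

import numpy as np

size, idx, idy = 3, 0, 2
perm_new = np.eye(size, dtype=int)
perm_new[[idx, idy]] = perm_new[[idy, idx]]   # swap two rows of the identity
mat = np.arange(9).reshape(3, 3)
print(perm_new @ mat @ perm_new)              # rows 0,2 and columns 0,2 interchanged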
def configure(self, width, height):
"""See :meth:`set_window_size`."""
self._imgwin_set = True
self.set_window_size(width, height) | 0.012987 |
def import_module(modulename):
"""
Static method for importing module modulename. Can handle relative imports as well.
:param modulename: Name of module to import. Can be relative
:return: imported module instance.
"""
module = None
try:
module = importlib.import_module(modulename)
except ImportError:
# If importing fails we see if the modulename has dots in it, split the name.
if "." in modulename:
modules = modulename.split(".")
package = ".".join(modules[1:len(modules)])
# Might raise an ImportError again. If so, we really failed to import the module.
module = importlib.import_module(package)
else:
# No dots, really unable to import the module. Raise.
raise
return module | 0.00486 |
def all_experiment_groups(self):
"""
Similar to experiment_groups,
but uses the default manager to return archived experiments as well.
"""
from db.models.experiment_groups import ExperimentGroup
return ExperimentGroup.all.filter(project=self) | 0.006849 |
def extract_ctcp(self, spin, nick, user, host, target, msg):
"""
It is used to split CTCP requests into their pieces.
"""
# The ctcp delimiter token.
DELIM = '\001'
if not msg.startswith(DELIM) or not msg.endswith(DELIM):
return
ctcp_args = msg.strip(DELIM).split(' ')
spawn(spin, ctcp_args[0], (nick, user, host, target, msg), *ctcp_args[1:]) | 0.020362 |
def get_network_by_device(vm, device, pyvmomi_service, logger):
"""
Get a Network connected to a particular Device (vNIC)
@see https://github.com/vmware/pyvmomi/blob/master/docs/vim/dvs/PortConnection.rst
:param vm:
:param device: <vim.vm.device.VirtualVmxnet3> instance of adapter
:param pyvmomi_service:
:param logger:
:return: <vim Network Obj or None>
"""
try:
backing = device.backing
if hasattr(backing, 'network'):
return backing.network
elif hasattr(backing, 'port') and hasattr(backing.port, 'portgroupKey'):
return VNicService._network_get_network_by_connection(vm, backing.port, pyvmomi_service)
except:
logger.debug(u"Cannot determinate which Network connected to device {}".format(device))
return None | 0.007813 |
def excluded_length(self):
"""Surveyed length which does not count toward the included total"""
return sum([shot.length for shot in self.shots if Exclude.LENGTH in shot.flags or Exclude.TOTAL in shot.flags]) | 0.013453 |
def QPSK_BEP(tx_data,rx_data,Ncorr = 1024,Ntransient = 0):
"""
Count bit errors between a transmitted and received QPSK signal.
Time delay between streams is detected as well as ambiguity resolution
due to carrier phase lock offsets of :math:`k*\\frac{\\pi}{4}`, k=0,1,2,3.
The ndarray sdata is Tx +/-1 symbols as complex numbers I + j*Q.
The ndarray data is Rx +/-1 symbols as complex numbers I + j*Q.
Note: Ncorr needs to be even
"""
#Remove Ntransient symbols
tx_data = tx_data[Ntransient:]
rx_data = rx_data[Ntransient:]
#Correlate the first Ncorr symbols at four possible phase rotations
R0 = np.fft.ifft(np.fft.fft(rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R1 = np.fft.ifft(np.fft.fft(1j*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R2 = np.fft.ifft(np.fft.fft(-1*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R3 = np.fft.ifft(np.fft.fft(-1j*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
#Place the zero lag value in the center of the array
R0 = np.fft.fftshift(R0)
R1 = np.fft.fftshift(R1)
R2 = np.fft.fftshift(R2)
R3 = np.fft.fftshift(R3)
R0max = np.max(R0.real)
R1max = np.max(R1.real)
R2max = np.max(R2.real)
R3max = np.max(R3.real)
R = np.array([R0max,R1max,R2max,R3max])
Rmax = np.max(R)
kphase_max = np.where(R == Rmax)[0]
kmax = kphase_max[0]
#Correlation lag value is zero at the center of the array
if kmax == 0:
lagmax = np.where(R0.real == Rmax)[0] - Ncorr/2
elif kmax == 1:
lagmax = np.where(R1.real == Rmax)[0] - Ncorr/2
elif kmax == 2:
lagmax = np.where(R2.real == Rmax)[0] - Ncorr/2
elif kmax == 3:
lagmax = np.where(R3.real == Rmax)[0] - Ncorr/2
taumax = lagmax[0]
print('kmax = %d, taumax = %d' % (kmax, taumax))
# Count bit and symbol errors over the entire input ndarrays
# Begin by making tx and rx length equal and apply phase rotation to rx
if taumax < 0:
tx_data = tx_data[-taumax:]
tx_data = tx_data[:min(len(tx_data),len(rx_data))]
rx_data = 1j**kmax*rx_data[:len(tx_data)]
else:
rx_data = 1j**kmax*rx_data[taumax:]
rx_data = rx_data[:min(len(tx_data),len(rx_data))]
tx_data = tx_data[:len(rx_data)]
#Convert to 0's and 1's
S_count = len(tx_data)
tx_I = np.int16((tx_data.real + 1)/2)
tx_Q = np.int16((tx_data.imag + 1)/2)
rx_I = np.int16((rx_data.real + 1)/2)
rx_Q = np.int16((rx_data.imag + 1)/2)
I_errors = tx_I ^ rx_I
Q_errors = tx_Q ^ rx_Q
#A symbol error occurs when I or Q or both are in error
S_errors = I_errors | Q_errors
#return 0
return S_count,np.sum(I_errors),np.sum(Q_errors),np.sum(S_errors) | 0.012992 |
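The delay-detection step above is a circular cross-correlation computed with the FFT; a self-contained sketch of just that step, on a short ±1 symbol stream:

import numpy as np

def find_lag(tx, rx, ncorr=16):
    r = np.fft.ifft(np.fft.fft(rx, ncorr) * np.conj(np.fft.fft(tx, ncorr)))
    r = np.fft.fftshift(r)                    # zero lag moves to the array center
    return int(np.argmax(r.real)) - ncorr // 2

tx = np.array([1, -1, 1, 1, -1, -1, 1, -1], dtype=complex)
rx = np.roll(tx, 3)                           # delay the stream by 3 symbols
print(find_lag(tx, rx))                       # -> 3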
def do_ams_sto_put(endpoint, body, content_length):
'''Do a PUT request to the Azure Storage API and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): Azure Media Services Content Body.
content_length (str): Content_length.
Returns:
HTTP response. JSON body.
'''
headers = {"Accept": json_acceptformat,
"Accept-Charset" : charset,
"x-ms-blob-type" : "BlockBlob",
"x-ms-meta-m1": "v1",
"x-ms-meta-m2": "v2",
"x-ms-version" : "2015-02-21",
"Content-Length" : str(content_length)}
return requests.put(endpoint, data=body, headers=headers) | 0.006954 |
async def get_session_data(self):
"""Get Tautulli sessions."""
cmd = 'get_activity'
url = self.base_url + cmd
try:
async with async_timeout.timeout(8, loop=self._loop):
response = await self._session.get(url)
logger("Status from Tautulli: " + str(response.status))
self.tautulli_session_data = await response.json()
logger(self.tautulli_session_data)
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror,
AttributeError) as error:
msg = "Can not load data from Tautulli: {} - {}".format(url, error)
logger(msg, 40) | 0.002967 |
def iter_query(query):
"""Accept a filename, stream, or string.
Returns an iterator over lines of the query."""
try:
itr = click.open_file(query).readlines()
except IOError:
itr = [query]
return itr | 0.004274 |
def set_dataset_date_from_datetime(self, dataset_date, dataset_end_date=None):
# type: (datetime, Optional[datetime]) -> None
"""Set dataset date from datetime.datetime object
Args:
dataset_date (datetime.datetime): Dataset date
dataset_end_date (Optional[datetime.datetime]): Dataset end date
Returns:
None
"""
start_date = dataset_date.strftime('%m/%d/%Y')
if dataset_end_date is None:
self.data['dataset_date'] = start_date
else:
end_date = dataset_end_date.strftime('%m/%d/%Y')
self.data['dataset_date'] = '%s-%s' % (start_date, end_date) | 0.004412 |
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
"""
Set a metric
"""
if metric_type == self.HISTOGRAM:
self.histogram(metric_name, value, tags=tags, device_name=device_name)
elif metric_type == self.INCREMENT:
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error('Metric type "{}" unknown'.format(metric_type)) | 0.010638 |
def price_change(self):
"""
This method returns any price change.
:return:
"""
try:
if self._data_from_search:
return self._data_from_search.find('div', {'class': 'price-changes-sr'}).text
else:
return self._ad_page_content.find('div', {'class': 'price-changes-sr'}).text
except Exception as e:
if self._debug:
logging.error(
"Error getting price_change. Error message: " + e.args[0])
return | 0.007207 |
def load_mol(self, mol_file):
"""Loads a MOL file of the ligand (submitted by user) into RDKit environment.
Takes:
* mol_file * - user submitted MOL file of the ligand
Output:
* self.mol_mda * - the ligand as MDAnalysis Universe,
* self.mol * - the ligand in RDKit environment as Mol object.
"""
#Check if MOL file has been provided correctly and can be loaded in MDAnalysis
if mol_file is None:
mol_file = "lig.mol"
self.mol = Chem.MolFromMolFile(mol_file,removeHs=False,sanitize=False)
try:
self.mol.UpdatePropertyCache(strict=False)
except AttributeError:
assert self.mol != None, "The MOL file could not be imported in RDKit environment. Suggestion: Check the atomtypes."
assert self.mol != None, "The MOL file could not be imported in RDKit environment." | 0.011803 |
def import_deleted_fields(self, data):
"""
Set data fields to deleted
"""
if self.get_read_only() and self.is_locked():
return
if isinstance(data, str):
data = [data]
for key in data:
if hasattr(self, key):
delattr(self, key)
continue
keys = key.split('.', 1)
if len(keys) != 2:
continue
child = getattr(self, keys[0])
child.import_deleted_fields(keys[1]) | 0.003697 |
def eigs_s(infile="", dir_path='.'):
"""
Converts eigenparameter format data to s format
Parameters
___________________
Input:
file : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
Output
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....]
"""
file = os.path.join(dir_path, infile)
eigs_data = np.loadtxt(file)
Ss = []
for ind in range(eigs_data.shape[0]):
tau, Vdirs = [], []
for k in range(0, 9, 3):
tau.append(eigs_data[ind][k])
Vdirs.append([eigs_data[ind][k+1], eigs_data[ind][k+2]])
s = list(pmag.doeigs_s(tau, Vdirs))
Ss.append(s)
return Ss | 0.002494 |
def get_record(self, path=None, no_pdf=False,
test=False, refextract_callback=None):
"""Convert a record to MARCXML format.
:param path: path to a record.
:type path: string
:param test: flag to determine if it is a test call.
:type test: bool
:param refextract_callback: callback to be used to extract
unstructured references. It should
return a marcxml formated string
of the reference.
:type refextract_callback: callable
:returns: marcxml formated string.
"""
xml_doc = self.get_article(path)
rec = create_record()
title = self.get_title(xml_doc)
if title:
record_add_field(rec, '245', subfields=[('a', title)])
(journal, dummy, volume, issue, first_page, last_page, year,
start_date, doi) = self.get_publication_information(xml_doc, path)
if not journal:
journal = self.get_article_journal(xml_doc)
if start_date:
record_add_field(rec, '260', subfields=[('c', start_date),
('t', 'published')])
else:
record_add_field(
rec, '260', subfields=[('c', time.strftime('%Y-%m-%d'))])
if doi:
record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
('2', 'DOI')])
license, license_url = self.get_license(xml_doc)
if license and license_url:
record_add_field(rec, '540', subfields=[('a', license),
('u', license_url)])
elif license_url:
record_add_field(rec, '540', subfields=[('u', license_url)])
self.logger.info("Creating record: %s %s" % (path, doi))
authors = self.get_authors(xml_doc)
first_author = True
for author in authors:
author_name = (author['surname'], author.get(
'given_name') or author.get('initials'))
subfields = [('a', '%s, %s' % author_name)]
if 'orcid' in author:
subfields.append(('j', author['orcid']))
if 'affiliation' in author:
for aff in author["affiliation"]:
subfields.append(('v', aff))
if self.extract_nations:
add_nations_field(subfields)
if author.get('email'):
subfields.append(('m', author['email']))
if first_author:
record_add_field(rec, '100', subfields=subfields)
first_author = False
else:
record_add_field(rec, '700', subfields=subfields)
abstract = self.get_abstract(xml_doc)
if abstract:
record_add_field(rec, '520', subfields=[('a', abstract),
('9', 'Elsevier')])
record_copyright = self.get_copyright(xml_doc)
if record_copyright:
record_add_field(rec, '542', subfields=[('f', record_copyright)])
keywords = self.get_keywords(xml_doc)
if self.CONSYN:
for tag in xml_doc.getElementsByTagName('ce:collaboration'):
collaboration = get_value_in_tag(tag, 'ce:text')
if collaboration:
record_add_field(rec, '710',
subfields=[('g', collaboration)])
# We add subjects also as author keywords
subjects = xml_doc.getElementsByTagName('dct:subject')
for subject in subjects:
for listitem in subject.getElementsByTagName('rdf:li'):
keyword = xml_to_text(listitem)
if keyword not in keywords:
keywords.append(keyword)
for keyword in keywords:
record_add_field(rec, '653', ind1='1',
subfields=[('a', keyword),
('9', 'author')])
journal, dummy = fix_journal_name(journal.strip(),
self.journal_mappings)
subfields = []
doctype = self.get_doctype(xml_doc)
try:
page_count = int(last_page) - int(first_page) + 1
record_add_field(rec, '300',
subfields=[('a', str(page_count))])
except ValueError: # do nothing
pass
if doctype == 'err':
subfields.append(('m', 'Erratum'))
elif doctype == 'add':
subfields.append(('m', 'Addendum'))
elif doctype == 'pub':
subfields.append(('m', 'Publisher Note'))
elif doctype == 'rev':
record_add_field(rec, '980', subfields=[('a', 'Review')])
if journal:
subfields.append(('p', journal))
if first_page and last_page:
subfields.append(('c', '%s-%s' %
(first_page, last_page)))
elif first_page:
subfields.append(('c', first_page))
if volume:
subfields.append(('v', volume))
if year:
subfields.append(('y', year))
record_add_field(rec, '773', subfields=subfields)
if not test:
if license:
url = 'http://www.sciencedirect.com/science/article/pii/'\
+ path.split('/')[-1][:-4]
record_add_field(rec, '856', ind1='4',
subfields=[('u', url),
('y', 'Elsevier server')])
record_add_field(rec, 'FFT', subfields=[('a', path),
('t', 'INSPIRE-PUBLIC'),
('d', 'Fulltext')])
else:
record_add_field(rec, 'FFT', subfields=[('a', path),
('t', 'Elsevier'),
('o', 'HIDDEN')])
record_add_field(rec, '980', subfields=[('a', 'HEP')])
record_add_field(rec, '980', subfields=[('a', 'Citeable')])
record_add_field(rec, '980', subfields=[('a', 'Published')])
self._add_references(xml_doc, rec, refextract_callback)
else:
licence = 'http://creativecommons.org/licenses/by/3.0/'
record_add_field(rec,
'540',
subfields=[('a', 'CC-BY-3.0'), ('u', licence)])
if keywords:
for keyword in keywords:
record_add_field(
rec, '653', ind1='1', subfields=[('a', keyword),
('9', 'author')])
pages = ''
if first_page and last_page:
pages = '{0}-{1}'.format(first_page, last_page)
elif first_page:
pages = first_page
subfields = filter(lambda x: x[1] and x[1] != '-', [('p', journal),
('v', volume),
('n', issue),
('c', pages),
('y', year)])
record_add_field(rec, '773', subfields=subfields)
if not no_pdf:
from invenio.search_engine import perform_request_search
query = '0247_a:"%s" AND NOT 980:DELETED"' % (doi,)
prev_version = perform_request_search(p=query)
old_pdf = False
if prev_version:
from invenio.bibdocfile import BibRecDocs
prev_rec = BibRecDocs(prev_version[0])
try:
pdf_path = prev_rec.get_bibdoc('main')
pdf_path = pdf_path.get_file(
".pdf;pdfa", exact_docformat=True)
pdf_path = pdf_path.fullpath
old_pdf = True
record_add_field(rec, 'FFT',
subfields=[('a', pdf_path),
('n', 'main'),
('f', '.pdf;pdfa')])
message = ('Leaving previously delivered PDF/A for: '
+ doi)
self.logger.info(message)
except:
pass
try:
if exists(join(path, 'main_a-2b.pdf')):
pdf_path = join(path, 'main_a-2b.pdf')
record_add_field(rec, 'FFT',
subfields=[('a', pdf_path),
('n', 'main'),
('f', '.pdf;pdfa')])
self.logger.debug('Adding PDF/A to record: %s'
% (doi,))
elif exists(join(path, 'main.pdf')):
pdf_path = join(path, 'main.pdf')
record_add_field(rec, 'FFT', subfields=[('a', pdf_path)])
else:
if not old_pdf:
message = "Record " + doi
message += " doesn't contain PDF file."
self.logger.warning(message)
raise MissingFFTError(message)
except MissingFFTError:
message = "Elsevier paper: %s is missing PDF." % (doi,)
register_exception(alert_admin=True, prefix=message)
version = self.get_elsevier_version(find_package_name(path))
record_add_field(rec, '583', subfields=[('l', version)])
xml_path = join(path, 'main.xml')
record_add_field(rec, 'FFT', subfields=[('a', xml_path)])
record_add_field(rec, '980', subfields=[('a', 'SCOAP3'),
('b', 'Elsevier')])
try:
return record_xml_output(rec)
except UnicodeDecodeError:
message = "Found a bad char in the file for the article " + doi
sys.stderr.write(message)
return "" | 0.000645 |
def set_formatter(name, func):
"""Replace the formatter function used by the trace decorator to
handle formatting a specific kind of argument. There are several
kinds of arguments that trace discriminates between:
* instance argument - the object bound to an instance method.
* class argument - the class object bound to a class method.
* positional arguments (named) - values bound to distinct names.
* positional arguments (default) - named positional arguments with
default values specified in the function declaration.
* positional arguments (anonymous) - an arbitrary number of values
that are all bound to the '*' variable.
* keyword arguments - zero or more name-value pairs that are
placed in a dictionary and bound to the double-star variable.
\var{name} - specifies the name of the formatter to be modified.
* instance argument - "self", "instance" or "this"
* class argument - "class"
* named argument - "named", "param" or "parameter"
* default argument - "default", "optional"
* anonymous argument - "anonymous", "arbitrary" or "unnamed"
* keyword argument - "keyword", "pair" or "pairs"
\var{func} - a function to format an argument.
* For all but anonymous formatters this function must accept two
arguments: the variable name and the value to which it is bound.
* The anonymous formatter function is passed only one argument
corresponding to an anonymous value.
* if \var{func} is "None" then the default formatter will be used.
"""
if name in ('self', 'instance', 'this'):
global af_self
af_self = _formatter_self if func is None else func
elif name == 'class':
global af_class
af_class = _formatter_class if func is None else func
elif name in ('named', 'param', 'parameter'):
global af_named
af_named = _formatter_named if func is None else func
elif name in ('default', 'optional'):
global af_default
af_default = _formatter_defaults if func is None else func
elif name in ('anonymous', 'arbitrary', 'unnamed'):
global af_anonymous
af_anonymous = chop if func is None else func
elif name in ('keyword', 'pair', 'pairs'):
global af_keyword
af_keyword = _formatter_named if func is None else func
else:
raise ValueError('unknown trace formatter %r' % name) | 0.000408 |
def create(cls, expr, binds):
"""
Helper for creating new NumExprFactors.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype) | 0.006042 |
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True) | 0.004515 |
def main(args, stop=False):
"""
Arguments parsing, etc..
"""
daemon = AMQPDaemon(
con_param=getConParams(
settings.RABBITMQ_PDFGEN_VIRTUALHOST
),
queue=settings.RABBITMQ_PDFGEN_INPUT_QUEUE,
out_exch=settings.RABBITMQ_PDFGEN_EXCHANGE,
out_key=settings.RABBITMQ_PDFGEN_OUTPUT_KEY,
react_fn=reactToAMQPMessage,
glob=globals() # used in deserializer
)
if not stop and args.foreground: # run at foreground
daemon.run()
else:
daemon.run_daemon() | 0.001761 |
def get_subj_alt_name(peer_cert):
"""
Given an PyOpenSSL certificate, provides all the subject alternative names.
"""
# Pass the cert to cryptography, which has much better APIs for this.
if hasattr(peer_cert, "to_cryptography"):
cert = peer_cert.to_cryptography()
else:
# This is technically using private APIs, but should work across all
# relevant versions before PyOpenSSL got a proper API for this.
cert = _Certificate(openssl_backend, peer_cert._x509)
# We want to find the SAN extension. Ask Cryptography to locate it (it's
# faster than looping in Python)
try:
ext = cert.extensions.get_extension_for_class(
x509.SubjectAlternativeName
).value
except x509.ExtensionNotFound:
# No such extension, return the empty list.
return []
except (x509.DuplicateExtension, UnsupportedExtension,
x509.UnsupportedGeneralNameType, UnicodeError) as e:
# A problem has been found with the quality of the certificate. Assume
# no SAN field is present.
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
# We want to return dNSName and iPAddress fields. We need to cast the IPs
# back to strings because the match_hostname function wants them as
# strings.
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
# decoded. This is pretty frustrating, but that's what the standard library
# does with certificates, and so we need to attempt to do the same.
# We also want to skip over names which cannot be idna encoded.
names = [
('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
if name is not None
]
names.extend(
('IP Address', str(name))
for name in ext.get_values_for_type(x509.IPAddress)
)
return names | 0.000943 |
def atlasdb_format_query( query, values ):
"""
Turn a query into a string for printing.
Useful for debugging.
"""
return "".join( ["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))] ) | 0.021352 |
def _get_image_numpy_dtype(self):
"""
Get the numpy dtype for the image
"""
try:
ftype = self._info['img_equiv_type']
npy_type = _image_bitpix2npy[ftype]
except KeyError:
raise KeyError("unsupported fits data type: %d" % ftype)
return npy_type | 0.006098 |
def ConsultarTipoDeduccion(self, sep="||"):
"Consulta de tipos de Deducciones"
ret = self.client.tipoDeduccionConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoDeduccionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposDeduccion', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array] | 0.009539 |
def modify(self, service_name, json, **kwargs):
"""Modify an AppNexus object"""
return self._send(requests.put, service_name, json, **kwargs) | 0.012739 |
def split(self, bitindex):
"""Split a promise into two promises at the provided index.
A common operation in JTAG is reading/writing to a
register. During the operation, the TMS pin must be low, but
during the writing of the last bit, the TMS pin must be
high. Requiring all reads or writes to have full arbitrary
control over the TMS pin is unrealistic.
Splitting a promise into two sub promises is a way to mitigate
this issue. The final read bit is its own subpromise that can
be associated with a different primitive than the 'rest' of
the subpromise.
Returns:
Two TDOPromise instances: the 'Rest' and the 'Tail'.
The 'Rest' is the first chunk of the original promise.
The 'Tail' is a single bit sub promise for the final bit
in the operation
If the 'Rest' would have a length of 0, None is returned
"""
if bitindex < 0:
raise ValueError("bitindex must be larger or equal to 0.")
if bitindex > len(self):
raise ValueError(
"bitindex larger than the array's size. "
"Len: %s; bitindex: %s"%(len(self), bitindex))
if bitindex == 0:
return None, self
if bitindex == len(self):
return self, None
left = TDOPromise(self._chain, self._bitstart, bitindex,
_parent=self)
#Starts at 0 because offset is for incoming data from
#associated primitive, not location in parent.
right = TDOPromise(self._chain, 0, len(self)-bitindex,
_parent=self)
self._components = []
self._addsub(left, 0)
self._addsub(right, bitindex)
return left, right | 0.003293 |
def data_received(self, data):
"""Receive data from the protocol.
Called when asyncio.Protocol detects received data from network.
"""
_LOGGER.debug("Starting: data_received")
_LOGGER.debug('Received %d bytes from PLM: %s',
len(data), binascii.hexlify(data))
self._buffer.put_nowait(data)
asyncio.ensure_future(self._peel_messages_from_buffer(),
loop=self._loop)
_LOGGER.debug("Finishing: data_received") | 0.003831 |
def get_fulltext_links(doi):
"""Return a list of links to the full text of an article given its DOI.
Each list entry is a dictionary with keys:
- URL: the URL to the full text
- content-type: e.g. text/xml or text/plain
- content-version
- intended-application: e.g. text-mining
"""
metadata = get_metadata(doi)
if metadata is None:
return None
links = metadata.get('link')
return links | 0.002283 |
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: If the post is a draft or if it needs to be published.
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow())
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow())
:type last_modified_date: datetime.datetime
:param meta_data: The meta data for the blog post
:type meta_data: dict
:param post_id: The post identifier. This should be ``None`` for an
insert call, and a valid value for update.
:type post_id: int
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
raise NotImplementedError("This method needs to be implemented by "
"the inheriting class") | 0.002469 |
def check_used(self, pkg):
"""Check if dependencies used
"""
used = []
dep_path = self.meta.log_path + "dep/"
logs = find_package("", dep_path)
for log in logs:
deps = Utils().read_file(dep_path + log)
for dep in deps.splitlines():
if pkg == dep:
used.append(log)
return used | 0.005115 |
def simplify_tree(tree, unpack_lists=True, in_list=False):
"""Recursively unpack single-item lists and objects where fields and labels only reference a single child
:param tree: the tree to simplify (mutating!)
:param unpack_lists: whether single-item lists should be replaced by that item
:param in_list: this is used to prevent unpacking a node in a list as AST visit can't handle nested lists
"""
# TODO: copy (or (de)serialize)? outside this function?
if isinstance(tree, BaseNode) and not isinstance(tree, Terminal):
used_fields = [field for field in tree._fields if getattr(tree, field, False)]
if len(used_fields) == 1:
result = getattr(tree, used_fields[0])
else:
result = None
if (
len(used_fields) != 1
or isinstance(tree, AliasNode)
or (in_list and isinstance(result, list))
):
result = tree
for field in tree._fields:
old_value = getattr(tree, field, None)
if old_value:
setattr(
result,
field,
simplify_tree(old_value, unpack_lists=unpack_lists),
)
return result
assert result is not None
elif isinstance(tree, list) and len(tree) == 1 and unpack_lists:
result = tree[0]
else:
if isinstance(tree, list):
result = [
simplify_tree(el, unpack_lists=unpack_lists, in_list=True)
for el in tree
]
else:
result = tree
return result
return simplify_tree(result, unpack_lists=unpack_lists) | 0.002902 |
def run_start_backup(cls):
"""
Connects to a server and attempts to start a hot backup
Yields the WAL information in a dictionary for bookkeeping and
recording.
"""
def handler(popen):
assert popen.returncode != 0
raise UserException('Could not start hot backup')
# The difficulty of getting a timezone-stamped, UTC,
# ISO-formatted datetime is downright embarrassing.
#
# See http://bugs.python.org/issue5094
label = 'freeze_start_' + (datetime.datetime.utcnow()
.replace(tzinfo=UTC()).isoformat())
return cls._dict_transform(psql_csv_run(
"SELECT file_name, "
" lpad(file_offset::text, 8, '0') AS file_offset "
"FROM pg_{0}file_name_offset("
" pg_start_backup('{1}'))".format(cls._wal_name(), label),
error_handler=handler)) | 0.002068 |
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return '' | 0.003663 |
def _render_module_flags(self, module, flags, output_lines, prefix=''):
"""Returns a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self._render_flag_list(flags, output_lines, prefix + ' ') | 0.006515 |
def insert_row(self, index, row):
"""Insert a row before index in the table.
Parameters
----------
index : int
List index rules apply
row : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `row` is not an iterable.
ValueError:
If size of `row` is inconsistent with the current number
of columns.
"""
row = self._validate_row(row)
row_obj = RowData(self, row)
self._table.insert(index, row_obj) | 0.003419 |
def guest_unpause(self, userid):
"""Unpause a virtual machine.
:param str userid: the id of the virtual machine to be unpaused
:returns: None
"""
action = "unpause guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.guest_unpause(userid) | 0.006024 |
def _clear(self, pipe=None):
"""Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis`
"""
redis = self.redis if pipe is None else pipe
redis.delete(self.key) | 0.005181 |
def default():
"""Retrieves a default Context object, creating it if necessary.
The default Context is a global shared instance used every time the default context is
retrieved.
Attempting to use a Context with no project_id will raise an exception, so on first use
set_project_id must be called.
Returns:
An initialized and shared instance of a Context object.
"""
credentials = du.get_credentials()
project = du.get_default_project_id()
if Context._global_context is None:
config = Context._get_default_config()
Context._global_context = Context(project, credentials, config)
else:
# Always update everything in case the access token is revoked or expired, config changed,
# or project changed.
Context._global_context.set_credentials(credentials)
Context._global_context.set_project_id(project)
return Context._global_context | 0.010764 |
def activate(username):
"""Activate a user.
Example:
\b
```bash
$ polyaxon user activate david
```
"""
try:
PolyaxonClient().user.activate_user(username)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not activate user `{}`.'.format(username))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("User `{}` was activated successfully.".format(username)) | 0.005703 |
def create_gw_response(app, wsgi_env):
"""Create an api gw response from a wsgi app and environ.
"""
response = {}
buf = []
result = []
def start_response(status, headers, exc_info=None):
result[:] = [status, headers]
return buf.append
appr = app(wsgi_env, start_response)
close_func = getattr(appr, 'close', None)
try:
buf.extend(list(appr))
finally:
close_func and close_func()
response['body'] = ''.join(buf)
response['statusCode'] = result[0].split(' ', 1)[0]
response['headers'] = {}
for k, v in result[1]:
response['headers'][k] = v
if 'Content-Length' not in response['headers']:
response['headers']['Content-Length'] = str(len(response['body']))
if 'Content-Type' not in response['headers']:
response['headers']['Content-Type'] = 'text/plain'
return response | 0.001119 |
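A hedged usage sketch with a minimal WSGI app; the app and environ below are illustrative, not part of the original module:

def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['hello from wsgi']                # str chunks, matching the ''.join above

resp = create_gw_response(hello_app, {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'})
print(resp['statusCode'], resp['body'])       # -> 200 hello from wsgi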
def script(name,
source,
saltenv='base',
args=None,
template=None,
exec_driver=None,
stdin=None,
python_shell=True,
output_loglevel='debug',
ignore_retcode=False,
use_vt=False,
keep_env=None):
'''
Run :py:func:`cmd.script <salt.modules.cmdmod.script>` within a container
.. note::
While the command is run within the container, it is initiated from the
host. Therefore, the PID in the return dict is from the host, not from
the container.
name
Container name or ID
source
Path to the script. Can be a local path on the Minion or a remote file
from the Salt fileserver.
args
A string containing additional command-line options to pass to the
script.
template : None
Templating engine to use on the script before running.
exec_driver : None
If not passed, the execution driver will be detected as described
:ref:`above <docker-execution-driver>`.
stdin : None
Standard input to be used for the script
output_loglevel : debug
Level at which to log the output from the script. Set to ``quiet`` to
suppress logging.
use_vt : False
Use SaltStack's utils.vt to stream output to console.
keep_env : None
If not passed, only a sane default PATH environment variable will be
set. If ``True``, all environment variables from the container's host
will be kept. Otherwise, a comma-separated list (or Python list) of
environment variable names can be passed, and those environment
variables will be kept.
CLI Example:
.. code-block:: bash
salt myminion docker.script mycontainer salt://docker_script.py
salt myminion docker.script mycontainer salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt myminion docker.script mycontainer salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' output_loglevel=quiet
'''
return _script(name,
source,
saltenv=saltenv,
args=args,
template=template,
exec_driver=exec_driver,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
ignore_retcode=ignore_retcode,
use_vt=use_vt,
keep_env=keep_env) | 0.001183 |
def Mx(mt, x):
""" Return the Mx """
n = len(mt.Cx)
sum1 = 0
for j in range(x, n):
k = mt.Cx[j]
sum1 += k
return sum1 | 0.006536 |
def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
spark_ctx = SparkContext._active_spark_context
java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions,
name)
(col._java_ctx if isinstance(col, Column) else col))
return Column(java_ctx)
_.__name__ = name
_.__doc__ = doc
return _ | 0.002347 |
def _execute_example(self):
"Handles the execution of the Example"
test_benchmark = Benchmark()
try:
with Registry(), test_benchmark:
if accepts_arg(self.example.testfn):
self.example.testfn(self.context)
else:
self.context.inject_into_self(self.example.testfn)
self.example.testfn()
self.num_successes += 1
except KeyboardInterrupt:
# bubble interrupt for canceling spec execution
raise
except:
    self.num_failures += 1
    raise
finally:
self.example.user_time = test_benchmark.total_time | 0.004213 |
def vertices(self, values):
"""
Assign vertex values to the mesh.
Parameters
--------------
values : (n, 3) float
Points in space
"""
self._data['vertices'] = np.asanyarray(values,
order='C',
dtype=np.float64) | 0.005405 |
def absent(name,
if_exists=None,
restrict=None,
cascade=None,
user=None,
maintenance_db=None,
db_user=None,
db_password=None,
db_host=None,
db_port=None):
'''
Ensure that the named extension is absent.
name
Extension name of the extension to remove
    if_exists
        Add an ``IF EXISTS`` clause to the drop statement
    restrict
        Add a ``RESTRICT`` clause to the drop statement
    cascade
        Drop dependent objects as well (``CASCADE``)
user
System user all operations should be performed on behalf of
maintenance_db
Database to act on
db_user
Database username if different from config or default
db_password
        Password for the database user, if one is required
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if extension exists and remove it
exists = __salt__['postgres.is_installed_extension'](name, **db_args)
if exists:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Extension {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.drop_extension'](name,
if_exists=if_exists,
restrict=restrict,
cascade=cascade,
**db_args):
ret['comment'] = 'Extension {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['result'] = False
ret['comment'] = 'Extension {0} failed to be removed'.format(name)
return ret
else:
ret['comment'] = 'Extension {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret | 0.000442 |
def make_box_pixel_mask_from_col_row(column, row, default=0, value=1):
'''Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list.
Parameters
----------
column : iterable, int
        List of column values.
row : iterable, int
List of row values.
default : int
Value of pixels that are not selected by the mask.
value : int
Value of pixels that are selected by the mask.
Returns
-------
numpy.ndarray
'''
# FE columns and rows start from 1
col_array = np.array(column) - 1
row_array = np.array(row) - 1
if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0):
raise ValueError('Column and/or row out of range')
shape = (80, 336)
mask = np.full(shape, default, dtype=np.uint8)
if column and row:
mask[col_array.min():col_array.max() + 1, row_array.min():row_array.max() + 1] = value # advanced indexing
return mask | 0.003791 |
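A usage sketch for the mask builder above, assuming it is importable alongside numpy; the column and row values are arbitrary examples.
import numpy as np  # already required by the function above

mask = make_box_pixel_mask_from_col_row(column=[10, 20], row=[100, 150])
assert mask.shape == (80, 336)
# Columns 10-20 and rows 100-150 (1-based) form the selected box.
assert mask[9:20, 99:150].all()
assert mask.sum() == 11 * 51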
def __fromfile(self, filename, astype=None):
"""a private method to deduce and load a filename into a matrix object.
Uses extension: 'jco' or 'jcb': binary, 'mat','vec' or 'cov': ASCII,
'unc': pest uncertainty file.
Parameters
----------
filename : str
the name of the file
Returns
-------
m : pyemu.Matrix
Raises
------
exception for unrecognized extension
"""
assert os.path.exists(filename),"LinearAnalysis.__fromfile(): " +\
"file not found:" + filename
ext = filename.split('.')[-1].lower()
if ext in ["jco", "jcb"]:
self.log("loading jco: "+filename)
if astype is None:
astype = Jco
m = astype.from_binary(filename)
self.log("loading jco: "+filename)
elif ext in ["mat","vec"]:
self.log("loading ascii: "+filename)
if astype is None:
astype = Matrix
m = astype.from_ascii(filename)
self.log("loading ascii: "+filename)
elif ext in ["cov"]:
self.log("loading cov: "+filename)
if astype is None:
astype = Cov
if _istextfile(filename):
m = astype.from_ascii(filename)
else:
m = astype.from_binary(filename)
self.log("loading cov: "+filename)
elif ext in["unc"]:
self.log("loading unc: "+filename)
if astype is None:
astype = Cov
m = astype.from_uncfile(filename)
self.log("loading unc: "+filename)
else:
raise Exception("linear_analysis.__fromfile(): unrecognized" +
" filename extension:" + str(ext))
return m | 0.003183 |
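The if/elif chain above is an extension dispatch; the same idea can be written as a lookup table. A minimal standalone sketch (the loader callables here are stand-ins, not the pyemu classes):
import os

def load_matrix(filename, loaders):
    """Dispatch to a loader callable keyed by file extension."""
    ext = os.path.splitext(filename)[1].lstrip('.').lower()
    loader = loaders.get(ext)
    if loader is None:
        raise ValueError("unrecognized filename extension: %s" % ext)
    return loader(filename)

loaders = {"jco": lambda f: "binary:" + f,   # stand-in for Jco.from_binary
           "mat": lambda f: "ascii:" + f}    # stand-in for Matrix.from_ascii
assert load_matrix("pest.jco", loaders) == "binary:pest.jco"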
def update_one(self, filter, update, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
"""
self._arctic_lib.check_quota()
return self._collection.update_one(filter, update, **kwargs) | 0.010067 |
def derivative(self,x):
'''
Evaluates the derivative of the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
dydx : np.array or float
The interpolated function's first derivative evaluated at x:
dydx = f'(x), with the same shape as x.
'''
z = np.asarray(x)
return (self._der(z.flatten())).reshape(z.shape) | 0.00738 |
def compute(chart):
""" Computes the Almutem table. """
almutems = {}
# Hylegic points
hylegic = [
chart.getObject(const.SUN),
chart.getObject(const.MOON),
chart.getAngle(const.ASC),
chart.getObject(const.PARS_FORTUNA),
chart.getObject(const.SYZYGY)
]
for hyleg in hylegic:
row = newRow()
digInfo = essential.getInfo(hyleg.sign, hyleg.signlon)
# Add the scores of each planet where hyleg has dignities
for dignity in DIGNITY_LIST:
objID = digInfo[dignity]
if objID:
score = essential.SCORES[dignity]
row[objID]['string'] += '+%s' % score
row[objID]['score'] += score
almutems[hyleg.id] = row
# House positions
row = newRow()
for objID in OBJECT_LIST:
obj = chart.getObject(objID)
house = chart.houses.getObjectHouse(obj)
score = HOUSE_SCORES[house.id]
row[objID]['string'] = '+%s' % score
row[objID]['score'] = score
almutems['Houses'] = row
# Planetary time
row = newRow()
table = planetarytime.getHourTable(chart.date, chart.pos)
ruler = table.currRuler()
hourRuler = table.hourRuler()
row[ruler] = {
'string': '+7',
'score': 7
}
row[hourRuler] = {
'string': '+6',
'score': 6
}
    almutems['Rulers'] = row
# Compute scores
scores = newRow()
for _property, _list in almutems.items():
for objID, values in _list.items():
scores[objID]['string'] += values['string']
scores[objID]['score'] += values['score']
almutems['Score'] = scores
return almutems | 0.005119 |
def add_identities(cls, db, identities, backend):
""" Load identities list from backend in Sorting Hat """
logger.info("Adding the identities to SortingHat")
total = 0
for identity in identities:
try:
cls.add_identity(db, identity, backend)
total += 1
except Exception as e:
logger.error("Unexcepted error when adding identities: %s" % e)
continue
logger.info("Total identities added to SH: %i", total) | 0.003745 |
def check_spec(self, pos_args, kwargs=None):
"""Check if there are any missing or duplicate arguments.
Args:
pos_args (list): A list of arguments that will be passed as positional
arguments.
kwargs (dict): A dictionary of the keyword arguments that will be passed.
Returns:
dict: A dictionary of argument name to argument value, pulled from either
the value passed or the default value if no argument is passed.
Raises:
ArgumentError: If a positional or keyword argument does not fit in the spec.
ValidationError: If an argument is passed twice.
"""
if kwargs is None:
kwargs = {}
if self.varargs is not None or self.kwargs is not None:
raise InternalError("check_spec cannot be called on a function that takes *args or **kwargs")
missing = object()
arg_vals = [missing]*len(self.arg_names)
kw_indices = {name: i for i, name in enumerate(self.arg_names)}
for i, arg in enumerate(pos_args):
if i >= len(arg_vals):
raise ArgumentError("Too many positional arguments, first excessive argument=%s" % str(arg))
arg_vals[i] = arg
for arg, val in kwargs.items():
index = kw_indices.get(arg)
if index is None:
raise ArgumentError("Cannot find argument by name: %s" % arg)
if arg_vals[index] is not missing:
raise ValidationError("Argument %s passed twice" % arg)
arg_vals[index] = val
# Fill in any default variables if their args are missing
if len(self.arg_defaults) > 0:
for i in range(0, len(self.arg_defaults)):
neg_index = -len(self.arg_defaults) + i
if arg_vals[neg_index] is missing:
arg_vals[neg_index] = self.arg_defaults[i]
# Now make sure there isn't a missing gap
if missing in arg_vals:
index = arg_vals.index(missing)
raise ArgumentError("Missing a required argument (position: %d, name: %s)" % (index, self.arg_names[index]))
return {name: val for name, val in zip(self.arg_names, arg_vals)} | 0.003958 |
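The heart of check_spec is aligning defaults with the tail of the argument list via negative indexing. A minimal standalone sketch of that logic (function and names hypothetical, outside the class):
def bind_args(arg_names, arg_defaults, pos_args, kwargs):
    """Bind positional and keyword arguments, filling trailing defaults."""
    missing = object()
    vals = [missing] * len(arg_names)
    for i, arg in enumerate(pos_args):
        vals[i] = arg
    for name, val in kwargs.items():
        vals[arg_names.index(name)] = val          # raises ValueError on unknown names
    for i, default in enumerate(arg_defaults):     # defaults align with the last parameters
        idx = len(arg_names) - len(arg_defaults) + i
        if vals[idx] is missing:
            vals[idx] = default
    if missing in vals:
        raise ValueError("missing required argument: %s" % arg_names[vals.index(missing)])
    return dict(zip(arg_names, vals))

assert bind_args(["a", "b", "c"], [10], [1], {"b": 2}) == {"a": 1, "b": 2, "c": 10}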
def compute(self, data, fill_value=None, **kwargs):
"""Resample the given data using bilinear interpolation"""
del kwargs
if fill_value is None:
fill_value = data.attrs.get('_FillValue')
target_shape = self.target_geo_def.shape
res = self.resampler.get_sample_from_bil_info(data,
fill_value=fill_value,
output_shape=target_shape)
return res | 0.005871 |
    def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):
"""
Calculate the neighborhood probability over the full period of the forecast
Args:
radius: circular radius from each point in km
smoothing: width of Gaussian smoother in km
threshold: intensity of exceedance
            stride: number of grid points to skip for reduced neighborhood grid
            start_time: first forecast hour included in the period
            end_time: forecast hour at which the period ends (exclusive)
Returns:
(neighborhood probabilities)
"""
neighbor_x = self.x[::stride, ::stride]
neighbor_y = self.y[::stride, ::stride]
neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))
print('Forecast Hours: {0}-{1}'.format(start_time, end_time))
for m in range(len(self.members)):
period_max = self.data[m,start_time:end_time,:,:].max(axis=0)
valid_i, valid_j = np.where(period_max >= threshold)
print(self.members[m], len(valid_i))
if len(valid_i) > 0:
var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T)
exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)
exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape)
neighbor_prob[m][exceed_i, exceed_j] = 1
if smoothing > 0:
                neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing, mode='constant')
return neighbor_prob | 0.009541 |
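The core of the method above is a KD-tree ball query: every (strided) grid point within `radius` of an exceedance point gets probability 1, optionally Gaussian-smoothed. A toy sketch of that step on a small grid (not the ensemble class itself):
import numpy as np
from scipy.ndimage import gaussian_filter
from scipy.spatial import cKDTree

x, y = np.meshgrid(np.arange(10.0), np.arange(10.0))
field = np.zeros((10, 10))
field[4, 4] = 30.0                                   # single exceedance point
threshold, radius = 25.0, 2.0

neighbor_tree = cKDTree(np.vstack((x.ravel(), y.ravel())).T)
vi, vj = np.where(field >= threshold)
exceed_tree = cKDTree(np.vstack((x[vi, vj], y[vi, vj])).T)

hits = np.unique(np.concatenate(exceed_tree.query_ball_tree(neighbor_tree, radius))).astype(int)
prob = np.zeros(field.shape)
prob[np.unravel_index(hits, field.shape)] = 1        # mark grid points within the radius
prob = gaussian_filter(prob, 1.0, mode='constant')   # optional smoothing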
def get_learning_objectives_metadata(self):
"""Gets the metadata for learning objectives.
return: (osid.Metadata) - metadata for the learning objectives
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['learning_objectives'])
metadata.update({'existing_learning_objectives_values': self._my_map['learningObjectiveIds']})
return Metadata(**metadata) | 0.007339 |
def get_validated_type(object_type: Type[Any], name: str, enforce_not_joker: bool = True) -> Type[Any]:
"""
Utility to validate a type :
* None is not allowed,
* 'object', 'AnyObject' and 'Any' lead to the same 'AnyObject' type
* JOKER is either rejected (if enforce_not_joker is True, default) or accepted 'as is'
:param object_type: the type to validate
:param name: a name used in exceptions if any
:param enforce_not_joker: a boolean, set to False to tolerate JOKER types
:return: the fixed type
"""
if object_type is object or object_type is Any or object_type is AnyObject:
return AnyObject
else:
# -- !! Do not check TypeVar or Union : this is already handled at higher levels --
if object_type is JOKER:
# optionally check if JOKER is allowed
if enforce_not_joker:
raise ValueError('JOKER is not allowed for object_type')
else:
            # note: we don't check var earlier, since 'typing.Any' is not a subclass of type anymore
check_var(object_type, var_types=type, var_name=name)
return object_type | 0.004352 |
def compare_hdf_files(file1, file2):
""" Compare two hdf files.
:param file1: First file to compare.
:param file2: Second file to compare.
    :returns: True if they are the same.
"""
data1 = FileToDict()
data2 = FileToDict()
scanner1 = data1.scan
scanner2 = data2.scan
with h5py.File(file1, 'r') as fh1:
fh1.visititems(scanner1)
with h5py.File(file2, 'r') as fh2:
fh2.visititems(scanner2)
return data1.contents == data2.contents | 0.002045 |
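compare_hdf_files depends on a FileToDict helper that is not shown; a minimal sketch of what such a collector might look like, assuming it simply flattens every dataset into a path-keyed dict (the real class may record more):
import h5py
import numpy as np

class FileToDict(object):
    """Collect HDF5 dataset contents keyed by their path (minimal assumption)."""
    def __init__(self):
        self.contents = {}

    def scan(self, name, obj):
        # visititems() calls this for every group and dataset; keep datasets only.
        if isinstance(obj, h5py.Dataset):
            self.contents[name] = np.asarray(obj[()]).tolist()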
def manage_pool(hostname, username, password, name,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None):
'''
Create a new pool if it does not already exist. Pool members are managed separately. Only the
parameters specified are enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to create
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[enabled | disabled]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_depth_limit
[integer]
queue_on_connection_limit
[enabled | disabled]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'manage', params={
'hostname': hostname,
'username': username,
'password': password,
'name': name,
'allow_nat': allow_nat,
'allow_snat': allow_snat,
'description': description,
'gateway_failsafe_device': gateway_failsafe_device,
'ignore_persisted_weight': ignore_persisted_weight,
            'ip_tos_to_client': ip_tos_to_client,
            'ip_tos_to_server': ip_tos_to_server,
'link_qos_to_client': link_qos_to_client,
'link_qos_to_server': link_qos_to_server,
'load_balancing_mode': load_balancing_mode,
'min_active_members': min_active_members,
'min_up_members': min_up_members,
'min_up_members_checking': min_up_members_checking,
'monitor': monitor,
'profiles': profiles,
'queue_depth_limit': queue_depth_limit,
'queue_on_connection_limit': queue_on_connection_limit,
'queue_time_limit': queue_time_limit,
'reselect_tries': reselect_tries,
'service_down_action': service_down_action,
'slow_ramp_time': slow_ramp_time
}
)
#is this pool currently configured?
existing = __salt__['bigip.list_pool'](hostname, username, password, name)
# if it exists
if existing['code'] == 200:
modified = __salt__['bigip.modify_pool'](hostname=hostname,
username=username,
password=password,
name=name,
allow_nat=allow_nat,
allow_snat=allow_snat,
description=description,
gateway_failsafe_device=gateway_failsafe_device,
ignore_persisted_weight=ignore_persisted_weight,
ip_tos_to_client=ip_tos_to_client,
ip_tos_to_server=ip_tos_to_server,
link_qos_to_client=link_qos_to_client,
link_qos_to_server=link_qos_to_server,
load_balancing_mode=load_balancing_mode,
min_active_members=min_active_members,
min_up_members=min_up_members,
min_up_members_action=min_up_members_action,
min_up_members_checking=min_up_members_checking,
monitor=monitor,
profiles=profiles,
queue_depth_limit=queue_depth_limit,
queue_on_connection_limit=queue_on_connection_limit,
queue_time_limit=queue_time_limit,
reselect_tries=reselect_tries,
service_down_action=service_down_action,
slow_ramp_time=slow_ramp_time)
#was the modification successful?
if modified['code'] == 200:
#remove member listings and self-links
del existing['content']['membersReference']
del modified['content']['membersReference']
del existing['content']['selfLink']
del modified['content']['selfLink']
ret = _check_for_changes('Pool', ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing['code'] == 404:
new = __salt__['bigip.create_pool'](hostname=hostname,
username=username,
password=password,
name=name,
allow_nat=allow_nat,
allow_snat=allow_snat,
description=description,
gateway_failsafe_device=gateway_failsafe_device,
ignore_persisted_weight=ignore_persisted_weight,
ip_tos_to_client=ip_tos_to_client,
ip_tos_to_server=ip_tos_to_server,
link_qos_to_client=link_qos_to_client,
link_qos_to_server=link_qos_to_server,
load_balancing_mode=load_balancing_mode,
min_active_members=min_active_members,
min_up_members=min_up_members,
min_up_members_action=min_up_members_action,
min_up_members_checking=min_up_members_checking,
monitor=monitor,
profiles=profiles,
queue_depth_limit=queue_depth_limit,
queue_on_connection_limit=queue_on_connection_limit,
queue_time_limit=queue_time_limit,
reselect_tries=reselect_tries,
service_down_action=service_down_action,
slow_ramp_time=slow_ramp_time)
# were we able to create it?
if new['code'] == 200:
ret['result'] = True
ret['comment'] = 'Pool was created and enforced to the desired state. Note: Only parameters specified ' \
'were enforced. See changes for details.'
ret['changes']['old'] = {}
ret['changes']['new'] = new['content']
# unable to create it
else:
ret = _load_result(new, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret | 0.003354 |
def gettemp(content=None, dir=None, prefix="tmp", suffix="tmp"):
"""Create temporary file with the given content.
Please note: the temporary file must be deleted by the caller.
:param string content: the content to write to the temporary file.
:param string dir: directory where the file should be created
:param string prefix: file name prefix
:param string suffix: file name suffix
:returns: a string with the path to the temporary file
"""
if dir is not None:
if not os.path.exists(dir):
os.makedirs(dir)
fh, path = tempfile.mkstemp(dir=dir, prefix=prefix, suffix=suffix)
_tmp_paths.append(path)
    if content:
        fh = os.fdopen(fh, "wb")
        if hasattr(content, 'encode'):
            content = content.encode('utf8')
        fh.write(content)
        fh.close()
    else:
        os.close(fh)  # avoid leaking the descriptor returned by mkstemp
return path | 0.001166 |
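A short usage sketch for gettemp, assuming the function and its module-level _tmp_paths list are in scope:
import os

path = gettemp('hello world', prefix='demo_', suffix='.txt')
with open(path, 'rb') as f:
    assert f.read() == b'hello world'
os.remove(path)   # the caller is responsible for cleanup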
def G(self, ID, lat, lon):
""" Creates a generic entry for an object. """
# Equatorial coordinates
eqM = utils.eqCoords(lon, lat)
eqZ = eqM
if lat != 0:
eqZ = utils.eqCoords(lon, 0)
return {
'id': ID,
'lat': lat,
'lon': lon,
'ra': eqM[0],
'decl': eqM[1],
'raZ': eqZ[0],
'declZ': eqZ[1],
} | 0.008734 |
def set_possible(self):
'''
break up a module path to its various parts (prefix, module, class, method)
this uses PEP 8 conventions, so foo.Bar would be foo module with class Bar
return -- list -- a list of possible interpretations of the module path
(eg, foo.bar can be bar module in foo module, or bar method in foo module)
'''
possible = []
name = self.name
logger.debug('Guessing test name: {}'.format(name))
name_f = self.name.lower()
filepath = ""
if name_f.endswith(".py") or ".py:" in name_f:
# path/something:Class.method
bits = name.split(":", 1)
filepath = bits[0]
logger.debug('Found filepath: {}'.format(filepath))
name = bits[1] if len(bits) > 1 else ""
if name:
logger.debug('Found test name: {} for filepath: {}'.format(name, filepath))
bits = name.split('.')
basedir = self.basedir
method_prefix = self.method_prefix
# check if the last bit is a Class
if re.search(r'^\*?[A-Z]', bits[-1]):
logger.debug('Found class in name: {}'.format(bits[-1]))
possible.append(PathFinder(basedir, method_prefix, **{
'class_name': bits[-1],
'module_name': bits[-2] if len(bits) > 1 else '',
'prefix': os.sep.join(bits[0:-2]),
'filepath': filepath,
}))
elif len(bits) > 1 and re.search(r'^\*?[A-Z]', bits[-2]):
logger.debug('Found class in name: {}'.format(bits[-2]))
possible.append(PathFinder(basedir, method_prefix, **{
'class_name': bits[-2],
'method_name': bits[-1],
'module_name': bits[-3] if len(bits) > 2 else '',
'prefix': os.sep.join(bits[0:-3]),
'filepath': filepath,
}))
else:
if self.name:
if filepath:
if len(bits):
possible.append(PathFinder(basedir, method_prefix, **{
'filepath': filepath,
'method_name': bits[0],
}))
else:
possible.append(PathFinder(basedir, method_prefix, **{
'filepath': filepath,
}))
else:
logger.debug('Test name is ambiguous')
possible.append(PathFinder(basedir, method_prefix, **{
'module_name': bits[-1],
'prefix': os.sep.join(bits[0:-1]),
'filepath': filepath,
}))
possible.append(PathFinder(basedir, method_prefix, **{
'method_name': bits[-1],
'module_name': bits[-2] if len(bits) > 1 else '',
'prefix': os.sep.join(bits[0:-2]),
'filepath': filepath,
}))
possible.append(PathFinder(basedir, method_prefix, **{
'prefix': os.sep.join(bits),
'filepath': filepath,
}))
else:
possible.append(PathFinder(basedir, method_prefix, filepath=filepath))
logger.debug("Found {} possible test names".format(len(possible)))
self.possible = possible | 0.001994 |
def column_type(self, column_name):
"""
Report column type as one of 'local', 'series', or 'function'.
Parameters
----------
column_name : str
Returns
-------
col_type : {'local', 'series', 'function'}
'local' means that the column is part of the registered table,
'series' means the column is a registered Pandas Series,
and 'function' means the column is a registered function providing
a Pandas Series.
"""
extra_cols = list_columns_for_table(self.name)
if column_name in extra_cols:
col = _COLUMNS[(self.name, column_name)]
if isinstance(col, _SeriesWrapper):
return 'series'
elif isinstance(col, _ColumnFuncWrapper):
return 'function'
elif column_name in self.local_columns:
return 'local'
raise KeyError('column {!r} not found'.format(column_name)) | 0.002014 |
def random_public_ip(self):
"""Return a randomly generated, public IPv4 address.
:return: ip address
"""
randomip = random_ip()
while self.is_reserved_ip(randomip):
randomip = random_ip()
return randomip | 0.007576 |
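random_public_ip relies on random_ip() and is_reserved_ip() helpers that are not shown; a self-contained sketch of the same rejection-sampling idea using the standard ipaddress module (stand-ins, not the original helpers):
import random
import ipaddress

def _random_ip():
    return ipaddress.IPv4Address(random.getrandbits(32))

def random_public_ipv4():
    """Draw addresses until one is neither private, reserved, multicast, loopback nor link-local."""
    ip = _random_ip()
    while (ip.is_private or ip.is_reserved or ip.is_multicast
           or ip.is_loopback or ip.is_link_local):
        ip = _random_ip()
    return str(ip)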