def main():
"""Entry point for the script"""
desc = 'Converts between geodetic, modified apex, quasi-dipole and MLT'
parser = argparse.ArgumentParser(description=desc, prog='apexpy')
parser.add_argument('source', metavar='SOURCE',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert from {geo, apex, qd, mlt}')
parser.add_argument('dest', metavar='DEST',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert to {geo, apex, qd, mlt}')
desc = 'YYYY[MM[DD[HHMMSS]]] date/time for IGRF coefficients, time part '
desc += 'required for MLT calculations'
parser.add_argument('date', metavar='DATE', help=desc)
parser.add_argument('--height', dest='height', default=0, metavar='HEIGHT',
type=float, help='height for conversion')
parser.add_argument('--refh', dest='refh', metavar='REFH', type=float,
default=0,
help='reference height for modified apex coordinates')
parser.add_argument('-i', '--input', dest='file_in', metavar='FILE_IN',
type=argparse.FileType('r'), default=STDIN,
help='input file (stdin if none specified)')
parser.add_argument('-o', '--output', dest='file_out', metavar='FILE_OUT',
type=argparse.FileType('wb'), default=STDOUT,
help='output file (stdout if none specified)')
args = parser.parse_args()
array = np.loadtxt(args.file_in, ndmin=2)
if 'mlt' in [args.source, args.dest] and len(args.date) < 14:
desc = 'full date/time YYYYMMDDHHMMSS required for MLT calculations'
raise ValueError(desc)
if 9 <= len(args.date) <= 13:
desc = 'full date/time must be given as YYYYMMDDHHMMSS, not ' + \
'YYYYMMDDHHMMSS'[:len(args.date)]
raise ValueError(desc)
datetime = dt.datetime.strptime(args.date,
'%Y%m%d%H%M%S'[:len(args.date)-2])
A = apexpy.Apex(date=datetime, refh=args.refh)
lats, lons = A.convert(array[:, 0], array[:, 1], args.source, args.dest,
args.height, datetime=datetime)
np.savetxt(args.file_out, np.column_stack((lats, lons)), fmt='%.8f')
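# Example (added for illustration, not part of the original source): a hedged sketch of
# the same conversion done programmatically rather than via the CLI, assuming apexpy is
# installed; the coordinates, height and date below are hypothetical.
import datetime as dt
import apexpy
when = dt.datetime(2015, 2, 10, 18, 0, 0)
apex_out = apexpy.Apex(date=when, refh=110)
mlat, mlon = apex_out.convert(60.0, 15.0, 'geo', 'apex', height=300, datetime=when)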
def get_and_alter(self, function):
"""
Alters the currently stored reference by applying a function to it and gets the old value.
:param function: (Function), A stateful serializable object which represents the Function defined on
the server side.
This object must have a serializable Function counterpart registered on the server side with the actual
``org.hazelcast.core.IFunction`` implementation.
:return: (object), the old value, the value before the function is applied.
"""
check_not_none(function, "function can't be None")
return self._encode_invoke(atomic_reference_get_and_alter_codec, function=self._to_data(function))
def from_weight_map(cls, pixel_scale, weight_map):
"""Setup the noise-map from a weight map, which is a form of noise-map that comes via HST image-reduction and \
the software package MultiDrizzle.
The variance in each pixel is computed as:
Variance = 1.0 / sqrt(weight_map).
The weight map may contain zeros, in which cause the variances are converted to large values to omit them from \
the analysis.
Parameters
-----------
pixel_scale : float
The size of each pixel in arc seconds.
weight_map : ndarray
The weight-value of each pixel which is converted to a variance.
"""
np.seterr(divide='ignore')
noise_map = 1.0 / np.sqrt(weight_map)
noise_map[noise_map == np.inf] = 1.0e8
return NoiseMap(array=noise_map, pixel_scale=pixel_scale)
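# Example (added for illustration, not part of the original source): a standalone sketch
# of the weight-to-noise conversion performed above, using a hypothetical 2x2 weight map
# with one zero-weight pixel.
import numpy as np
weight_map = np.array([[4.0, 1.0],
                       [0.0, 16.0]])
with np.errstate(divide='ignore'):
    noise_map = 1.0 / np.sqrt(weight_map)
noise_map[noise_map == np.inf] = 1.0e8
# noise_map is now [[0.5, 1.0], [1.0e8, 0.25]]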
def validate(self):
"""method traverse tree and performs following activities:
* requests a job record in STATE_EMBRYO if no job record is currently assigned to the node
* requests nodes for reprocessing, if STATE_PROCESSED node relies on unfinalized nodes
* requests node for skipping if it is daily node and all 24 of its Hourly nodes are in STATE_SKIPPED state"""
# step 1: request Job record if current one is not set
if self.job_record is None:
self.tree.timetable.assign_job_record(self)
# step 2: define if current node has a younger sibling
next_timeperiod = time_helper.increment_timeperiod(self.time_qualifier, self.timeperiod)
has_younger_sibling = next_timeperiod in self.parent.children
# step 3: define if all children are done and if perhaps they all are in STATE_SKIPPED
all_children_skipped = True
all_children_finished = True
for timeperiod, child in self.children.items():
child.validate()
if child.job_record.is_active:
all_children_finished = False
if not child.job_record.is_skipped:
all_children_skipped = False
# step 4: request this node's reprocessing if it is enroute to STATE_PROCESSED
# while some of its children are still performing processing
if all_children_finished is False and self.job_record.is_finished:
self.tree.timetable.reprocess_tree_node(self)
# step 5: verify if this node should be transferred to STATE_SKIPPED
# algorithm is following:
# point a: node must have children
# point b: existence of a younger sibling means that the tree contains another node of the same level
# thus - should the tree.build_timeperiod be not None - the children level of this node is fully constructed
# point c: if all children of this node are in STATE_SKIPPED then we will set this node state to STATE_SKIPPED
if len(self.children) != 0 \
and all_children_skipped \
and self.tree.build_timeperiod is not None \
and has_younger_sibling is True \
and not self.job_record.is_skipped:
self.tree.timetable.skip_tree_node(self)
def find(self, resource, req, sub_resource_lookup):
"""Find documents for resource."""
args = getattr(req, 'args', request.args if request else {}) or {}
source_config = config.SOURCES[resource]
if args.get('source'):
query = json.loads(args.get('source'))
if 'filtered' not in query.get('query', {}):
_query = query.get('query')
query['query'] = {'filtered': {}}
if _query:
query['query']['filtered']['query'] = _query
else:
query = {'query': {'filtered': {}}}
if args.get('q', None):
query['query']['filtered']['query'] = _build_query_string(args.get('q'),
default_field=args.get('df', '_all'),
default_operator=args.get('default_operator', 'OR'))
if 'sort' not in query:
if req.sort:
sort = ast.literal_eval(req.sort)
set_sort(query, sort)
elif self._default_sort(resource) and 'query' not in query['query']['filtered']:
set_sort(query, self._default_sort(resource))
if req.max_results:
query.setdefault('size', req.max_results)
if req.page > 1:
query.setdefault('from', (req.page - 1) * req.max_results)
filters = []
filters.append(source_config.get('elastic_filter'))
filters.append(source_config.get('elastic_filter_callback', noop)())
filters.append({'and': _build_lookup_filter(sub_resource_lookup)} if sub_resource_lookup else None)
filters.append(json.loads(args.get('filter')) if 'filter' in args else None)
filters.extend(args.get('filters') if 'filters' in args else [])
if req.where:
try:
filters.append({'term': json.loads(req.where)})
except ValueError:
try:
filters.append({'term': parse(req.where)})
except ParseError:
abort(400)
set_filters(query, filters)
if 'facets' in source_config:
query['facets'] = source_config['facets']
if 'aggregations' in source_config and self.should_aggregate(req):
query['aggs'] = source_config['aggregations']
if 'es_highlight' in source_config and self.should_highlight(req):
query_string = query['query'].get('filtered', {}).get('query', {}).get('query_string')
highlights = source_config.get('es_highlight', noop)(query_string)
if highlights:
query['highlight'] = highlights
query['highlight'].setdefault('require_field_match', False)
source_projections = None
if self.should_project(req):
source_projections = self.get_projected_fields(req)
args = self._es_args(resource, source_projections=source_projections)
try:
hits = self.elastic(resource).search(body=query, **args)
except elasticsearch.exceptions.RequestError as e:
if e.status_code == 400 and "No mapping found for" in e.error:
hits = {}
elif e.status_code == 400 and 'SearchParseException' in e.error:
raise InvalidSearchString
else:
raise
return self._parse_hits(hits, resource)
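# Example (added for illustration, not part of the original source): a plausible shape of
# the pre-2.x Elasticsearch "filtered" query body that find() assembles for a hypothetical
# request with q='headline:storm', one term filter and paging; the placement of the filter
# clause is an assumption, since set_filters() is defined elsewhere.
example_query = {
    'query': {
        'filtered': {
            'query': {'query_string': {'query': 'headline:storm',
                                       'default_field': '_all',
                                       'default_operator': 'OR'}},
            'filter': {'and': [{'term': {'type': 'text'}}]},
        }
    },
    'size': 25,   # req.max_results
    'from': 25,   # (req.page - 1) * req.max_results for page 2
}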
def solveConsAggShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,PermGroFac,
PermGroFacAgg,aXtraGrid,BoroCnstArt,Mgrid,AFunc,Rfunc,wFunc,DeprFac):
'''
Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). This is a basic solver that
can't handle cubic splines, nor can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing five arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
PermGroFacAgg : float
Expected aggregate productivity growth factor.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
BoroCnstArt : float
Artificial borrowing constraint; minimum allowable end-of-period asset-to-
permanent-income ratio. Unlike other models, this *can't* be None.
Mgrid : np.array
A grid of aggregate market resources to permanent income in the economy.
AFunc : function
Aggregate savings as a function of aggregate market resources.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
DeprFac : float
Capital Depreciation Rate
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear
interpolations) and a marginal value function vPfunc.
'''
# Unpack next period's solution
vPfuncNext = solution_next.vPfunc
mNrmMinNext = solution_next.mNrmMin
# Unpack the income shocks
ShkPrbsNext = IncomeDstn[0]
PermShkValsNext = IncomeDstn[1]
TranShkValsNext = IncomeDstn[2]
PermShkAggValsNext = IncomeDstn[3]
TranShkAggValsNext = IncomeDstn[4]
ShkCount = ShkPrbsNext.size
# Make the grid of end-of-period asset values, and a tiled version
aNrmNow = aXtraGrid
aCount = aNrmNow.size
Mcount = Mgrid.size
aXtra_tiled = np.tile(np.reshape(aNrmNow,(1,aCount,1)),(Mcount,1,ShkCount))
# Make tiled versions of the income shocks
# Dimension order: Mnow, aNow, Shk
ShkPrbsNext_tiled = np.tile(np.reshape(ShkPrbsNext,(1,1,ShkCount)),(Mcount,aCount,1))
PermShkValsNext_tiled = np.tile(np.reshape(PermShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
TranShkValsNext_tiled = np.tile(np.reshape(TranShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
PermShkAggValsNext_tiled = np.tile(np.reshape(PermShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
TranShkAggValsNext_tiled = np.tile(np.reshape(TranShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
# Calculate returns to capital and labor in the next period
AaggNow_tiled = np.tile(np.reshape(AFunc(Mgrid),(Mcount,1,1)),(1,aCount,ShkCount))
kNext_array = AaggNow_tiled/(PermGroFacAgg*PermShkAggValsNext_tiled) # Next period's aggregate capital to labor ratio
kNextEff_array = kNext_array/TranShkAggValsNext_tiled # Same thing, but account for *transitory* shock
R_array = Rfunc(kNextEff_array) # Interest factor on aggregate assets
Reff_array = R_array/LivPrb # Effective interest factor on individual assets *for survivors*
wEff_array = wFunc(kNextEff_array)*TranShkAggValsNext_tiled # Effective wage rate (accounts for labor supply)
PermShkTotal_array = PermGroFac*PermGroFacAgg*PermShkValsNext_tiled*PermShkAggValsNext_tiled # total / combined permanent shock
Mnext_array = kNext_array*R_array + wEff_array # next period's aggregate market resources
# Find the natural borrowing constraint for each value of M in the Mgrid.
# There is likely a faster way to do this, but someone needs to do the math:
# is aNrmMin determined by getting the worst shock of all four types?
aNrmMin_candidates = PermGroFac*PermGroFacAgg*PermShkValsNext_tiled[:,0,:]*PermShkAggValsNext_tiled[:,0,:]/Reff_array[:,0,:]*\
(mNrmMinNext(Mnext_array[:,0,:]) - wEff_array[:,0,:]*TranShkValsNext_tiled[:,0,:])
aNrmMin_vec = np.max(aNrmMin_candidates,axis=1)
BoroCnstNat_vec = aNrmMin_vec
aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1,1)),(1,aCount,ShkCount))
aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled
# Calculate market resources next period (and a constant array of capital-to-labor ratio)
mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array
# Find marginal value next period at every income shock realization and every aggregate market resource gridpoint
vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,Mnext_array)
# Calculate expectated marginal value at the end of the period at every asset gridpoint
EndOfPrdvP = DiscFac*LivPrb*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=2)
# Calculate optimal consumption from each asset gridpoint
cNrmNow = EndOfPrdvP**(-1.0/CRRA)
mNrmNow = aNrmNow_tiled[:,:,0] + cNrmNow
# Loop through the values in Mgrid and make a linear consumption function for each
cFuncBaseByM_list = []
for j in range(Mcount):
c_temp = np.insert(cNrmNow[j,:],0,0.0) # Add point at bottom
m_temp = np.insert(mNrmNow[j,:] - BoroCnstNat_vec[j],0,0.0)
cFuncBaseByM_list.append(LinearInterp(m_temp,c_temp))
# Add the M-specific consumption function to the list
# Construct the overall unconstrained consumption function by combining the M-specific functions
BoroCnstNat = LinearInterp(np.insert(Mgrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0))
cFuncBase = LinearInterpOnInterp1D(cFuncBaseByM_list,Mgrid)
cFuncUnc = VariableLowerBoundFunc2D(cFuncBase,BoroCnstNat)
# Make the constrained consumption function and combine it with the unconstrained component
cFuncCnst = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),
np.array([BoroCnstArt,BoroCnstArt+1.0]),np.array([0.0,1.0]))
cFuncNow = LowerEnvelope2D(cFuncUnc,cFuncCnst)
# Make the minimum m function as the greater of the natural and artificial constraints
mNrmMinNow = UpperEnvelope(BoroCnstNat,ConstantFunction(BoroCnstArt))
# Construct the marginal value function using the envelope condition
vPfuncNow = MargValueFunc2D(cFuncNow,CRRA)
# Pack up and return the solution
solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow,mNrmMin=mNrmMinNow)
return solution_now
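# Example (added for illustration, not part of the original source): a toy sketch of the
# endogenous-grid step used above, where the first-order condition with CRRA utility gives
# c = EndOfPrdvP**(-1/CRRA) and endogenous market resources m = a + c; values are hypothetical.
import numpy as np
CRRA = 2.0
aNrm = np.array([0.0, 0.5, 1.0, 2.0])          # hypothetical asset gridpoints
EndOfPrdvP = np.array([4.0, 2.0, 1.0, 0.25])   # hypothetical end-of-period marginal values
cNrm = EndOfPrdvP ** (-1.0 / CRRA)             # -> [0.5, 0.707..., 1.0, 2.0]
mNrm = aNrm + cNrm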
def get_position_d(self):
""" Get the D value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("could not communicate with motors")
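# Example (added for illustration, not part of the original source): a Python 3 sketch of
# the byte arithmetic in the return expression above, using a hypothetical 13-byte reply;
# the 16-bit D gain is the low byte at index 9 plus the high byte at index 10.
rxdata = bytes([0] * 9 + [0x34, 0x01, 0, 0])
value = (rxdata[10] * 256) + (rxdata[9] & 0xFF)
assert value == 0x0134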
def discardID(self, idVal):
"""Checks if the collected items contains the give _idVal_ and discards it if it is found, will not raise an exception if item is not found
# Parameters
_idVal_ : `str`
> The discarded id string
"""
for i in self:
if i.id == idVal:
self._collection.discard(i)
return
def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
"""Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != '-' and b != '-':
aa_flag = 'match'
elif a != b and a == '-' and b != '-':
aa_flag = 'insertion'
elif a != b and a != '-' and b == '-':
aa_flag = 'deletion'
elif a != b and a != '-' and b == 'X':
aa_flag = 'unresolved'
elif a != b and b != '-' and a == 'X':
aa_flag = 'unresolved'
elif a != b and a != '-' and b != '-':
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == 'deletion':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if aa_flag == 'insertion':
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df
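# Example (added for illustration, not part of the original source): a hedged usage sketch
# assuming the module's imports (pandas, numpy, ssbio) are available; the aligned strings
# are hypothetical and contain one mutation and one deletion.
a_aln = 'MKTDLL'
b_aln = 'MKTE-L'
df = get_alignment_df(a_aln, b_aln, a_seq_id='seqA', b_seq_id='seqB')
# df['type'] -> ['match', 'match', 'match', 'mutation', 'deletion', 'match']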
def _load_values(self, db_key: str) -> dict:
"""Load values from the db at the specified key, db_key.
FIXME(BMo): Could also be extended to load scalar types (instead of
just list and hash)
"""
if self._db.type(db_key) == 'list':
db_values = self._db.lrange(db_key, 0, -1)
for i, value in enumerate(db_values):
try:
db_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
else: # self._db.type == 'hash'
db_values = self._db.hgetall(db_key)
for _key, _value in db_values.items():
try:
db_values[_key] = ast.literal_eval(_value)
except SyntaxError:
pass
except ValueError:
pass
return db_values
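# Example (added for illustration, not part of the original source): a standalone sketch of
# the value conversion above; strings read back from the database become Python literals
# where possible and stay strings otherwise. The sample values are hypothetical.
import ast
raw = {'timeout': '30', 'hosts': "['a', 'b']", 'name': 'sdp'}
for _key, _value in raw.items():
    try:
        raw[_key] = ast.literal_eval(_value)
    except (SyntaxError, ValueError):
        pass
# raw -> {'timeout': 30, 'hosts': ['a', 'b'], 'name': 'sdp'}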
def addCases(self, tupesValStmnts):
"""
Add multiple case statements from an iterable of tuples
(caseVal, statements)
"""
s = self
for val, statements in tupesValStmnts:
s = s.Case(val, statements)
return s
def search_playlist(self, playlist_name, quiet=False, limit=9):
"""Search playlist by playlist name.
:param playlist_name: playlist name.
:param quiet: automatically select the best one.
:param limit: playlist count returned by weapi.
:return: a Playlist object.
"""
result = self.search(playlist_name, search_type=1000, limit=limit)
if result['result']['playlistCount'] <= 0:
LOG.warning('Playlist %s not existed!', playlist_name)
raise SearchNotFound('playlist {} not existed'.format(playlist_name))
else:
playlists = result['result']['playlists']
if quiet:
playlist_id, playlist_name = playlists[0]['id'], playlists[0]['name']
playlist = Playlist(playlist_id, playlist_name)
return playlist
else:
return self.display.select_one_playlist(playlists)
def groupby(self, dimensions=None, container_type=None, group_type=None, **kwargs):
"""Groups DynamicMap by one or more dimensions
Applies groupby operation over the specified dimensions
returning an object of type container_type (expected to be
dictionary-like) containing the groups.
Args:
dimensions: Dimension(s) to group by
container_type: Type to cast group container to
group_type: Type to cast each group to
dynamic: Whether to return a DynamicMap
**kwargs: Keyword arguments to pass to each group
Returns:
Returns object of supplied container_type containing the
groups. If dynamic=True returns a DynamicMap instead.
"""
if dimensions is None:
dimensions = self.kdims
if not isinstance(dimensions, (list, tuple)):
dimensions = [dimensions]
container_type = container_type if container_type else type(self)
group_type = group_type if group_type else type(self)
outer_kdims = [self.get_dimension(d) for d in dimensions]
inner_kdims = [d for d in self.kdims if not d in outer_kdims]
outer_dynamic = issubclass(container_type, DynamicMap)
inner_dynamic = issubclass(group_type, DynamicMap)
if ((not outer_dynamic and any(not d.values for d in outer_kdims)) or
(not inner_dynamic and any(not d.values for d in inner_kdims))):
raise Exception('Dimensions must specify sampling via '
'values to apply a groupby')
if outer_dynamic:
def outer_fn(*outer_key, **dynkwargs):
if inner_dynamic:
def inner_fn(*inner_key, **dynkwargs):
outer_vals = zip(outer_kdims, util.wrap_tuple(outer_key))
inner_vals = zip(inner_kdims, util.wrap_tuple(inner_key))
inner_sel = [(k.name, v) for k, v in inner_vals]
outer_sel = [(k.name, v) for k, v in outer_vals]
return self.select(**dict(inner_sel+outer_sel))
return self.clone([], callback=inner_fn, kdims=inner_kdims)
else:
dim_vals = [(d.name, d.values) for d in inner_kdims]
dim_vals += [(d.name, [v]) for d, v in
zip(outer_kdims, util.wrap_tuple(outer_key))]
with item_check(False):
selected = HoloMap(self.select(**dict(dim_vals)))
return group_type(selected.reindex(inner_kdims))
if outer_kdims:
return self.clone([], callback=outer_fn, kdims=outer_kdims)
else:
return outer_fn(())
else:
outer_product = itertools.product(*[self.get_dimension(d).values
for d in dimensions])
groups = []
for outer in outer_product:
outer_vals = [(d.name, [o]) for d, o in zip(outer_kdims, outer)]
if inner_dynamic or not inner_kdims:
def inner_fn(outer_vals, *key, **dynkwargs):
inner_dims = zip(inner_kdims, util.wrap_tuple(key))
inner_vals = [(d.name, k) for d, k in inner_dims]
return self.select(**dict(outer_vals+inner_vals)).last
if inner_kdims or self.streams:
group = self.clone(callback=partial(inner_fn, outer_vals),
kdims=inner_kdims)
else:
group = inner_fn(outer_vals, ())
groups.append((outer, group))
else:
inner_vals = [(d.name, self.get_dimension(d).values)
for d in inner_kdims]
with item_check(False):
selected = HoloMap(self.select(**dict(outer_vals+inner_vals)))
group = group_type(selected.reindex(inner_kdims))
groups.append((outer, group))
return container_type(groups, kdims=outer_kdims)
def trigger(self, event, filter=None, update=None, documents=None, ids=None, replacements=None):
""" Trigger the after_save hook on documents, if present. """
if not self.has_trigger(event):
return
if documents is not None:
pass
elif ids is not None:
documents = self.find_by_ids(ids, read_use="primary")
elif filter is not None:
documents = self.find(filter, read_use="primary")
else:
raise Exception("Trigger couldn't filter documents")
for doc in documents:
getattr(doc, event)(update=update, replacements=replacements)
def Process(self, path):
"""Processes a given path.
Args:
path: Path (as a string) to post-process.
Returns:
A list of paths with environment variables replaced with their
values. If the mapping had a list of values for a particular variable,
instead of just one value, then all possible replacements will be
returned.
"""
path = re.sub(self.SYSTEMROOT_RE, r"%systemroot%", path, count=1)
path = re.sub(self.SYSTEM32_RE, r"%systemroot%\\system32", path, count=1)
matches_iter = self.WIN_ENVIRON_REGEX.finditer(path)
var_names = set(m.group(1).lower() for m in matches_iter)
results = [path]
for var_name in var_names:
try:
var_regex, var_value = self.vars_map[var_name]
except KeyError:
continue
if isinstance(var_value, string_types):
replacements = [var_value]
else:
replacements = var_value
processed_results = []
for result in results:
for repl in replacements:
# Using lambda here, as otherwise Python interprets \\f as a
# backreference (same applies to \\0 and \\1). When using a
# function as a replacement argument, backreferences are ignored.
# pylint: disable=cell-var-from-loop
processed_results.append(var_regex.sub(lambda _: repl, result))
results = processed_results
return results
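# Example (added for illustration, not part of the original source): a simplified
# standalone sketch of the expansion loop above with a hypothetical vars_map entry;
# a lambda replacement avoids backslashes being read as backreferences.
import re
vars_map = {'users': (re.compile(re.escape('%users%'), re.IGNORECASE),
                      ['C:\\Users\\alice', 'C:\\Users\\bob'])}
path = '%users%\\AppData'
results = [path]
for var_regex, replacements in vars_map.values():
    results = [var_regex.sub(lambda _: repl, result)
               for result in results
               for repl in replacements]
# results -> ['C:\\Users\\alice\\AppData', 'C:\\Users\\bob\\AppData']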
def _set_af_vrf(self, v, load=False):
"""
Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """af_vrf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
})
self.__af_vrf = t
if hasattr(self, '_set'):
self._set()
def create_filename_parser(self, base_dir):
"""Create a :class:`trollsift.parser.Parser` object for later use."""
# just in case a writer needs more complex file patterns
# Set a way to create filenames if we were given a pattern
if base_dir and self.file_pattern:
file_pattern = os.path.join(base_dir, self.file_pattern)
else:
file_pattern = self.file_pattern
return parser.Parser(file_pattern) if file_pattern else None
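# Example (added for illustration, not part of the original source): a hedged usage sketch
# with a hypothetical base directory and file pattern; trollsift's Parser can then compose
# an output filename from a dict of fields.
import os
from datetime import datetime
from trollsift import parser
p = parser.Parser(os.path.join('/tmp/output', '{platform_name}_{start_time:%Y%m%d_%H%M%S}.tif'))
filename = p.compose({'platform_name': 'Meteosat-11',
                      'start_time': datetime(2021, 1, 1, 12, 0)})
# filename -> '/tmp/output/Meteosat-11_20210101_120000.tif'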
def formatTextException(*args):
"""
Formats given exception as a text.
:param \*args: Arguments.
:type \*args: \*
:return: Exception text.
:rtype: unicode
"""
format = lambda x: re.sub(r"^(\s+)", lambda y: "{0} ".format("." * len(y.group(0))), x.rstrip().expandtabs(4))
verbose = 10
cls, instance, trcback = args
stack = foundations.exceptions.extract_stack(foundations.exceptions.get_inner_most_frame(trcback), verbose)
text = []
text.append(foundations.strings.to_string(cls))
text.append("")
for line in foundations.exceptions.format_exception(cls, instance, trcback):
text.append(format("{0}".format(format(line))))
text.append("")
text.append("An unhandled exception occurred in {0} {1}!".format(Constants.application_name,
Constants.version))
text.append("Sequence of calls leading up to the exception, in their occurring order:")
text.append("")
for frame, file_name, line_number, name, context, index in stack:
location = "{0}{1}".format(name if name != "<module>" else "",
inspect.formatargvalues(*inspect.getargvalues(frame)))
text.append("File \"{0}\", line {1}, in {2}".format(file_name, line_number, location))
for i, line in enumerate(context):
if i == index:
text.append(format("\t{0} {1} <===".format(line_number - index + i, format(format(line)))))
else:
text.append(format("\t{0} {1}".format(line_number - index + i, format(format(line)))))
text.append("")
for line in traceback.format_exception_only(cls, instance):
text.append("{0}".format(format(line)))
text.append("")
text.append("Frames locals by stack ordering, innermost last:")
text.append("")
for frame, locals in foundations.exceptions.extract_locals(trcback):
name, file_name, line_number = frame
text.append("Frame \"{0}\" in \"{1}\" file, line {2}:".format(name, file_name, line_number))
arguments, nameless_args, keyword_args, locals = locals
has_arguments, has_locals = any((arguments, nameless_args, keyword_args)), any(locals)
has_arguments and text.append(format("\tArguments:"))
for key, value in arguments.iteritems():
text.append(format("\t\t{0} = {1}".format(key, value)))
for value in nameless_args:
text.append(format("\t\t{0}".format(value)))
for key, value in sorted(keyword_args.iteritems()):
text.append(format("\t\t{0} = {1}".format(key, value)))
has_locals and text.append(format("\tLocals:"))
for key, value in sorted(locals.iteritems()):
text.append(format("\t\t{0} = {1}".format(key, value)))
text.append("")
return text
def get_vlan_brief_output_vlan_vlan_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id_key = ET.SubElement(vlan, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
vlan_name = ET.SubElement(vlan, "vlan-name")
vlan_name.text = kwargs.pop('vlan_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def _get_ip_address(self, request):
"""Get the remote ip address the request was generated from. """
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
return ipaddr.split(",")[0].strip()
return request.META.get("REMOTE_ADDR", "")
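# Example (added for illustration, not part of the original source): a standalone sketch
# of the header handling above; X-Forwarded-For lists the originating client first,
# followed by any proxies, so the first entry is returned. Addresses are hypothetical.
meta = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1, 10.0.0.2',
        'REMOTE_ADDR': '10.0.0.2'}
ipaddr = meta.get('HTTP_X_FORWARDED_FOR')
client_ip = ipaddr.split(',')[0].strip() if ipaddr else meta.get('REMOTE_ADDR', '')
# client_ip -> '203.0.113.7'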
def init_logging(config):
"""Initialize base logger named 'wsgidav'.
The base logger is filtered by the `verbose` configuration option.
Log entries will have a time stamp and thread id.
:Parameters:
verbose : int
Verbosity configuration (0..5)
enable_loggers : string list
List of module logger names, that will be switched to DEBUG level.
Module loggers
~~~~~~~~~~~~~~
Module loggers (e.g 'wsgidav.lock_manager') are named loggers, that can be
independently switched to DEBUG mode.
Except for verbosity, they will inherit settings from the base logger.
They will suppress DEBUG level messages, unless they are enabled by passing
their name to util.init_logging().
If enabled, module loggers will print DEBUG messages, even if verbose == 3.
Example: initialize and use a module logger that will generate output
if enabled (and verbose >= 2)::
_logger = util.get_module_logger(__name__)
[..]
_logger.debug("foo: '{}'".format(s))
This logger would be enabled by passing its name to init_logging()::
enable_loggers = ["lock_manager",
"property_manager",
]
util.init_logging(2, enable_loggers)
Log Level Matrix
~~~~~~~~~~~~~~~~
+---------+--------+---------------------------------------------------------------+
| Verbose | Option | Log level |
| level | +-------------+------------------------+------------------------+
| | | base logger | module logger(default) | module logger(enabled) |
+=========+========+=============+========================+========================+
| 0 | -qqq | CRITICAL | CRITICAL | CRITICAL |
+---------+--------+-------------+------------------------+------------------------+
| 1 | -qq | ERROR | ERROR | ERROR |
+---------+--------+-------------+------------------------+------------------------+
| 2 | -q | WARN | WARN | WARN |
+---------+--------+-------------+------------------------+------------------------+
| 3 | | INFO | INFO | **DEBUG** |
+---------+--------+-------------+------------------------+------------------------+
| 4 | -v | DEBUG | DEBUG | DEBUG |
+---------+--------+-------------+------------------------+------------------------+
| 5 | -vv | DEBUG | DEBUG | DEBUG |
+---------+--------+-------------+------------------------+------------------------+
"""
verbose = config.get("verbose", 3)
enable_loggers = config.get("enable_loggers", [])
if enable_loggers is None:
enable_loggers = []
logger_date_format = config.get("logger_date_format", "%Y-%m-%d %H:%M:%S")
logger_format = config.get(
"logger_format",
"%(asctime)s.%(msecs)03d - <%(thread)d> %(name)-27s %(levelname)-8s: %(message)s",
)
formatter = logging.Formatter(logger_format, logger_date_format)
# Define handlers
consoleHandler = logging.StreamHandler(sys.stdout)
# consoleHandler = logging.StreamHandler(sys.stderr)
consoleHandler.setFormatter(formatter)
# consoleHandler.setLevel(logging.DEBUG)
# Add the handlers to the base logger
logger = logging.getLogger(BASE_LOGGER_NAME)
if verbose >= 4: # --verbose
logger.setLevel(logging.DEBUG)
elif verbose == 3: # default
logger.setLevel(logging.INFO)
elif verbose == 2: # --quiet
logger.setLevel(logging.WARN)
# consoleHandler.setLevel(logging.WARN)
elif verbose == 1: # -qq
logger.setLevel(logging.ERROR)
# consoleHandler.setLevel(logging.WARN)
else: # -qqq
logger.setLevel(logging.CRITICAL)
# consoleHandler.setLevel(logging.ERROR)
# Don't call the root's handlers after our custom handlers
logger.propagate = False
# Remove previous handlers
for hdlr in logger.handlers[:]: # Must iterate an array copy
try:
hdlr.flush()
hdlr.close()
except Exception:
pass
logger.removeHandler(hdlr)
logger.addHandler(consoleHandler)
if verbose >= 3:
for e in enable_loggers:
if not e.startswith(BASE_LOGGER_NAME + "."):
e = BASE_LOGGER_NAME + "." + e
lg = logging.getLogger(e.strip())
lg.setLevel(logging.DEBUG)
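# Example (added for illustration, not part of the original source): a hedged usage sketch
# assuming BASE_LOGGER_NAME is "wsgidav"; at the default verbosity only the explicitly
# enabled module logger emits DEBUG messages, matching the log level matrix above.
import logging
config = {"verbose": 3, "enable_loggers": ["lock_manager"]}
init_logging(config)
_logger = logging.getLogger("wsgidav.lock_manager")
_logger.debug("emitted, because this module logger was enabled")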
def delete_insight(self, project_key, insight_id):
"""Delete an existing insight.
:param project_key: Project identifier, in the form of
projectOwner/projectId
:type project_key: str
:param insight_id: Insight unique id
:type insight_id: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> del_insight = api_client.delete_insight(
... 'username/project', 'insightid') # doctest: +SKIP
"""
projectOwner, projectId = parse_dataset_key(project_key)
try:
self._insights_api.delete_insight(projectOwner,
projectId,
insight_id)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def to_bytes(string):
"""Convert a string (bytes, str or unicode) to bytes."""
assert isinstance(string, basestring)
if sys.version_info[0] >= 3:
if isinstance(string, str):
return string.encode('utf-8')
else:
return string
else:
if isinstance(string, unicode):
return string.encode('utf-8')
else:
return string
def get_list(self, size=100, startIndex=0, searchText="", sortProperty="", sortOrder='ASC', status='Active,Pending'):
"""
Request service locations
Returns
-------
dict
"""
url = urljoin(BASEURL, "sites", "list")
params = {
'api_key': self.token,
'size': size,
'startIndex': startIndex,
'sortOrder': sortOrder,
'status': status
}
if searchText:
params['searchText'] = searchText
if sortProperty:
params['sortProperty'] = sortProperty
r = requests.get(url, params)
r.raise_for_status()
return r.json()
def on_module(self, node): # ():('body',)
"""Module def."""
out = None
for tnode in node.body:
out = self.run(tnode)
return out
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap::
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap::
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
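# Example (added for illustration, not part of the original source): a hedged usage sketch
# where `bandpass` stands for a hypothetical instance of the filter class defining
# overlap(); the spectrum carries astropy units as the docstring requires.
import astropy.units as q
import numpy as np
wave = np.linspace(0.5, 2.5, 100) * q.um
flux = np.ones(100) * q.erg / q.s / q.cm ** 2 / q.AA
status = bandpass.overlap([wave, flux])   # 'full', 'partial' or 'none'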
def addTab(self, widget, *args):
"""
Re-implements addTab to connect to the dirty changed signal and set up
some helper attributes.
:param widget: widget to add
:param args: optional additional arguments (name and/or icon).
"""
widget.dirty_changed.connect(self._on_dirty_changed)
super(CodeEditTabWidget, self).addTab(widget, *args)
def get_value(self, key):
"""
Fetch the settings value with the highest precedence for the given
key, or raise KeyError.
Precedence:
- IDB scope
- directory scope
- user scope
- system scope
:type key: basestring
:rtype: Union[basestring, int, float, List, Dict]
"""
try:
return self.idb.get_value(key)
except (KeyError, EnvironmentError):
pass
try:
return self.directory.get_value(key)
except (KeyError, EnvironmentError):
pass
try:
return self.user.get_value(key)
except KeyError:
pass
try:
return self.system.get_value(key)
except KeyError:
pass
raise KeyError("key not found")
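# Example (added for illustration, not part of the original source): a standalone sketch
# of the precedence chain above using hypothetical dict-backed scopes; the first scope
# that knows the key wins.
scopes = [{'color': 'red'},            # IDB scope
          {},                          # directory scope
          {'color': 'blue', 'n': 3},   # user scope
          {'n': 7}]                    # system scope
def first_match(key):
    for scope in scopes:
        if key in scope:
            return scope[key]
    raise KeyError("key not found")
assert first_match('color') == 'red' and first_match('n') == 3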
def pixel_width(self):
"""
Width of the whole TimeLine in pixels
:rtype: int
"""
return self.zoom_factor * ((self._finish - self._start) / self._resolution)
def as_dict(self):
""" Pre-serialisation of the meta data """
drepr = super(PlayMeta, self).as_dict()
drepr["details"] = [meta.as_dict() for meta in self._metas]
return drepr
def list_runner_book(self, market_id, selection_id, handicap=None, price_projection=None, order_projection=None,
match_projection=None, include_overall_position=None, partition_matched_by_strategy_ref=None,
customer_strategy_refs=None, currency_code=None, matched_since=None, bet_ids=None, locale=None,
session=None, lightweight=None):
"""
Returns a list of dynamic data about a market and a specified runner.
Dynamic data includes prices, the status of the market, the status of selections,
the traded volume, and the status of any orders you have placed in the market
:param unicode market_id: The unique id for the market
:param int selection_id: The unique id for the selection in the market
:param double handicap: The handicap associated with the runner, in the case of Asian handicap markets
:param dict price_projection: The projection of price data you want to receive in the response
:param str order_projection: The orders you want to receive in the response
:param str match_projection: If you ask for orders, specifies the representation of matches
:param bool include_overall_position: If you ask for orders, returns matches for each selection
:param bool partition_matched_by_strategy_ref: If you ask for orders, returns the breakdown of matches
by strategy for each selection
:param list customer_strategy_refs: If you ask for orders, restricts the results to orders matching
any of the specified set of customer defined strategies
:param str currency_code: A Betfair standard currency code
:param str matched_since: If you ask for orders, restricts the results to orders that have at
least one fragment matched since the specified date
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param str locale: The language used for the response
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketBook]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listRunnerBook')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.MarketBook, elapsed_time, lightweight)
def increase_writes_in_units(
current_provisioning, units, max_provisioned_writes,
consumed_write_units_percent, log_tag):
""" Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units to increase the provisioning by
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log
"""
units = int(units)
current_provisioning = float(current_provisioning)
consumed_write_units_percent = float(consumed_write_units_percent)
consumption_based_current_provisioning = \
int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
if consumption_based_current_provisioning > current_provisioning:
updated_provisioning = consumption_based_current_provisioning + units
else:
updated_provisioning = int(current_provisioning) + units
if max_provisioned_writes > 0:
if updated_provisioning > max_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes max limit: {1}'.format(
log_tag,
max_provisioned_writes))
return max_provisioned_writes
logger.debug(
'{0} - Write provisioning will be increased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning
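# Example (added for illustration, not part of the original source): a worked call,
# assuming the module-level logger is configured. With 100 provisioned writes, 150 %
# consumption and a 50-unit increase, the consumption-based base is ceil(100 * 1.5) = 150,
# so the new value is 150 + 50 = 200, which stays below the 500-unit cap.
new_units = increase_writes_in_units(
    current_provisioning=100,
    units=50,
    max_provisioned_writes=500,
    consumed_write_units_percent=150.0,
    log_tag='table: demo')
# new_units -> 200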
def write(self):
"""Return SAM formatted string
Returns:
str: SAM formatted string containing entire SAM entry
"""
return '{0}\t{1}\t{2}\t{3}\t{4}\t' \
'{5}\t{6}\t{7}\t{8}\t{9}\t' \
'{10}{11}'.format(self.qname,
str(self.flag),
self.rname,
str(self.pos),
str(self.mapq),
self.cigar,
self.rnext,
str(self.pnext),
str(self.tlen),
self.seq,
self.qual,
os.linesep)
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
tuning_options["scaling"] = False
#build a bounds array as needed for the optimizer
bounds = get_bounds(tuning_options.tune_params)
args = (kernel_options, tuning_options, runner, results, cache)
#call the differential evolution optimizer
opt_result = differential_evolution(_cost_func, bounds, args, maxiter=1,
polish=False, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment()
def decode_event(self, log_topics, log_data):
""" Return a dictionary representation the log.
Note:
This function won't work with anonymous events.
Args:
log_topics (List[bin]): The log's indexed arguments.
log_data (bin): The encoded non-indexed arguments.
"""
# https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI#function-selector-and-argument-encoding
# topics[0]: keccak(EVENT_NAME+"("+EVENT_ARGS.map(canonical_type_of).join(",")+")")
# If the event is declared as anonymous the topics[0] is not generated;
if not len(log_topics) or log_topics[0] not in self.event_data:
raise ValueError('Unknown log type')
event_id_ = log_topics[0]
event = self.event_data[event_id_]
# data: abi_serialise(EVENT_NON_INDEXED_ARGS)
# EVENT_NON_INDEXED_ARGS is the series of EVENT_ARGS that are not
# indexed, abi_serialise is the ABI serialisation function used for
# returning a series of typed values from a function.
unindexed_types = [
type_
for type_, indexed in zip(event['types'], event['indexed'])
if not indexed
]
unindexed_args = decode_abi(unindexed_types, log_data)
# topics[n]: EVENT_INDEXED_ARGS[n - 1]
# EVENT_INDEXED_ARGS is the series of EVENT_ARGS that are indexed
indexed_count = 1 # skip topics[0]
result = {}
for name, type_, indexed in zip(
event['names'], event['types'], event['indexed']):
if indexed:
topic_bytes = utils.zpad(
utils.encode_int(log_topics[indexed_count]),
32,
)
indexed_count += 1
value = decode_single(process_type(type_), topic_bytes)
else:
value = unindexed_args.pop(0)
result[name] = value
result['_event_type'] = utils.to_string(event['name'])
return result | Return a dictionary representation the log.
Note:
This function won't work with anonymous events.
Args:
log_topics (List[bin]): The log's indexed arguments.
log_data (bin): The encoded non-indexed arguments. |
def _get_uploaded_file(session, file_info, fragment_count=0):
"""
:param session: locked session (with self._session_resource as >> session <<)
:param file_info: contains file information to save or query
:param fragment_count: amount of fragments associated to the file
:return: an UploadedFile associated to the file_info
"""
try:
return session.query(UploadedFile).filter(UploadedFile.sha1 == file_info.sha1).one()
except NoResultFound:
new_instance = UploadedFile(
sha1=file_info.sha1,
file_name=file_info.upath,
fragment_count=fragment_count
)
session.add(new_instance)
return new_instance | :param session: locked session (with self._session_resource as >> session <<)
:param file_info: contains file information to save or query
:param fragment_count: amount of fragments associated to the file
:return: an UploadedFile associated to the file_info |
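The helper above follows the standard SQLAlchemy get-or-create pattern. A self-contained sketch with a made-up Thing model (names and columns are illustrative only, not the real UploadedFile schema):
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.orm.exc import NoResultFound

Base = declarative_base()

class Thing(Base):
    __tablename__ = 'thing'
    id = Column(Integer, primary_key=True)
    key = Column(String, unique=True)

def get_or_create(session, key):
    try:
        return session.query(Thing).filter(Thing.key == key).one()
    except NoResultFound:
        obj = Thing(key=key)
        session.add(obj)   # persisted on the next flush/commit
        return obj

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    a = get_or_create(session, 'abc')
    b = get_or_create(session, 'abc')   # autoflush makes the first row visible
    assert a is b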
def entry_line_to_text(self, entry):
"""
Return the textual representation of an :class:`~taxi.timesheet.lines.Entry` instance. This method is a bit
convoluted since we don't want to completely mess up the original formatting of the entry.
"""
line = []
# The entry is new, it didn't come from an existing line, so let's just return a simple text representation of
# it
if not entry._text:
flags_text = self.flags_to_text(entry.flags)
duration_text = self.duration_to_text(entry.duration)
return ''.join(
(flags_text, ' ' if flags_text else '', entry.alias, ' ', duration_text, ' ', entry.description)
)
for i, text in enumerate(entry._text):
# If this field is mapped to an attribute, check if it has changed
# and, if so, regenerate its text. The only fields that are not
# mapped to attributes are spacing fields
if i in self.ENTRY_ATTRS_POSITION:
if self.ENTRY_ATTRS_POSITION[i] in entry._changed_attrs:
attr_name = self.ENTRY_ATTRS_POSITION[i]
attr_value = getattr(entry, self.ENTRY_ATTRS_POSITION[i])
# Some attributes need to be transformed to their textual representation, such as flags or duration
if attr_name in self.ENTRY_ATTRS_TRANSFORMERS:
attr_value = getattr(self, self.ENTRY_ATTRS_TRANSFORMERS[attr_name])(attr_value)
else:
attr_value = text
line.append(attr_value)
else:
# If the length of the field has changed, do whatever we can to keep the current formatting (ie. number
# of whitespaces)
if len(line[i-1]) != len(entry._text[i-1]):
text = ' ' * max(1, (len(text) - (len(line[i-1]) - len(entry._text[i-1]))))
line.append(text)
return ''.join(line).strip() | Return the textual representation of an :class:`~taxi.timesheet.lines.Entry` instance. This method is a bit
convoluted since we don't want to completely mess up the original formatting of the entry. |
def from_bytes(cls, bitstream):
'''
Parse the given packet and update properties accordingly
'''
packet = cls()
# Convert to ConstBitStream (if not already provided)
if not isinstance(bitstream, ConstBitStream):
if isinstance(bitstream, Bits):
bitstream = ConstBitStream(auto=bitstream)
else:
bitstream = ConstBitStream(bytes=bitstream)
# Read the source and destination ports
(packet.source_port,
packet.destination_port) = bitstream.readlist('2*uint:16')
# Store the length
length = bitstream.read('uint:16')
if length < 8:
raise ValueError('Invalid UDP length')
# Read the checksum
packet.checksum = bitstream.read('uint:16')
# And the rest is payload
payload_bytes = length - 8
packet.payload = bitstream.read('bytes:%d' % payload_bytes)
# LISP-specific handling
if packet.source_port == 4341 or packet.destination_port == 4341:
# Payload is a LISP data packet
from pylisp.packet.lisp.data import DataPacket
packet.payload = DataPacket.from_bytes(packet.payload)
elif packet.source_port == 4342 or packet.destination_port == 4342:
# Payload is a LISP control message
from pylisp.packet.lisp.control.base import ControlMessage
packet.payload = ControlMessage.from_bytes(packet.payload)
# There should be no remaining bits
if bitstream.pos != bitstream.len:
raise ValueError('Bits remaining after processing packet')
# Verify that the properties make sense
packet.sanitize()
return packet | Parse the given packet and update properties accordingly |
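A short usage sketch of the header layout that from_bytes() expects, reading the same fields with bitstring directly; the byte values are arbitrary and the ports are chosen so that neither LISP branch (4341/4342) applies:
from bitstring import ConstBitStream

raw = bytes([0x30, 0x39,   # source port 12345
             0x27, 0x0f,   # destination port 9999
             0x00, 0x0c,   # length 12 = 8-byte header + 4 payload bytes
             0x00, 0x00])  # checksum 0 (not validated here)
raw += b'ping'

bits = ConstBitStream(bytes=raw)
src, dst = bits.readlist('2*uint:16')
length = bits.read('uint:16')
checksum = bits.read('uint:16')
payload = bits.read('bytes:%d' % (length - 8))
print(src, dst, length, payload)   # 12345 9999 12 b'ping'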
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
r2= R**2+z**2
if r2 != self.a2:
return 0.
else: # pragma: no cover
return nu.infty | NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT) |
def create_async_sns_topic(self, lambda_name, lambda_arn):
"""
Create the SNS-based async topic.
"""
topic_name = get_topic_name(lambda_name)
# Create SNS topic
topic_arn = self.sns_client.create_topic(
Name=topic_name)['TopicArn']
# Create subscription
self.sns_client.subscribe(
TopicArn=topic_arn,
Protocol='lambda',
Endpoint=lambda_arn
)
# Add Lambda permission for SNS to invoke function
self.create_event_permission(
lambda_name=lambda_name,
principal='sns.amazonaws.com',
source_arn=topic_arn
)
# Add rule for SNS topic as a event source
add_event_source(
event_source={
"arn": topic_arn,
"events": ["sns:Publish"]
},
lambda_arn=lambda_arn,
target_function="zappa.asynchronous.route_task",
boto_session=self.boto_session
)
return topic_arn | Create the SNS-based async topic. |
def problem_with_codon(codon_index, codon_list, bad_seqs):
"""
Return true if the given codon overlaps with a bad sequence.
"""
base_1 = 3 * codon_index
base_3 = 3 * codon_index + 2
gene_seq = ''.join(codon_list)
for bad_seq in bad_seqs:
problem = bad_seq.search(gene_seq)
if problem and problem.start() < base_3 and problem.end() > base_1:
return True
return False | Return true if the given codon overlaps with a bad sequence. |
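A compact usage example for problem_with_codon, using a single compiled motif as the bad-sequence list (the real bad_seqs come from elsewhere in the codebase; the EcoRI site is just a convenient stand-in):
import re

codons = ['ATG', 'GAA', 'TTC', 'TAA']    # toy gene: ATGGAATTCTAA
bad_seqs = [re.compile('GAATTC')]        # the motif spans bases 3..8

# Codons 1 and 2 overlap the motif, codons 0 and 3 do not.
print([problem_with_codon(i, codons, bad_seqs) for i in range(4)])
# -> [False, True, True, False]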
def thin_sum(cachedir, form='sha1'):
'''
Return the checksum of the current thin tarball
'''
thintar = gen_thin(cachedir)
code_checksum_path = os.path.join(cachedir, 'thin', 'code-checksum')
if os.path.isfile(code_checksum_path):
with salt.utils.files.fopen(code_checksum_path, 'r') as fh:
code_checksum = "'{0}'".format(fh.read().strip())
else:
code_checksum = "'0'"
return code_checksum, salt.utils.hashutils.get_hash(thintar, form) | Return the checksum of the current thin tarball |
def rate_limit_status(self):
""" :reference: https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
:allowed_param:'resources'
"""
return bind_api(
api=self,
path='/application/rate_limit_status.json',
payload_type='json',
allowed_param=['resources'],
use_cache=False
) | :reference: https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
:allowed_param:'resources' |
def get_views(self, item=None, key=None, option=None):
"""Return the views object.
If key is None, return all the views for the current plugin;
else if option is None, return the view for the specific key (all options);
else return the view for the specific key/option.
Specify item if the stats are stored in a dict of dict (ex: NETWORK, FS...)
"""
if item is None:
item_views = self.views
else:
item_views = self.views[item]
if key is None:
return item_views
else:
if option is None:
return item_views[key]
else:
if option in item_views[key]:
return item_views[key][option]
else:
return 'DEFAULT' | Return the views object.
If key is None, return all the views for the current plugin;
else if option is None, return the view for the specific key (all options);
else return the view for the specific key/option.
Specify item if the stats are stored in a dict of dict (ex: NETWORK, FS...) |
def push(self, cf):
"""
Push the frame cf onto the stack. Return the new stack.
"""
cf.next = self
if self.state is not None:
self.state.register_plugin('callstack', cf)
self.state.history.recent_stack_actions.append(CallStackAction(
hash(cf), len(cf), 'push', callframe=cf.copy({}, with_tail=False)
))
return cf | Push the frame cf onto the stack. Return the new stack. |
def infer_x(self, y):
"""Infer probable x from input y
@param y the desired output for inferred x.
@return a list of probable x
"""
OptimizedInverseModel.infer_x(self, y)
if self.fmodel.size() == 0:
return self._random_x()
x_guesses = [self._guess_x_simple(y)[0]]
result = []
for xg in x_guesses:
res = cma.fmin(self._error, xg, self.cmaes_sigma,
options={'bounds':[self.lower, self.upper],
'verb_log':0,
'verb_disp':False,
'maxfevals':self.maxfevals,
'seed': self.seed})
result.append((res[1], res[0]))
return [xi for fi, xi in sorted(result)] | Infer probable x from input y
@param y the desired output for inferred x.
@return a list of probable x |
def _from_json_array_nested(cls, response_raw):
"""
:type response_raw: client.BunqResponseRaw
:rtype: bunq.sdk.client.BunqResponse[cls]
"""
json = response_raw.body_bytes.decode()
obj = converter.json_to_class(dict, json)
value = converter.deserialize(cls, obj[cls._FIELD_RESPONSE])
return client.BunqResponse(value, response_raw.headers) | :type response_raw: client.BunqResponseRaw
:rtype: bunq.sdk.client.BunqResponse[cls] |
def do_help(self, arg):
"""
? - show the list of available commands
? * - show help for all commands
? <command> [command...] - show help for the given command(s)
help - show the list of available commands
help * - show help for all commands
help <command> [command...] - show help for the given command(s)
"""
if not arg:
Cmd.do_help(self, arg)
elif arg in ('?', 'help'):
# An easter egg :)
print(" Help! I need somebody...")
print(" Help! Not just anybody...")
print(" Help! You know, I need someone...")
print(" Heeelp!")
else:
if arg == '*':
commands = self.get_names()
commands = [ x for x in commands if x.startswith('do_') ]
else:
commands = set()
for x in arg.split(' '):
x = x.strip()
if x:
for n in self.completenames(x):
commands.add( 'do_%s' % n )
commands = list(commands)
commands.sort()
print(self.get_help(commands)) | ? - show the list of available commands
? * - show help for all commands
? <command> [command...] - show help for the given command(s)
help - show the list of available commands
help * - show help for all commands
help <command> [command...] - show help for the given command(s) |
def add_tracked_motors(self, tracked_motors):
"""Add new motors to the recording"""
new_mockup_motors = list(map(self.get_mockup_motor, tracked_motors))  # list() so it can be concatenated below
self.tracked_motors = list(set(self.tracked_motors + new_mockup_motors)) | Add new motors to the recording |
def state(anon, obj, field, val):
"""
Returns a randomly selected US state code
"""
return anon.faker.state(field=field) | Returns a randomly selected US state code |
def within_n_mads(n, series):
"""Return true if all values in sequence are within n MADs"""
mad_score = (series - series.mean()) / series.mad()
return (mad_score.abs() <= n).all() | Return true if all values in sequence are within n MADs |
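A quick usage sketch; note that Series.mad() is the mean absolute deviation and has been deprecated in recent pandas releases, where (series - series.mean()).abs().mean() is the drop-in replacement:
import pandas as pd

s = pd.Series([10.0, 10.5, 9.8, 10.2, 25.0])   # last value sits far from the rest
print(within_n_mads(2, s))                     # False: 25.0 is about 2.5 MADs out
print(within_n_mads(2, s.iloc[:-1]))           # True: remaining values are within 2 MADs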
def refresh(self, force_cache=False):
"""
Perform a system refresh.
:param force_cache: Force an update of the camera cache
"""
if self.check_if_ok_to_update() or force_cache:
for sync_name, sync_module in self.sync.items():
_LOGGER.debug("Attempting refresh of sync %s", sync_name)
sync_module.refresh(force_cache=force_cache)
if not force_cache:
# Prevents rapid clearing of motion detect property
self.last_refresh = int(time.time())
return True
return False | Perform a system refresh.
:param force_cache: Force an update of the camera cache |
def _dataframe_fields(self):
"""
Creates a dictionary of all fields to include with DataFrame.
With the result of the calls to class properties changing based on the
class index value, the dictionary should be regenerated every time the
index is changed when the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index.
"""
fields_to_include = {
'assist_percentage': self.assist_percentage,
'assists': self.assists,
'block_percentage': self.block_percentage,
'blocks': self.blocks,
'box_plus_minus': self.box_plus_minus,
'conference': self.conference,
'defensive_box_plus_minus': self.defensive_box_plus_minus,
'defensive_rebound_percentage': self.defensive_rebound_percentage,
'defensive_rebounds': self.defensive_rebounds,
'defensive_win_shares': self.defensive_win_shares,
'effective_field_goal_percentage':
self.effective_field_goal_percentage,
'field_goal_attempts': self.field_goal_attempts,
'field_goal_percentage': self.field_goal_percentage,
'field_goals': self.field_goals,
'free_throw_attempt_rate': self.free_throw_attempt_rate,
'free_throw_attempts': self.free_throw_attempts,
'free_throw_percentage': self.free_throw_percentage,
'free_throws': self.free_throws,
'games_played': self.games_played,
'games_started': self.games_started,
'height': self.height,
'minutes_played': self.minutes_played,
'offensive_box_plus_minus': self.offensive_box_plus_minus,
'offensive_rebound_percentage': self.offensive_rebound_percentage,
'offensive_rebounds': self.offensive_rebounds,
'offensive_win_shares': self.offensive_win_shares,
'personal_fouls': self.personal_fouls,
'player_efficiency_rating': self.player_efficiency_rating,
'player_id': self.player_id,
'points': self.points,
'points_produced': self.points_produced,
'position': self.position,
'steal_percentage': self.steal_percentage,
'steals': self.steals,
'team_abbreviation': self.team_abbreviation,
'three_point_attempt_rate': self.three_point_attempt_rate,
'three_point_attempts': self.three_point_attempts,
'three_point_percentage': self.three_point_percentage,
'three_pointers': self.three_pointers,
'total_rebound_percentage': self.total_rebound_percentage,
'total_rebounds': self.total_rebounds,
'true_shooting_percentage': self.true_shooting_percentage,
'turnover_percentage': self.turnover_percentage,
'turnovers': self.turnovers,
'two_point_attempts': self.two_point_attempts,
'two_point_percentage': self.two_point_percentage,
'two_pointers': self.two_pointers,
'usage_percentage': self.usage_percentage,
'weight': self.weight,
'win_shares': self.win_shares,
'win_shares_per_40_minutes': self.win_shares_per_40_minutes,
}
return fields_to_include | Creates a dictionary of all fields to include with DataFrame.
With the result of the calls to class properties changing based on the
class index value, the dictionary should be regenerated every time the
index is changed when the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index. |
def words(content, filter=True, predicate=None):
"""\
Returns an iterable of words from the provided text.
`content`
A text.
`filter`
Indicates if stop words and garbage like "xxxxxx" should be removed from
the word list.
`predicate`
An alternative word filter. If it is ``None``, "xxxx", "---",
default stop words, and words shorter than 3 characters are filtered
(iff ``filter`` is set to ``True``).
>>> list(words('Hello and goodbye ------ '))
['Hello', 'goodbye']
>>> list(words('Hello, and goodbye ------ Subject xxxxxxxxx XXXXXXXXXXXX here'))
['Hello', 'goodbye', 'Subject']
>>> list(words('Hello, and goodbye.How are you?'))
['Hello', 'goodbye']
"""
def accept_word(word):
"""\
Returns if the `word` is acceptable/useful
`word`
The word to check.
"""
return len(word) > 2 \
and word.lower() not in stop_words \
and not _UNWANTED_WORDS_PATTERN.match(word)
words = _tokenize(content)
if filter or predicate:
if not predicate:
predicate = accept_word
return (w for w in words if predicate(w))
return words | \
Returns an iterable of words from the provided text.
`content`
A text.
`filter`
Indicates if stop words and garbage like "xxxxxx" should be removed from
the word list.
`predicate`
An alternative word filter. If it is ``None``, "xxxx", "---",
default stop words, and words shorter than 3 characters are filtered
(iff ``filter`` is set to ``True``).
>>> list(words('Hello and goodbye ------ '))
['Hello', 'goodbye']
>>> list(words('Hello, and goodbye ------ Subject xxxxxxxxx XXXXXXXXXXXX here'))
['Hello', 'goodbye', 'Subject']
>>> list(words('Hello, and goodbye.How are you?'))
['Hello', 'goodbye'] |
def add_github_hook_options(parser):
"""Add the github jenkins hook command and arguments.
:rtype: argparse.ArgumentParser
"""
cookbook = parser.add_parser('github', help='Install the Jenkins callback '
'hook in a GitHub repository')
cookbook.add_argument('owner', action='store',
help='The owner of the GitHub repo')
cookbook.add_argument('repo', action='store',
help='The GitHub repository name')
domain = socket.gethostname()
example = 'jenkins.%s' % domain
cookbook.add_argument('jenkins_hook_url', action='store',
help='The jenkins hook URL. For example %s' % example)
cookbook.add_argument('-g', '--github-host',
action='store',
dest='github',
default=github.GITHUB_HOST,
help='Override github.com for a '
'GitHub::Enterprise host')
cookbook.add_argument('-u', '--username',
action='store',
dest='username',
help='Specify a different username than the repo '
'owner')
cookbook.set_defaults(func='github_hooks') | Add the github jenkins hook command and arguments.
:rtype: argparse.ArgumentParser |
def fit_first_and_second_harmonics(phi, intensities):
"""
Fit the first and second harmonic function values to a set of
(angle, intensity) pairs.
This function is used to compute corrections for ellipse fitting:
.. math::
f(phi) = y0 + a1*\\sin(phi) + b1*\\cos(phi) + a2*\\sin(2*phi) +
b2*\\cos(2*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
intensities : `~numpy.ndarray`
The intensities measured along the elliptical path, at the
angles defined by the ``phi`` parameter.
Returns
-------
y0, a1, b1, a2, b2 : float
The fitted harmonic coefficient values.
"""
a1 = b1 = a2 = b2 = 1.
def optimize_func(x):
return first_and_second_harmonic_function(
phi, np.array([x[0], x[1], x[2], x[3], x[4]])) - intensities
return _least_squares_fit(optimize_func, [np.mean(intensities), a1, b1,
a2, b2]) | Fit the first and second harmonic function values to a set of
(angle, intensity) pairs.
This function is used to compute corrections for ellipse fitting:
.. math::
f(phi) = y0 + a1*\\sin(phi) + b1*\\cos(phi) + a2*\\sin(2*phi) +
b2*\\cos(2*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
intensities : `~numpy.ndarray`
The intensities measured along the elliptical path, at the
angles defined by the ``phi`` parameter.
Returns
-------
y0, a1, b1, a2, b2 : float
The fitted harmonic coefficient values.
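Because the model is linear in (y0, a1, b1, a2, b2), the same fit can be illustrated with an ordinary linear least-squares solve; this is a sketch of the underlying math, not the photutils implementation:
import numpy as np

rng = np.random.default_rng(0)
phi = np.linspace(0.0, 2.0 * np.pi, 100, endpoint=False)
true = np.array([10.0, 0.5, -0.3, 0.2, 0.1])            # y0, a1, b1, a2, b2
design = np.column_stack([np.ones_like(phi),
                          np.sin(phi), np.cos(phi),
                          np.sin(2 * phi), np.cos(2 * phi)])
intensities = design @ true + rng.normal(0.0, 0.01, phi.size)

coeffs, *_ = np.linalg.lstsq(design, intensities, rcond=None)
print(np.round(coeffs, 2))    # recovers approximately [10.0, 0.5, -0.3, 0.2, 0.1]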
def Update(self, menu=None, tooltip=None,filename=None, data=None, data_base64=None,):
'''
Updates the menu, tooltip or icon
:param menu: menu definition
:param tooltip: string representing tooltip
:param filename: icon filename
:param data: icon raw image
:param data_base64: icon base 64 image
:return:
'''
# Menu
if menu is not None:
self.Menu = menu
qmenu = QMenu()
qmenu.setTitle(self.Menu[0])
AddTrayMenuItem(qmenu, self.Menu[1], self)
self.TrayIcon.setContextMenu(qmenu)
# Tooltip
if tooltip is not None:
self.TrayIcon.setToolTip(str(tooltip))
# Icon
qicon = None
if filename is not None:
qicon = QIcon(filename)
elif data is not None:
ba = QtCore.QByteArray.fromRawData(data)
pixmap = QtGui.QPixmap()
pixmap.loadFromData(ba)
qicon = QIcon(pixmap)
elif data_base64 is not None:
ba = QtCore.QByteArray.fromBase64(data_base64)
pixmap = QtGui.QPixmap()
pixmap.loadFromData(ba)
qicon = QIcon(pixmap)
if qicon is not None:
self.TrayIcon.setIcon(qicon) | Updates the menu, tooltip or icon
:param menu: menu definition
:param tooltip: string representing tooltip
:param filename: icon filename
:param data: icon raw image
:param data_base64: icon base 64 image
:return: |
def normalize_enum_constant(s):
"""Return enum constant `s` converted to a canonical snake-case."""
if s.islower(): return s
if s.isupper(): return s.lower()
return "".join(ch if ch.islower() else "_" + ch.lower() for ch in s).strip("_") | Return enum constant `s` converted to a canonical snake-case. |
def check_valid_cpc_status(method, uri, cpc):
"""
Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation.
"""
status = cpc.properties.get('status', None)
if status is None:
# Do nothing if no status is set on the faked CPC
return
valid_statuses = ['active', 'service-required', 'degraded', 'exceptions']
if status not in valid_statuses:
if uri.startswith(cpc.uri):
# The uri targets the CPC (either is the CPC uri or some
# multiplicity under the CPC uri)
raise ConflictError(method, uri, reason=1,
message="The operation cannot be performed "
"because the targeted CPC {} has a status "
"that is not valid for the operation: {}".
format(cpc.name, status))
else:
# The uri targets a resource hosted by the CPC
raise ConflictError(method, uri, reason=6,
message="The operation cannot be performed "
"because CPC {} hosting the targeted resource "
"has a status that is not valid for the "
"operation: {}".
format(cpc.name, status)) | Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation. |
def pretty_print_json(self, json_string):
"""
Return formatted JSON string _json_string_.\n
Using method json.dumps with settings: _indent=2, ensure_ascii=False_.
*Args:*\n
_json_string_ - JSON string.
*Returns:*\n
Formatted JSON string.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
| | Log | ${pretty_json} |
=>\n
| {
| "a": 1,
| "foo": [
| {
| "c": 3,
| "b": 2
| },
| {
| "e": 4,
| "d": "baz"
| }
| ]
| }
"""
return json.dumps(self.string_to_json(json_string), indent=2, ensure_ascii=False) | Return formatted JSON string _json_string_.\n
Using method json.dumps with settings: _indent=2, ensure_ascii=False_.
*Args:*\n
_json_string_ - JSON string.
*Returns:*\n
Formatted JSON string.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
| | Log | ${pretty_json} |
=>\n
| {
| "a": 1,
| "foo": [
| {
| "c": 3,
| "b": 2
| },
| {
| "e": 4,
| "d": "baz"
| }
| ]
| } |
def exclude_by_ends(in_file, exclude_file, data, in_params=None):
"""Exclude calls based on overlap of the ends with exclusion regions.
Removes structural variants with either end being in a repeat: a large
source of false positives.
Parameters tuned based on removal of LCR overlapping false positives in DREAM
synthetic 3 data.
"""
params = {"end_buffer": 50,
"rpt_pct": 0.9,
"total_rpt_pct": 0.2,
"sv_pct": 0.5}
if in_params:
params.update(in_params)
assert in_file.endswith(".bed")
out_file = "%s-norepeats%s" % utils.splitext_plus(in_file)
to_filter = collections.defaultdict(list)
removed = 0
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with shared.bedtools_tmpdir(data):
for coord, end_name in [(1, "end1"), (2, "end2")]:
base, ext = utils.splitext_plus(tx_out_file)
end_file = _create_end_file(in_file, coord, params, "%s-%s%s" % (base, end_name, ext))
to_filter = _find_to_filter(end_file, exclude_file, params, to_filter)
with open(tx_out_file, "w") as out_handle:
with open(in_file) as in_handle:
for line in in_handle:
key = "%s:%s-%s" % tuple(line.strip().split("\t")[:3])
total_rpt_size = sum(to_filter.get(key, [0]))
if total_rpt_size <= (params["total_rpt_pct"] * params["end_buffer"]):
out_handle.write(line)
else:
removed += 1
return out_file, removed | Exclude calls based on overlap of the ends with exclusion regions.
Removes structural variants with either end being in a repeat: a large
source of false positives.
Parameters tuned based on removal of LCR overlapping false positives in DREAM
synthetic 3 data. |
def copy_and_disconnect_tree(root, machine):
"""Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links.
"""
new_root = None
# Lookup for copied routing tree {(x, y): RoutingTree, ...}
new_lookup = {}
# List of missing connections in the copied routing tree [(new_parent,
# new_child), ...]
broken_links = set()
# A queue [(new_parent, direction, old_node), ...]
to_visit = deque([(None, None, root)])
while to_visit:
new_parent, direction, old_node = to_visit.popleft()
if old_node.chip in machine:
# Create a copy of the node
new_node = RoutingTree(old_node.chip)
new_lookup[new_node.chip] = new_node
else:
# This chip is dead, move all its children into the parent node
assert new_parent is not None, \
"Net cannot be sourced from a dead chip."
new_node = new_parent
if new_parent is None:
# This is the root node
new_root = new_node
elif new_node is not new_parent:
# If this node is not dead, check connectivity to parent node (no
# reason to check connectivity between a dead node and its parent).
if direction in links_between(new_parent.chip,
new_node.chip,
machine):
# Is connected via working link
new_parent.children.append((direction, new_node))
else:
# Link to parent is dead (or original parent was dead and the
# new parent is not adjacent)
broken_links.add((new_parent.chip, new_node.chip))
# Copy children
for child_direction, child in old_node.children:
to_visit.append((new_node, child_direction, child))
return (new_root, new_lookup, broken_links) | Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links. |
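The traversal itself is a plain breadth-first copy driven by a deque. A stripped-down sketch on a dict-of-children tree, without the dead-chip or broken-link handling, shows the queue discipline in isolation:
from collections import deque

def copy_tree(root, children):
    # children: {node: [child, ...]}; returns (copied root, lookup by node).
    lookup = {root: {'node': root, 'children': []}}
    to_visit = deque([(None, root)])
    while to_visit:
        parent, node = to_visit.popleft()
        copy = lookup.setdefault(node, {'node': node, 'children': []})
        if parent is not None:
            lookup[parent]['children'].append(copy)
        for child in children.get(node, []):
            to_visit.append((node, child))
    return lookup[root], lookup

new_root, lookup = copy_tree('a', {'a': ['b', 'c'], 'b': ['d']})
print([c['node'] for c in new_root['children']])   # ['b', 'c']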
def is_binary(path):
'''
Detects if the file is a binary, returns bool. Returns True if the file is
a binary, and False if it is not or if the file is not available.
'''
if not os.path.isfile(path):
return False
try:
with fopen(path, 'rb') as fp_:
try:
data = fp_.read(2048)
if six.PY3:
data = data.decode(__salt_system_encoding__)
return salt.utils.stringutils.is_binary(data)
except UnicodeDecodeError:
return True
except os.error:
return False | Detects if the file is a binary, returns bool. Returns True if the file is
a bin, False if the file is not and None if the file is not available. |
def format_item(x, timedelta_format=None, quote_strings=True):
"""Returns a succinct summary of an object as a string"""
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return repr(x) if quote_strings else x
elif isinstance(x, (float, np.floating)):
return '{0:.4}'.format(x)
else:
return str(x) | Returns a succinct summary of an object as a string |
def close(self):
"""Close the connection to the email server."""
try:
try:
self.connection.quit()
except socket.sslerror:
# This happens when calling quit() on a TLS connection
# sometimes.
self.connection.close()
except Exception as e:
logger.error(
"Error trying to close connection to server " "%s:%s: %s",
self.host,
self.port,
e,
)
if self.fail_silently:
return
raise
finally:
self.connection = None | Close the connection to the email server. |
def metadata_path(self, m_path):
"""Provide pointers to the paths of the metadata file
Args:
m_path: Path to metadata file
"""
if not m_path:
self.metadata_dir = None
self.metadata_file = None
else:
if not op.exists(m_path):
raise OSError('{}: file does not exist!'.format(m_path))
if not op.dirname(m_path):
self.metadata_dir = '.'
else:
self.metadata_dir = op.dirname(m_path)
self.metadata_file = op.basename(m_path)
# TODO: update using Biopython's built in SeqRecord parser
# Just updating IDs and stuff
self.update(parse_kegg_gene_metadata(self.metadata_path), overwrite=True) | Provide pointers to the paths of the metadata file
Args:
m_path: Path to metadata file |
def structured_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
s = raw_partlist(input=input, timeout=timeout, showgui=showgui)
return parse_partlist(s) | export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..]) |
def parse_universe_description(self, description):
''' Semantic
- 'sid1,sid2,sid2,...'
- 'exchange' : every sids of the exchange
- 'exchange,n' : n random sids of the exchange
where exchange is a combination of 'type:index:submarket'
'''
self.raw_description = description
description = description.split(',')
self.exchange = description[0]
n = int(description[1]) if len(description) == 2 else -1
self.sids = self._lookup_sids(description[0], n) | Semantic
- 'sid1,sid2,sid2,...'
- 'exchange' : every sids of the exchange
- 'exchange,n' : n random sids of the exchange
where exchange is a combination of 'type:index:submarket' |
def find_matching_bracket_position(self, start_pos=None, end_pos=None):
"""
Return relative cursor position of matching [, (, { or < bracket.
When `start_pos` or `end_pos` are given. Don't look past the positions.
"""
# Look for a match.
for A, B in '()', '[]', '{}', '<>':
if self.current_char == A:
return self.find_enclosing_bracket_right(A, B, end_pos=end_pos) or 0
elif self.current_char == B:
return self.find_enclosing_bracket_left(A, B, start_pos=start_pos) or 0
return 0 | Return relative cursor position of matching [, (, { or < bracket.
When `start_pos` or `end_pos` are given. Don't look past the positions. |
def get_data_record(self, brain, field_names):
"""Returns a dict with the column values for the given brain
"""
record = {}
model = None
for field_name in field_names:
# First try to get the value directly from the brain
value = getattr(brain, field_name, None)
# No metadata for this column name
if value is None:
logger.warn("Not a metadata field: {}".format(field_name))
model = model or SuperModel(brain)
value = model.get(field_name, None)
if callable(value):
value = value()
# ' ' instead of '' because empty div fields don't render
# correctly in combo results table
record[field_name] = value or " "
return record | Returns a dict with the column values for the given brain |
def get_node_label(self, model):
"""
Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible |
def recreate_article_body(self):
'''
Handles case where article body contained page or image.
Assumes all articles and images have been created.
'''
for foreign_id, body in iteritems(self.record_keeper.article_bodies):
try:
local_page_id = self.record_keeper.get_local_page(foreign_id)
page = Page.objects.get(id=local_page_id).specific
# iterate through the body
new_body = []
for item in body:
if not item['value']:
continue
if item['type'] == 'page':
new_page_id = self.record_keeper.get_local_page(
item['value'])
item['value'] = new_page_id
elif item['type'] == 'image':
new_image_id = self.record_keeper.get_local_image(
item['value'])
item['value'] = new_image_id
new_body.append(item)
setattr(page, 'body', json.dumps(new_body))
page.save_revision().publish()
except Exception as e:
self.log(ERROR, "recreating article body",
{
"exception": e,
"foreign_id": foreign_id,
"body": body,
},
depth=1) | Handles case where article body contained page or image.
Assumes all articles and images have been created. |
def get_perceel_by_capakey(self, capakey):
'''
Get a `perceel`.
:param capakey: An capakey for a `perceel`.
:rtype: :class:`Perceel`
'''
def creator():
url = self.base_url + '/parcel/%s' % capakey
h = self.base_headers
p = {
'geometry': 'full',
'srs': '31370',
'data': 'adp'
}
res = capakey_rest_gateway_request(url, h, p).json()
return Perceel(
res['perceelnummer'],
Sectie(
res['sectionCode'],
Afdeling(
res['departmentCode'],
res['departmentName'],
Gemeente(res['municipalityCode'], res['municipalityName'])
)
),
res['capakey'],
Perceel.get_percid_from_capakey(res['capakey']),
None,
None,
self._parse_centroid(res['geometry']['center']),
self._parse_bounding_box(res['geometry']['boundingBox']),
res['geometry']['shape']
)
if self.caches['short'].is_configured:
key = 'get_perceel_by_capakey_rest#%s' % capakey
perceel = self.caches['short'].get_or_create(key, creator)
else:
perceel = creator()
perceel.set_gateway(self)
return perceel | Get a `perceel`.
:param capakey: An capakey for a `perceel`.
:rtype: :class:`Perceel` |
def safe_size(source):
"""
READ THE source UP TO SOME LIMIT, THEN COPY TO A FILE IF TOO BIG
RETURN A str() OR A FileString()
"""
if source is None:
return None
total_bytes = 0
bytes = []
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
bytes.append(b)
if total_bytes > MAX_STRING_SIZE:
try:
data = FileString(TemporaryFile())
for bb in bytes:
data.write(bb)
del bytes
del bb
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
data.write(b)
b = source.read(MIN_READ_SIZE)
data.seek(0)
Log.note("Using file of size {{length}} instead of str()", length= total_bytes)
return data
except Exception as e:
Log.error("Could not write file > {{num}} bytes", num= total_bytes, cause=e)
b = source.read(MIN_READ_SIZE)
data = b"".join(bytes)
del bytes
return data | READ THE source UP TO SOME LIMIT, THEN COPY TO A FILE IF TOO BIG
RETURN A str() OR A FileString() |
def get_config():
'''
Get the status of all the firewall profiles
Returns:
dict: A dictionary of all profiles on the system
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.get_config
'''
profiles = {}
curr = None
cmd = ['netsh', 'advfirewall', 'show', 'allprofiles']
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout'])
# There may be some problems with this depending on how `netsh` is localized
# It's looking for lines that contain `Profile Settings` or start with
# `State` which may be different in different localizations
for line in ret['stdout'].splitlines():
if not curr:
tmp = re.search('(.*) Profile Settings:', line)
if tmp:
curr = tmp.group(1)
elif line.startswith('State'):
profiles[curr] = line.split()[1] == 'ON'
curr = None
return profiles | Get the status of all the firewall profiles
Returns:
dict: A dictionary of all profiles on the system
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.get_config |
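A self-contained illustration of the parsing loop above, run against a canned English-locale snippet of netsh output (real output is localized, as the in-code comment warns):
import re

sample = '''Domain Profile Settings:
----------------------------------------------------------------------
State                                 ON

Private Profile Settings:
----------------------------------------------------------------------
State                                 OFF
'''

profiles, curr = {}, None
for line in sample.splitlines():
    if not curr:
        tmp = re.search('(.*) Profile Settings:', line)
        if tmp:
            curr = tmp.group(1)
    elif line.startswith('State'):
        profiles[curr] = line.split()[1] == 'ON'
        curr = None
print(profiles)   # {'Domain': True, 'Private': False}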
def get_git_postversion(addon_dir):
""" return the addon version number, with a developmental version increment
if there were git commits in the addon_dir after the last version change.
If the last change to the addon correspond to the version number in the
manifest it is used as is for the python package version. Otherwise a
counter is incremented for each commit and resulting version number has
the following form: [8|9].0.x.y.z.99.devN, N being the number of git
commits since the version change.
Note: we use .99.devN because:
* pip ignores .postN by design (https://github.com/pypa/pip/issues/2872)
* x.y.z.devN is anterior to x.y.z
Note: we don't put the sha1 of the commit in the version number because
this is not PEP 440 compliant and is therefore misinterpreted by pip.
"""
addon_dir = os.path.realpath(addon_dir)
last_version = read_manifest(addon_dir).get('version', '0.0.0')
last_version_parsed = parse_version(last_version)
if not is_git_controlled(addon_dir):
return last_version
if get_git_uncommitted(addon_dir):
uncommitted = True
count = 1
else:
uncommitted = False
count = 0
last_sha = None
git_root = get_git_root(addon_dir)
for sha in git_log_iterator(addon_dir):
try:
manifest = read_manifest_from_sha(sha, addon_dir, git_root)
except NoManifestFound:
break
version = manifest.get('version', '0.0.0')
version_parsed = parse_version(version)
if version_parsed != last_version_parsed:
break
if last_sha is None:
last_sha = sha
else:
count += 1
if not count:
return last_version
if last_sha:
return last_version + ".99.dev%s" % count
if uncommitted:
return last_version + ".dev1"
# if everything is committed, the last commit
# must have the same version as current,
# so last_sha must be set and we'll never reach this branch
return last_version | return the addon version number, with a developmental version increment
if there were git commits in the addon_dir after the last version change.
If the last change to the addon correspond to the version number in the
manifest it is used as is for the python package version. Otherwise a
counter is incremented for each commit and resulting version number has
the following form: [8|9].0.x.y.z.99.devN, N being the number of git
commits since the version change.
Note: we use .99.devN because:
* pip ignores .postN by design (https://github.com/pypa/pip/issues/2872)
* x.y.z.devN is anterior to x.y.z
Note: we don't put the sha1 of the commit in the version number because
this is not PEP 440 compliant and is therefore misinterpreted by pip. |
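The version ordering the docstring relies on can be checked directly with a PEP 440 parser; packaging.version is used here, and setuptools' parse_version orders these the same way:
from packaging.version import Version

release = Version('10.0.1.2.3')
dev_bump = Version('10.0.1.2.3.99.dev4')    # 4 commits after the version change
uncommitted = Version('10.0.1.2.3.dev1')

print(release < dev_bump)      # True: the .99.devN form sorts after the tagged release
print(uncommitted < release)   # True: x.y.z.devN is anterior to x.y.z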
def callback(self, timestamp, event_type, payload):
"""Callback method for processing events in notification queue.
:param timestamp: time the message is received.
:param event_type: event type in the notification queue such as
identity.project.created, identity.project.deleted.
:param payload: Contains information of an event
"""
try:
data = (event_type, payload)
LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, '
'payload: %(payload)s\n', (
{'event': event_type, 'payload': payload}))
if 'create' in event_type:
pri = self._create_pri
elif 'delete' in event_type:
pri = self._delete_pri
elif 'update' in event_type:
pri = self._update_pri
else:
pri = self._delete_pri
self._pq.put((pri, timestamp, data))
except Exception as exc:
LOG.exception('Error: %(err)s for event %(event)s',
{'err': str(exc), 'event': event_type}) | Callback method for processing events in notification queue.
:param timestamp: time the message is received.
:param event_type: event type in the notification queue such as
identity.project.created, identity.project.deleted.
:param payload: Contains information of an event |
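The queue ordering relies on standard tuple comparison in queue.PriorityQueue: the lowest priority number is dequeued first, with the timestamp breaking ties. A small sketch with made-up priority values (the real ones come from the class attributes):
from queue import PriorityQueue

CREATE_PRI, UPDATE_PRI, DELETE_PRI = 2, 3, 1   # illustrative values only

pq = PriorityQueue()
pq.put((UPDATE_PRI, 1000.0, ('identity.project.updated', {'id': 'p1'})))
pq.put((DELETE_PRI, 1001.0, ('identity.project.deleted', {'id': 'p2'})))
pq.put((CREATE_PRI, 1002.0, ('identity.project.created', {'id': 'p3'})))

while not pq.empty():
    pri, ts, (event_type, payload) = pq.get()
    print(pri, event_type)
# 1 identity.project.deleted
# 2 identity.project.created
# 3 identity.project.updated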
def open_like(a, path, **kwargs):
"""Open a persistent array like `a`."""
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return open_array(path, **kwargs) | Open a persistent array like `a`. |
def get_additional_properties(self, _type, *args, **kwargs):
"""Make head and table with additional properties by schema_id
:param str _type:
:rtype: str
"""
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
body = []
for sch in schema.nested_schemas: # complex types
nested_schema = SchemaObjects.get(sch)
if not (nested_schema or isinstance(nested_schema, SchemaMapWrapper)):
continue
body.append('Map of {{"key":"{}"}}\n\n'.format(self.get_type_description(
nested_schema.schema_id, *args, **kwargs)) # head
)
if nested_schema.is_array: # table
_schema = SchemaObjects.get(nested_schema.item.get('type'))
if _schema and _schema.schema_type == SchemaTypes.INLINE:
body.append(self.get_regular_properties(_schema.schema_id, *args, **kwargs))
else:
body.append(self.get_regular_properties(nested_schema.schema_id, *args, **kwargs))
if schema.type_format: # basic types, only head
body.append(
'Map of {{"key":"{}"}}'.format(self.get_type_description(schema.type_format, *args, **kwargs)))
return ''.join(body) | Make head and table with additional properties by schema_id
:param str _type:
:rtype: str |
def detail_search(self, params, standardize=False):
"""Get a detailed list of person objects for the given search params.
:param params:
Dictionary specifying the query parameters
>>> people_detailed = d.detail_search({'first_name': 'tobias', 'last_name': 'funke'})
"""
response = self._request(ENDPOINTS['SEARCH'], params)
result_data = []
for person in response['result_data']:
try:
detail = self.person_details(person['person_id'],
standardize=standardize)
except ValueError:
pass
else:
result_data.append(detail)
response['result_data'] = result_data
return response | Get a detailed list of person objects for the given search params.
:param params:
Dictionary specifying the query parameters
>>> people_detailed = d.detail_search({'first_name': 'tobias', 'last_name': 'funke'}) |
def dehydrate(self):
"""Return a dict representing this bucket."""
# Only concerned about very specific attributes
result = {}
for attr in self.attrs:
result[attr] = getattr(self, attr)
return result | Return a dict representing this bucket. |
def heartbeat(request):
"""
Runs all the Django checks and returns a JsonResponse with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response.
"""
all_checks = checks.registry.registry.get_checks(
include_deployment_checks=not settings.DEBUG,
)
details = {}
statuses = {}
level = 0
for check in all_checks:
detail = heartbeat_check_detail(check)
statuses[check.__name__] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[check.__name__] = detail
if level < checks.messages.WARNING:
status_code = 200
heartbeat_passed.send(sender=heartbeat, level=level)
else:
status_code = 500
heartbeat_failed.send(sender=heartbeat, level=level)
payload = {
'status': level_to_text(level),
'checks': statuses,
'details': details,
}
return JsonResponse(payload, status=status_code) | Runs all the Django checks and returns a JsonResponse with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response. |
def lbol_from_spt_dist_mag (sptnum, dist_pc, jmag, kmag, format='cgs'):
"""Estimate a UCD's bolometric luminosity given some basic parameters.
sptnum: the spectral type as a number; 8 -> M8; 10 -> L0 ; 20 -> T0
Valid values range between 0 and 30, ie M0 to Y0.
dist_pc: distance to the object in parsecs
jmag: object's J-band magnitude or NaN (*not* None) if unavailable
kmag: same with K-band magnitude
format: either 'cgs', 'logcgs', or 'logsun', defining the form of the
outputs. Logarithmic quantities are base 10.
This routine can be used with vectors of measurements. The result will be
NaN if a value cannot be computed. This routine implements the method
documented in the Appendix of Williams et al., 2014ApJ...785....9W
(doi:10.1088/0004-637X/785/1/9).
"""
bcj = bcj_from_spt (sptnum)
bck = bck_from_spt (sptnum)
n = np.zeros (sptnum.shape, dtype=int)
app_mbol = np.zeros (sptnum.shape)
w = np.isfinite (bcj) & np.isfinite (jmag)
app_mbol[w] += jmag[w] + bcj[w]
n[w] += 1
w = np.isfinite (bck) & np.isfinite (kmag)
app_mbol[w] += kmag[w] + bck[w]
n[w] += 1
w = (n != 0)
abs_mbol = (app_mbol[w] / n[w]) - 5 * (np.log10 (dist_pc[w]) - 1)
# note: abs_mbol is filtered by `w`
lbol = np.empty (sptnum.shape)
lbol.fill (np.nan)
lbol[w] = lbol_from_mbol (abs_mbol, format=format)
return lbol | Estimate a UCD's bolometric luminosity given some basic parameters.
sptnum: the spectral type as a number; 8 -> M8; 10 -> L0 ; 20 -> T0
Valid values range between 0 and 30, ie M0 to Y0.
dist_pc: distance to the object in parsecs
jmag: object's J-band magnitude or NaN (*not* None) if unavailable
kmag: same with K-band magnitude
format: either 'cgs', 'logcgs', or 'logsun', defining the form of the
outputs. Logarithmic quantities are base 10.
This routine can be used with vectors of measurements. The result will be
NaN if a value cannot be computed. This routine implements the method
documented in the Appendix of Williams et al., 2014ApJ...785....9W
(doi:10.1088/0004-637X/785/1/9). |
def template_from_file(basedir, path, vars):
''' run a file through the templating engine '''
from cirruscluster.ext.ansible import utils
realpath = utils.path_dwim(basedir, path)
loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)])
environment = jinja2.Environment(loader=loader, trim_blocks=True)
for filter_plugin in utils.plugins.filter_loader.all():
filters = filter_plugin.filters()
if not isinstance(filters, dict):
raise errors.AnsibleError("FilterModule.filters should return a dict.")
environment.filters.update(filters)
try:
data = codecs.open(realpath, encoding="utf8").read()
except UnicodeDecodeError:
raise errors.AnsibleError("unable to process as utf-8: %s" % realpath)
except:
raise errors.AnsibleError("unable to read %s" % realpath)
# Get jinja env overrides from template
if data.startswith(JINJA2_OVERRIDE):
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol+1:]
for pair in line.split(','):
(key,val) = pair.split(':')
setattr(environment,key.strip(),val.strip())
environment.template_class = J2Template
t = environment.from_string(data)
vars = vars.copy()
try:
template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name
except:
template_uid = os.stat(realpath).st_uid
vars['template_host'] = os.uname()[1]
vars['template_path'] = realpath
vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath))
vars['template_uid'] = template_uid
vars['template_fullpath'] = os.path.abspath(realpath)
vars['template_run_date'] = datetime.datetime.now()
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host = vars['template_host'],
uid = vars['template_uid'],
file = vars['template_path']
)
vars['ansible_managed'] = time.strftime(managed_str,
time.localtime(os.path.getmtime(realpath)))
# This line performs deep Jinja2 magic that uses the _jinja2_vars object for vars
# Ideally, this could use some API where setting shared=True and the object won't get
# passed through dict(o), but I have not found that yet.
res = jinja2.utils.concat(t.root_render_func(t.new_context(_jinja2_vars(basedir, vars, t.globals), shared=True)))
if data.endswith('\n') and not res.endswith('\n'):
res = res + '\n'
return template(basedir, res, vars) | run a file through the templating engine |
def join(self, fm_new, minimal_subset=True):
"""
Adds content of a new Datamat to this Datamat.
If a parameter of the Datamats is not equal or does not exist
in one, it is promoted to a field.
If the two Datamats have different fields then the elements for the
Datamats that did not have the field will be NaN, unless
'minimal_subset' is true, in which case the mismatching fields will
simply be deleted.
Parameters
fm_new : instance of Datamat
This Datamat is added to the current one.
minimal_subset : if true, remove fields which don't exist in both,
instead of using NaNs for missing elements (defaults to True)
Capacity to use superset of fields added by rmuil 2012/01/30
"""
# Check if parameters are equal. If not, promote them to fields.
'''
for (nm, val) in fm_new._parameters.items():
if self._parameters.has_key(nm):
if (val != self._parameters[nm]):
self.parameter_to_field(nm)
fm_new.parameter_to_field(nm)
else:
fm_new.parameter_to_field(nm)
'''
# Deal with mismatch in the fields
# First those in self that do not exist in new...
orig_fields = self._fields[:]
for field in orig_fields:
if not field in fm_new._fields:
if minimal_subset:
self.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
fm_new.add_field_like(field, self.field(field))
# ... then those in the new that do not exist in self.
orig_fields = fm_new._fields[:]
for field in orig_fields:
if not field in self._fields:
if minimal_subset:
fm_new.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
self.add_field_like(field, fm_new.field(field))
if 'SUBJECTINDEX' in self._fields[:]:
if fm_new.SUBJECTINDEX[0] in self.SUBJECTINDEX:
fm_new.SUBJECTINDEX[:] = self.SUBJECTINDEX.max()+1
# Concatenate fields
for field in self._fields:
self.__dict__[field] = ma.hstack((self.__dict__[field],
fm_new.__dict__[field]))
# Update _num_fix
self._num_fix += fm_new._num_fix | Adds content of a new Datamat to this Datamat.
If a parameter of the Datamats is not equal or does not exist
in one, it is promoted to a field.
If the two Datamats have different fields then the elements for the
Datamats that did not have the field will be NaN, unless
'minimal_subset' is true, in which case the mismatching fields will
simply be deleted.
Parameters
fm_new : instance of Datamat
This Datamat is added to the current one.
minimal_subset : if true, remove fields which don't exist in both,
instead of using NaNs for missing elements (defaults to True)
Capacity to use superset of fields added by rmuil 2012/01/30 |
def fetch_guilds(self, *, limit=100, before=None, after=None):
"""|coro|
Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.
.. note::
Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
:attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.
.. note::
This method is an API call. For general usage, consider :attr:`guilds` instead.
All parameters are optional.
Parameters
-----------
limit: Optional[:class:`int`]
The number of guilds to retrieve.
If ``None``, it retrieves every guild you have access to. Note, however,
that this would make it a slow operation.
Defaults to 100.
before: :class:`.abc.Snowflake` or :class:`datetime.datetime`
Retrieves guilds before this date or object.
If a date is provided it must be a timezone-naive datetime representing UTC time.
after: :class:`.abc.Snowflake` or :class:`datetime.datetime`
Retrieve guilds after this date or object.
If a date is provided it must be a timezone-naive datetime representing UTC time.
Raises
------
HTTPException
Getting the guilds failed.
Yields
--------
:class:`.Guild`
The guild with the guild data parsed.
Examples
---------
Usage ::
async for guild in client.fetch_guilds(limit=150):
print(guild.name)
Flattening into a list ::
guilds = await client.fetch_guilds(limit=150).flatten()
# guilds is now a list of Guild...
"""
return GuildIterator(self, limit=limit, before=before, after=after) | |coro|
Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.
.. note::
Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
:attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.
.. note::
This method is an API call. For general usage, consider :attr:`guilds` instead.
All parameters are optional.
Parameters
-----------
limit: Optional[:class:`int`]
The number of guilds to retrieve.
If ``None``, it retrieves every guild you have access to. Note, however,
that this would make it a slow operation.
Defaults to 100.
before: :class:`.abc.Snowflake` or :class:`datetime.datetime`
Retrieves guilds before this date or object.
If a date is provided it must be a timezone-naive datetime representing UTC time.
after: :class:`.abc.Snowflake` or :class:`datetime.datetime`
Retrieve guilds after this date or object.
If a date is provided it must be a timezone-naive datetime representing UTC time.
Raises
------
HTTPException
Getting the guilds failed.
Yields
--------
:class:`.Guild`
The guild with the guild data parsed.
Examples
---------
Usage ::
async for guild in client.fetch_guilds(limit=150):
print(guild.name)
Flattening into a list ::
guilds = await client.fetch_guilds(limit=150).flatten()
# guilds is now a list of Guild... |
def emflx(self, area, wavelengths=None):
"""Calculate
:ref:`equivalent monochromatic flux <synphot-formula-emflx>`.
Parameters
----------
area, wavelengths
See :func:`unit_response`.
Returns
-------
em_flux : `~astropy.units.quantity.Quantity`
Equivalent monochromatic flux.
"""
t_lambda = self.tlambda(wavelengths=wavelengths)
if t_lambda == 0: # pragma: no cover
em_flux = 0.0 * units.FLAM
else:
uresp = self.unit_response(area, wavelengths=wavelengths)
equvw = self.equivwidth(wavelengths=wavelengths).value
em_flux = uresp * equvw / t_lambda
return em_flux | Calculate
:ref:`equivalent monochromatic flux <synphot-formula-emflx>`.
Parameters
----------
area, wavelengths
See :func:`unit_response`.
Returns
-------
em_flux : `~astropy.units.quantity.Quantity`
Equivalent monochromatic flux. |
def parse(self, line=None):
"""parses the line provided, if None then uses sys.argv"""
args = self.parser.parse_args(args=line)
return args.func(args) | parses the line provided, if None then uses sys.argv |
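This is the usual argparse sub-command dispatch: each sub-parser registers its handler with set_defaults(func=...), and parse() simply calls it. A minimal standalone version:
import argparse

def cmd_greet(args):
    return 'hello %s' % args.name

parser = argparse.ArgumentParser(prog='demo')
sub = parser.add_subparsers(dest='command')
greet = sub.add_parser('greet')
greet.add_argument('name')
greet.set_defaults(func=cmd_greet)

args = parser.parse_args(['greet', 'world'])
print(args.func(args))   # hello world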
def _gatk_extract_reads_cl(data, region, prep_params, tmp_dir):
"""Use GATK to extract reads from full BAM file.
"""
args = ["PrintReads",
"-L", region_to_gatk(region),
"-R", dd.get_ref_file(data),
"-I", data["work_bam"]]
# GATK3 back compatibility, need to specify analysis type
if "gatk4" in dd.get_tools_off(data):
args = ["--analysis_type"] + args
runner = broad.runner_from_config(data["config"])
return runner.cl_gatk(args, tmp_dir) | Use GATK to extract reads from full BAM file. |
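For illustration only, with hypothetical paths and a region rendered in GATK's interval syntax, the argument list built above would look roughly like this (values are made up, not bcbio output):

# region_to_gatk(region) produces a GATK interval string such as "chr1:1000-2000"
args = ["PrintReads",
        "-L", "chr1:1000-2000",
        "-R", "/refs/hg38.fa",
        "-I", "/work/sample1.bam"]
# On the GATK3 path (gatk4 listed in tools_off), the analysis type flag is prepended:
args = ["--analysis_type"] + args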
def dataset_merge_method(dataset, other, overwrite_vars, compat, join):
"""Guts of the Dataset.merge method."""
# we are locked into supporting overwrite_vars for the Dataset.merge
    # method due to backwards compatibility
# TODO: consider deprecating it?
if isinstance(overwrite_vars, str):
overwrite_vars = set([overwrite_vars])
overwrite_vars = set(overwrite_vars)
if not overwrite_vars:
objs = [dataset, other]
priority_arg = None
elif overwrite_vars == set(other):
objs = [dataset, other]
priority_arg = 1
else:
other_overwrite = OrderedDict()
other_no_overwrite = OrderedDict()
for k, v in other.items():
if k in overwrite_vars:
other_overwrite[k] = v
else:
other_no_overwrite[k] = v
objs = [dataset, other_no_overwrite, other_overwrite]
priority_arg = 2
return merge_core(objs, compat, join, priority_arg=priority_arg) | Guts of the Dataset.merge method. |
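A small usage sketch of the public entry point this helper backs, xarray's Dataset.merge with overwrite_vars; the data values are made up:

import xarray as xr

ds = xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("x", [10, 20, 30])})
other = xr.Dataset({"a": ("x", [9, 9, 9])})

# Conflicting values for "a" would normally fail the merge; naming it in
# overwrite_vars gives `other` priority for that variable (priority_arg above).
merged = ds.merge(other, overwrite_vars="a")
print(merged["a"].values)   # [9 9 9]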
def get_section_key_line(self, data, key, opt_extension=':'):
"""Get the next section line for a given key.
:param data: the data to proceed
:param key: the key
:param opt_extension: an optional extension to delimit the opt value
"""
return super(GoogledocTools, self).get_section_key_line(data, key, opt_extension) | Get the next section line for a given key.
:param data: the data to proceed
:param key: the key
:param opt_extension: an optional extension to delimit the opt value |
def set_blink_rate(self, b):
"""
Set the user's desired blink rate (0 - 3)
@param b: blink rate
"""
if b > 3:
b = 0 # turn off if not sure
self.firmata.i2c_write(self.board_address,
(self.HT16K33_BLINK_CMD | self.HT16K33_BLINK_DISPLAYON | (b << 1))) | Set the user's desired blink rate (0 - 3)
@param b: blink rate |
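To make the bit arithmetic concrete, assuming the usual HT16K33 register values (0x80 for the blink command, 0x01 for display-on, with rates assumed to follow the datasheet), the byte written for each rate would be:

HT16K33_BLINK_CMD = 0x80         # assumed standard HT16K33 constant
HT16K33_BLINK_DISPLAYON = 0x01   # assumed standard HT16K33 constant

for b in range(4):
    cmd = HT16K33_BLINK_CMD | HT16K33_BLINK_DISPLAYON | (b << 1)
    print(b, hex(cmd))
# 0 0x81  (display on, blinking off)
# 1 0x83  (2 Hz)
# 2 0x85  (1 Hz)
# 3 0x87  (0.5 Hz)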
def git_env(self):
'Set the index file and prevent git from reading global configs.'
env = dict(os.environ)
for var in ["HOME", "XDG_CONFIG_HOME"]:
env.pop(var, None)
env["GIT_CONFIG_NOSYSTEM"] = "true"
# Weirdly, GIT_INDEX_FILE is interpreted relative to the work tree. As
        # a workaround, we make the path absolute.
env["GIT_INDEX_FILE"] = os.path.abspath(self.index_file)
return env | Set the index file and prevent git from reading global configs. |
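A hedged usage sketch: a git command run with this environment sees only the repository's own index and config. `repo` stands for a hypothetical instance of the class defining git_env(), and the paths are illustrative:

import subprocess

env = repo.git_env()                       # hypothetical instance of the class above
subprocess.run(["git", "status", "--porcelain"],
               cwd="/path/to/worktree",    # illustrative working tree
               env=env, check=True)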
def _ParseAndValidateRecord(self, parser_mediator, text_file_object):
"""Parses and validates an Opera global history record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
text_file_object (dfvfs.TextFile): text file.
Returns:
bool: True if the record was successfully parsed.
"""
try:
title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
except UnicodeDecodeError:
return False
if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\n':
return False
if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\n':
return False
if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\n':
return False
if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and
popularity_index[-1] != '\n'):
return False
title = title.strip()
url = url.strip()
timestamp = timestamp.strip()
popularity_index = popularity_index.strip()
if not title or not url or not timestamp or not popularity_index:
return False
event_data = OperaGlobalHistoryEventData()
if not self._IsValidUrl(url):
return False
event_data.url = url
if title != url:
event_data.title = title
try:
event_data.popularity_index = int(popularity_index, 10)
timestamp = int(timestamp, 10)
except ValueError:
return False
if event_data.popularity_index < 0:
event_data.description = 'First and Only Visit'
else:
event_data.description = 'Last Visit'
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
return True | Parses and validates an Opera global history record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
text_file_object (dfvfs.TextFile): text file.
Returns:
bool: True if the record was successfully parsed. |
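For reference, a record the parser above accepts is four newline-terminated lines: title, URL, a POSIX timestamp, and a popularity index (negative meaning a first and only visit). The sample below is made up:

import io

sample = io.StringIO(
    "Example Domain\n"
    "http://example.com/\n"
    "1388534400\n"   # 2014-01-01 00:00:00 UTC as a POSIX timestamp
    "-1\n"           # negative popularity index -> "First and Only Visit"
)
title, url, timestamp, popularity = (sample.readline().strip() for _ in range(4))
print(title, url, int(timestamp), int(popularity))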
def open(self, callback, instance=None, processor=None):
"""
Begin consuming messages.
:param string instance: (Optional) instance to use in the WebSocket URL
:param string processor: (Optional) processor to use in the WebSocket URL
"""
assert not self._closed
ws_url = self._client.ws_root
if instance:
ws_url += '/' + instance
if processor:
ws_url += '/' + processor
self._callback = callback
self._websocket = websocket.WebSocketApp(
ws_url,
on_open=self._on_websocket_open,
on_message=self._on_websocket_message,
on_error=self._on_websocket_error,
subprotocols=['protobuf'],
header=[
'{}: {}'.format(k, self._client.session.headers[k])
for k in self._client.session.headers
],
)
self._consumer = threading.Thread(target=self._websocket.run_forever)
# Running this as a daemon thread improves possibilities
# for consumers of our API to control shutdown.
# (example: can just use time.sleep on the main thread instead of blocking on the future)
self._consumer.daemon = True
self._consumer.start() | Begin consuming messages.
:param string instance: (Optional) instance to use in the WebSocket URL
:param string processor: (Optional) processor to use in the WebSocket URL |
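A hedged usage sketch; `subscriber` stands for an instance of the class above, and the single-argument callback signature is an assumption based on how `_callback` is typically invoked:

def on_message(message):
    # Hypothetical handler; the concrete message type depends on the subscription.
    print("received:", message)

subscriber.open(on_message, instance="simulator", processor="realtime")   # hypothetical values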
def get_cache(self, decorated_function, *args, **kwargs):
""" :meth:`WCacheStorage.get_cache` method implementation
"""
self.__check(decorated_function, *args, **kwargs)
if decorated_function in self._storage:
for i in self._storage[decorated_function]:
if i['instance']() == args[0]:
result = i['result'].cache_entry(*args, **kwargs)
if self.__statistic is True:
if result.has_value is True:
self.__cache_hit += 1
else:
self.__cache_missed += 1
return result
if self.__statistic is True:
self.__cache_missed += 1
return WCacheStorage.CacheEntry() | :meth:`WCacheStorage.get_cache` method implementation |
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
    '''Generate a name for a class that is unique within the namespace, using the uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid.
'''
count = 0
name = original_name = 'f_' + uuid.hex
while name in namespace:
count += 1
name = original_name + '_' + str(count)
    return name | Generate a name for a class that is unique within the namespace, using the uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid. |
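A small sketch of the collision handling, using a fixed UUID so the output is predictable (it assumes _unique_class_name is importable from the module above):

import uuid as uuid_module

fixed = uuid_module.UUID("00000000-0000-0000-0000-000000000001")
namespace = {}

first = _unique_class_name(namespace, fixed)
namespace[first] = object()                  # simulate the name already being taken
second = _unique_class_name(namespace, fixed)

print(first)    # f_00000000000000000000000000000001
print(second)   # f_00000000000000000000000000000001_1  (counter suffix avoids the clash)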
def __get_git_bin():
"""
Get git binary location.
    :return: Location of the git binary.
"""
git = 'git'
alternatives = [
'/usr/bin/git'
]
for alt in alternatives:
if os.path.exists(alt):
git = alt
break
return git | Get git binary location.
    :return: Location of the git binary.
def repr_setup(self, name=None, col_names=None, col_types=None):
"""
This wasn't safe to pass into init because of the inheritance
"""
self._name = name or self._name
self._col_types = col_types or self._col_types | This wasn't safe to pass into init because of the inheritance |