text (stringlengths 78–104k) | score (float64 0–0.18) |
---|---|
def get_term_by_name(self, name):
"""Get the GO term with the given GO term name.
If the given name is not associated with any GO term, the function will
search for it among synonyms.
Parameters
----------
name: str
The name of the GO term.
Returns
-------
GOTerm
The GO term with the given name.
Raises
------
ValueError
If the given name is found neither among the GO term names, nor
among synonyms.
"""
term = None
func_name = 'get_term_by_name'
try:
term = self.terms[self._name2id[name]]
except KeyError:
try:
term = self.terms[self._syn2id[name]]
except KeyError:
pass
else:
logger.warning(
'%s: GO term name "%s" is a synonym for "%s".',
func_name, name, term.name)
if term is None:
raise ValueError('%s : GO term name "%s" not found!'
% (func_name, name))
return term | 0.001727 |
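A minimal, self-contained sketch of the same lookup pattern (primary name table first, synonym table as a fallback with a warning), using plain dictionaries as hypothetical stand-ins for self._name2id, self._syn2id and self.terms:

# Hypothetical stand-in data for the ontology tables used above.
name2id = {"apoptotic process": "GO:0006915"}
syn2id = {"apoptosis": "GO:0006915"}
terms = {"GO:0006915": "apoptotic process"}

def lookup(name):
    # Primary lookup by term name, then fall back to the synonym table.
    try:
        return terms[name2id[name]]
    except KeyError:
        try:
            term = terms[syn2id[name]]
        except KeyError:
            raise ValueError('GO term name "%s" not found!' % name)
        print('warning: "%s" is a synonym for "%s"' % (name, term))
        return term

print(lookup("apoptosis"))  # resolved via the synonym table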
def nacm_rule_list_rule_module_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
# 'name' keys both rule-list and rule, so pop it only once.
name = kwargs.pop('name')
name_key = ET.SubElement(rule_list, "name")
name_key.text = name
rule = ET.SubElement(rule_list, "rule")
name_key = ET.SubElement(rule, "name")
name_key.text = name
module_name = ET.SubElement(rule, "module-name")
module_name.text = kwargs.pop('module_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004172 |
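The function above builds a nested NETCONF XML payload with ElementTree. A stand-alone sketch of the same construction, keeping the namespace but dropping the callback and using hypothetical values in place of the kwargs:

import xml.etree.ElementTree as ET

# Build config/nacm/rule-list/rule/module-name, then serialize it
# for inspection instead of handing it to a device callback.
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm",
                     xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
ET.SubElement(rule_list, "name").text = "admin-rules"   # hypothetical value
rule = ET.SubElement(rule_list, "rule")
ET.SubElement(rule, "name").text = "permit-all"          # hypothetical value
ET.SubElement(rule, "module-name").text = "*"            # hypothetical value
print(ET.tostring(config).decode())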
def get_api_key_form(userfilter={}):
"""
userfileter: when binding api key with user, filter some users if necessary
"""
class APIKeyForm(ModelForm):
class Meta:
model = APIKeys
exclude = ("apitree",)
user = forms.ModelChoiceField(queryset=get_user_model().objects.filter(**userfilter),
required=True,)
return APIKeyForm | 0.004785 |
async def _analog_message(self, data):
"""
This is a private message handler method.
It is a message handler for analog messages.
:param data: message data
:returns: None - but saves the data in the pins structure
"""
pin = data[0]
value = (data[PrivateConstants.MSB] << 7) + data[PrivateConstants.LSB]
# if self.analog_pins[pin].current_value != value:
self.analog_pins[pin].current_value = value
# append pin number, pin value, and pin type to return value and return as a list
message = [pin, value, Constants.ANALOG]
if self.analog_pins[pin].cb:
if self.analog_pins[pin].cb_type:
await self.analog_pins[pin].cb(message)
else:
loop = self.loop
loop.call_soon(self.analog_pins[pin].cb, message)
# is there a latch entry for this pin?
key = 'A' + str(pin)
if key in self.latch_map:
await self._check_latch_data(key, message[1]) | 0.002871 |
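The value reconstruction above follows the common Firmata-style convention of splitting an analog reading into two 7-bit bytes. A small sketch of the decode (and the matching encode) with a made-up reading:

# A 10-bit analog reading (e.g. 687) travels as two 7-bit bytes.
value = 687
lsb = value & 0x7F           # low 7 bits
msb = (value >> 7) & 0x7F    # remaining high bits
# Reassembled exactly as in _analog_message above:
decoded = (msb << 7) + lsb
assert decoded == value
print(lsb, msb, decoded)     # 47 5 687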
def midpoint_refine_triangulation_by_vertices(self, vertices):
"""
return points defining a refined triangulation obtained by bisection of all edges
in the triangulation connected to any of the vertices in the list provided
"""
mlons, mlats = self.segment_midpoints_by_vertices(vertices=vertices)
lonv1 = np.concatenate((self.lons, mlons), axis=0)
latv1 = np.concatenate((self.lats, mlats), axis=0)
return lonv1, latv1 | 0.008247 |
def return_hdr(ts, package):
"""
Hand back the hdr - duh - if the pkg is foobar, hand back None
Shamelessly stolen from Seth Vidal
http://yum.baseurl.org/download/misc/checksig.py
"""
try:
fdno = os.open(package, os.O_RDONLY)
except OSError:
hdr = None
return hdr
ts.setVSFlags(~(rpm.RPMVSF_NOMD5 | rpm.RPMVSF_NEEDPAYLOAD))
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error:
# bad signature or unreadable header: fall through and return None
hdr = None
if type(hdr) != rpm.hdr:
hdr = None
ts.setVSFlags(0)
os.close(fdno)
return hdr | 0.001695 |
def RemoveAllClientLabels(self, client_id):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
"""
labels_to_remove = set(
[l.name for l in data_store.REL_DB.ReadClientLabels(client_id)])
self.RemoveClientLabels(client_id, labels_to_remove) | 0.006711 |
def compare_overlaps(context: list, synsets_signatures: dict,
nbest=False, keepscore=False, normalizescore=False) -> "wn.Synset":
"""
Calculates overlaps between the context sentence and the synset signatures
and returns a ranked list of synsets from highest overlap to lowest.
:param context: List of strings, tokenized sentence or document.
:param synsets_signatures: dict of Synsets and the set of their corresponding signatures.
:return: The Synset with the highest number of overlaps with its signatures,
or the full ranked list if `nbest` is True.
"""
overlaplen_synsets = [] # a tuple of (len(overlap), synset).
for ss in synsets_signatures:
overlaps = set(synsets_signatures[ss]).intersection(context)
overlaplen_synsets.append((len(overlaps), ss))
# Rank synsets from highest to lowest overlap.
ranked_synsets = sorted(overlaplen_synsets, reverse=True)
# Normalize scores such that it's between 0 to 1.
if normalizescore:
total = float(sum(i[0] for i in ranked_synsets))
ranked_synsets = [(i/total,j) for i,j in ranked_synsets]
if not keepscore: # Returns a list of ranked synsets without scores
ranked_synsets = [i[1] for i in sorted(overlaplen_synsets, reverse=True)]
# Returns a ranked list of synsets otherwise only the best sense.
return ranked_synsets if nbest else ranked_synsets[0] | 0.006508 |
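A self-contained sketch of the overlap counting and ranking performed by compare_overlaps, using plain strings and invented signature sets in place of WordNet synsets:

# Rank candidate senses by how many of their signature words
# appear in the context, mirroring compare_overlaps above.
context = ["the", "bank", "raised", "interest", "rates"]
signatures = {
    "bank.n.01": {"river", "slope", "water"},
    "bank.n.02": {"money", "interest", "rates", "deposit"},
}
ranked = sorted(
    ((len(sig & set(context)), sense) for sense, sig in signatures.items()),
    reverse=True,
)
print(ranked)        # [(2, 'bank.n.02'), (0, 'bank.n.01')]
print(ranked[0][1])  # best sense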
def _prm_write_dict_as_table(self, key, data_to_store, group, fullname, **kwargs):
"""Stores a python dictionary as pytable
:param key:
Name of data item to store
:param data_to_store:
Dictionary to store
:param group:
Group node where to store data in hdf5 file
:param fullname:
Full name of the `data_to_store`s original container, only needed for throwing errors.
"""
if key in group:
raise ValueError(
'Dictionary `%s` already exists in `%s`. Appending is not '
'supported (yet).' % (key, fullname))
temp_dict = {}
for innerkey in data_to_store:
val = data_to_store[innerkey]
temp_dict[innerkey] = [val]
# Convert dictionary to object table
objtable = ObjectTable(data=temp_dict)
# Then store the object table
self._prm_write_into_pytable(key, objtable, group, fullname, **kwargs)
new_table = group._f_get_child(key)
# Remember that the Object Table represents a dictionary
self._all_set_attributes_to_recall_natives(temp_dict, new_table,
HDF5StorageService.DATA_PREFIX)
setattr(new_table._v_attrs, HDF5StorageService.STORAGE_TYPE,
HDF5StorageService.DICT)
self._hdf5file.flush() | 0.00462 |
def _parse_eval_args(self,*args,**kwargs):
"""
NAME:
_parse_eval_args
PURPOSE:
Internal function to parse the arguments given for an action/frequency/angle evaluation
INPUT:
OUTPUT:
HISTORY:
2010-07-11 - Written - Bovy (NYU)
"""
if len(args) == 3: #R,vR,vT
R,vR,vT= args
self._eval_R= R
self._eval_vR= vR
self._eval_vT= vT
self._eval_z= 0.
self._eval_vz= 0.
elif len(args) == 5: #R,vR,vT, z, vz
R,vR,vT, z, vz= args
self._eval_R= R
self._eval_vR= vR
self._eval_vT= vT
self._eval_z= z
self._eval_vz= vz
elif len(args) == 6: #R,vR,vT, z, vz, phi
R,vR,vT, z, vz, phi= args
self._eval_R= R
self._eval_vR= vR
self._eval_vT= vT
self._eval_z= z
self._eval_vz= vz
self._eval_phi= phi
else:
if not kwargs.get('_noOrbUnitsCheck',False):
self._check_consistent_units_orbitInput(args[0])
if len(args) == 2:
vxvv= args[0](args[1])._orb.vxvv
else:
try:
vxvv= args[0]._orb.vxvv
except AttributeError: #if we're given an OrbitTop instance
vxvv= args[0].vxvv
self._eval_R= vxvv[0]
self._eval_vR= vxvv[1]
self._eval_vT= vxvv[2]
if len(vxvv) > 4:
self._eval_z= vxvv[3]
self._eval_vz= vxvv[4]
if len(vxvv) > 5:
self._eval_phi= vxvv[5]
elif len(vxvv) > 3:
self._eval_phi= vxvv[3]
self._eval_z= 0.
self._eval_vz= 0.
else:
self._eval_z= 0.
self._eval_vz= 0.
if hasattr(self,'_eval_z'): #calculate the polar angle
if self._eval_z == 0.: self._eval_theta= m.pi/2.
else: self._eval_theta= m.atan(self._eval_R/self._eval_z)
return None | 0.027816 |
def rehearse(
folders, references, handler,
repeat=0, roles=1, strict=False,
loop=None
):
"""Cast a set of objects into a sequence of scene scripts. Deliver the performance.
:param folders: A sequence of
:py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
:param references: A sequence of Python objects.
:param handler: A callable object. This will be invoked with every event from the
performance.
:param int repeat: Extra repetitions of each folder.
:param int roles: Maximum number of roles permitted each character.
:param bool strict: Only fully-cast scripts to be performed.
This function is a generator. It yields events from the performance.
"""
if isinstance(folders, SceneScript.Folder):
folders = [folders]
yield from handler(references, loop=loop)
matcher = Matcher(folders)
performer = Performer(folders, references)
while True:
folder, index, script, selection, interlude = performer.next(
folders, references, strict=strict, roles=roles
)
yield from handler(script, loop=loop)
for item in performer.run(react=False, strict=strict, roles=roles):
yield from handler(item, loop=loop)
if isinstance(interlude, Callable):
metadata = next(handler(
interlude, folder, index, references, loop=loop
), None)
yield metadata
if metadata is None:
return
branch = next(matcher.options(metadata))
if branch != folder:
performer = Performer([branch], references)
if not repeat:
break
else:
repeat -= 1 | 0.00171 |
def to_csv(self, datadir=None, sep=None, cycles=False, raw=True,
summary=True, shifted=False,
method=None, shift=0.0,
last_cycle=None):
"""Saves the data as .csv file(s).
Args:
datadir: folder where to save the data (uses current folder if not
given).
sep: the separator to use in the csv file
(defaults to CellpyData.sep).
cycles: (bool) export voltage-capacity curves if True.
raw: (bool) export raw-data if True.
summary: (bool) export summary if True.
shifted (bool): export with cumulated shift.
method (string): how the curves are given
"back-and-forth" - standard back and forth; discharge
(or charge) reversed from where charge (or
discharge) ends.
"forth" - discharge (or charge) continues along x-axis.
"forth-and-forth" - discharge (or charge) also starts at 0 (or
shift if not shift=0.0)
shift: start-value for charge (or discharge)
last_cycle: process only up to this cycle (if not None).
Returns: Nothing
"""
if sep is None:
sep = self.sep
self.logger.debug("saving to csv")
dataset_number = -1
for data in self.datasets:
dataset_number += 1
if not self._is_not_empty_dataset(data):
self.logger.info("to_csv -")
self.logger.info("empty test [%i]" % dataset_number)
self.logger.info("not saved!")
else:
if isinstance(data.loaded_from, (list, tuple)):
txt = "merged file"
txt += "using first file as basename"
self.logger.debug(txt)
no_merged_sets = len(data.loaded_from)
no_merged_sets = "_merged_" + str(no_merged_sets).zfill(3)
filename = data.loaded_from[0]
else:
filename = data.loaded_from
no_merged_sets = ""
firstname, extension = os.path.splitext(filename)
firstname += no_merged_sets
if datadir:
firstname = os.path.join(datadir,
os.path.basename(firstname))
if raw:
outname_normal = firstname + "_normal.csv"
self._export_normal(data, outname=outname_normal, sep=sep)
if data.step_table_made is True:
outname_steps = firstname + "_steps.csv"
self._export_steptable(data, outname=outname_steps,
sep=sep)
else:
self.logger.debug("step_table_made is not True")
if summary:
outname_stats = firstname + "_stats.csv"
self._export_stats(data, outname=outname_stats, sep=sep)
if cycles:
outname_cycles = firstname + "_cycles.csv"
self._export_cycles(outname=outname_cycles,
dataset_number=dataset_number,
sep=sep, shifted=shifted,
method=method, shift=shift,
last_cycle=last_cycle) | 0.001408 |
def leave_chat(
self,
chat_id: Union[int, str],
delete: bool = False
):
"""Use this method to leave a group chat or channel.
Args:
chat_id (``int`` | ``str``):
Unique identifier for the target chat or username of the target channel/supergroup
(in the format @username).
delete (``bool``, *optional*):
Deletes the group chat dialog after leaving (for simple group chats, not supergroups).
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
peer = self.resolve_peer(chat_id)
if isinstance(peer, types.InputPeerChannel):
return self.send(
functions.channels.LeaveChannel(
channel=peer
)
)
elif isinstance(peer, types.InputPeerChat):
r = self.send(
functions.messages.DeleteChatUser(
chat_id=peer.chat_id,
user_id=types.InputPeerSelf()
)
)
if delete:
self.send(
functions.messages.DeleteHistory(
peer=peer,
max_id=0
)
)
return r | 0.004386 |
def parseFASTAEditingCommandLineOptions(args, reads):
"""
Examine parsed FASTA editing command-line options and return information
about kept sites and sequences.
@param args: An argparse namespace, as returned by the argparse
C{parse_args} function.
@param reads: A C{Reads} instance to filter.
@return: The filtered C{Reads} instance.
"""
removeGaps = args.removeGaps
removeDescriptions = args.removeDescriptions
truncateTitlesAfter = args.truncateTitlesAfter
keepSites = (
parseRangeString(args.keepSites, convertToZeroBased=True)
if args.keepSites else None)
if args.keepSitesFile:
keepSites = keepSites or set()
with open(args.keepSitesFile) as fp:
for lineNumber, line in enumerate(fp):
try:
keepSites.update(
parseRangeString(line, convertToZeroBased=True))
except ValueError as e:
raise ValueError(
'Keep sites file %r line %d could not be parsed: '
'%s' % (args.keepSitesFile, lineNumber, e))
removeSites = (
parseRangeString(args.removeSites, convertToZeroBased=True)
if args.removeSites else None)
if args.removeSitesFile:
removeSites = removeSites or set()
with open(args.removeSitesFile) as fp:
for lineNumber, line in enumerate(fp):
try:
removeSites.update(
parseRangeString(line, convertToZeroBased=True))
except ValueError as e:
raise ValueError(
'Remove sites file %r line %d parse error: %s'
% (args.removeSitesFile, lineNumber, e))
return reads.filter(
removeGaps=removeGaps,
truncateTitlesAfter=truncateTitlesAfter,
removeDescriptions=removeDescriptions,
idLambda=args.idLambda, readLambda=args.readLambda,
keepSites=keepSites, removeSites=removeSites,
reverse=args.reverse, reverseComplement=args.reverseComplement) | 0.000469 |
def master_address(self, name):
"""Returns a (host, port) pair for the given ``name``."""
fut = self.execute(b'get-master-addr-by-name', name, encoding='utf-8')
return wait_convert(fut, parse_address) | 0.008929 |
def addDerivesFrom(self, child_id, parent_id):
"""
We add a derives_from relationship between the child and parent id.
Examples of uses include between:
an allele and a construct or strain here,
a cell line and it's parent genotype. Adding the parent and child to
the graph should happen outside of this function call to ensure graph
integrity.
:param child_id:
:param parent_id:
:return:
"""
self.graph.addTriple(
child_id, self.globaltt['derives_from'], parent_id)
return | 0.003373 |
def from_epw_file(cls, epwfile, timestep=1):
"""Create a wea object using the solar irradiance values in an epw file.
Args:
epwfile: Full path to epw weather file.
timestep: An optional integer to set the number of time steps per hour.
Default is 1 for one value per hour. Note that this input
will only do a linear interpolation over the data in the EPW
file. While such linear interpolations are suitable for most
thermal simulations, where thermal lag "smooths over" the effect
of momentary increases in solar energy, it is not recommended
for daylight simulations, where momentary increases in solar
energy can mean the difference between glare and visual comfort.
"""
is_leap_year = False # epw file is always for 8760 hours
epw = EPW(epwfile)
direct_normal, diffuse_horizontal = \
cls._get_data_collections(epw.direct_normal_radiation.values,
epw.diffuse_horizontal_radiation.values,
epw.metadata, 1, is_leap_year)
if timestep != 1:
print ("Note: timesteps greater than 1 on epw-generated Wea's \n" +
"are suitable for thermal models but are not recommended \n" +
"for daylight models.")
# interpolate the data
direct_normal = direct_normal.interpolate_to_timestep(timestep)
diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep)
# create sunpath to check if the sun is up at a given timestep
sp = Sunpath.from_location(epw.location)
# add correct values to the empty data collection
for i, dt in enumerate(cls._get_datetimes(timestep, is_leap_year)):
# set irradiance values to 0 when the sun is not up
sun = sp.calculate_sun_from_date_time(dt)
if sun.altitude < 0:
direct_normal[i] = 0
diffuse_horizontal[i] = 0
return cls(epw.location, direct_normal, diffuse_horizontal,
timestep, is_leap_year) | 0.003989 |
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
.. math::
x_{\\textnormal{low}} = 0
x_{\\textnormal{high}} = \\log_{10}(\\lambda_{\\textnormal{max}} \\;\
(1 + \\textnormal{factor}))
Parameters
----------
factor : float
Used to calculate ``x_high``.
"""
w0 = self.lambda_max
return (w0 * 0, np.log10(w0 + factor * w0)) | 0.003984 |
def check_module_usage(modpath_patterns):
"""
FIXME: not fully implemented
Desired behavior is ---
Given a set of modules specified by a list of patterns, returns how the
functions defined in the modules are called: a) with themselves and b) by
other files in the project not in the given set.
Args:
modpath_patterns (list):
CommandLine:
python -m utool.util_inspect check_module_usage --show
utprof.py -m utool.util_inspect check_module_usage --show
python -m utool.util_inspect check_module_usage --pat="['auto*', 'user_dialogs.py', 'special_query.py', 'qt_inc_automatch.py', 'devcases.py']"
python -m utool.util_inspect check_module_usage --pat="preproc_detectimg.py"
python -m utool.util_inspect check_module_usage --pat="neighbor_index.py"
python -m utool.util_inspect check_module_usage --pat="manual_chip_funcs.py"
python -m utool.util_inspect check_module_usage --pat="preproc_probchip.py"
python -m utool.util_inspect check_module_usage --pat="guiback.py"
python -m utool.util_inspect check_module_usage --pat="util_str.py"
Ignore:
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> modpath_patterns = ['_grave*']
>>> modpath_patterns = ['auto*', 'user_dialogs.py', 'special_query.py', 'qt_inc_automatch.py', 'devcases.py']
>>> modpath_patterns = ['neighbor_index.py']
>>> modpath_patterns = ['manual_chip_funcs.py']
>>> modpath_patterns = ut.get_argval('--pat', type_=list, default=['*'])
>>> result = check_module_usage(modpath_patterns)
>>> print(result)
"""
import utool as ut
#dpath = '~/code/ibeis/ibeis/algo/hots'
modpaths = ut.flatten([ut.glob_projects(pat) for pat in modpath_patterns])
modpaths = ut.unique(modpaths)
modnames = ut.lmap(ut.get_modname_from_modpath, modpaths)
print('Checking usage of modules: ' + ut.repr3(modpaths))
# Mark as True is module is always explicitly imported
restrict_to_importing_modpaths = False
cache = {}
def find_where_module_is_imported(modname):
""" finds where a module was explicitly imported. (in most scenareos) """
# Find places where the module was imported
patterns = ut.possible_import_patterns(modname)
# do modname grep with all possible import patterns
grepres = ut.grep_projects(patterns, new=True, verbose=False, cache=cache)
return grepres.found_fpath_list
def find_function_callers(funcname, importing_modpaths):
""" searches for places where a function is used """
pattern = '\\b' + funcname + '\\b',
# Search which module uses each public member
grepres = ut.grep_projects(
pattern, new=True, verbose=False, cache=cache,
fpath_list=importing_modpaths)
# Exclude places where function is defined or call is commented out
nohit_patterns = [
r'^\s*def',
r'^\s*#',
r'\-\-exec\-',
r'\-\-test-',
r'^\s*python -m ',
r'^\s*python -m ibeis ',
r'^\s*ibeis ',
r'\-\-test\-[a-zA-z]*\.',
r'\-\-exec\-[a-zA-z]*\.',
]
nohit_patterns += [
r'^\s*\>\>\>',
]
filter_pat = ut.regex_or(nohit_patterns)
# import copy
# grepres_ = copy.deepcopy(grepres)
grepres.inplace_filter_results(filter_pat)
grepres.found_modnames = ut.lmap(ut.get_modname_from_modpath,
grepres.found_fpath_list)
parent_numlines = ut.lmap(len, grepres.found_lines_list)
numcall_graph_ = dict(zip(grepres.found_modnames, parent_numlines))
# Remove self references
#ut.delete_keys(numcall_graph_, modnames)
return numcall_graph_, grepres
print('Find modules that use the query modules')
# Note: only works for explicit imports
importing_modpaths_list = [find_where_module_is_imported(modname) for modname in modnames]
print('Find members of the query modules')
funcnames_list = [get_funcnames_from_modpath(modpath) for modpath in modpaths]
print('Building call graph')
cache = {}
func_numcall_graph = ut.ddict(dict)
grep_results = ut.ddict(dict)
# Extract public members from each module
exclude_self = ut.get_argflag('--exclude-self')
_iter = list(zip(modnames, modpaths, importing_modpaths_list, funcnames_list))
_iter = ut.ProgIter(_iter, lbl='Searching query module', bs=False)
for modname, modpath, importing_modpaths, funcname_list in _iter:
if not restrict_to_importing_modpaths:
importing_modpaths = None
# Search for each function in modpath
for funcname in ut.ProgIter(funcname_list, lbl='Searching funcs in query module'):
numcall_graph_, grepres = find_function_callers(funcname, importing_modpaths)
grep_results[modname][funcname] = grepres
if exclude_self:
if modname in numcall_graph_:
del numcall_graph_[modname]
func_numcall_graph[modname][funcname] = numcall_graph_
# Sort by incidence cardinality
# func_numcall_graph = ut.odict([(key, ut.sort_dict(val, 'vals', len)) for key, val in func_numcall_graph.items()])
# Sort by weighted degree
func_numcall_graph = ut.odict([(key, ut.sort_dict(val, 'vals', lambda x: sum(x.values())))
for key, val in func_numcall_graph.items()])
# Print out grep results in order
print('PRINTING GREP RESULTS IN ORDER')
for modname, num_callgraph in func_numcall_graph.items():
print('\n============\n')
for funcname in num_callgraph.keys():
print('\n============\n')
with ut.Indenter('[%s]' % (funcname,)):
grepres = grep_results[modname][funcname]
print(grepres)
# print(func_numcall_graph[modname][funcname])
print('PRINTING NUMCALLGRAPH IN ORDER')
# Print out callgraph in order
print('func_numcall_graph = %s' % (ut.repr3(func_numcall_graph),))
# importance_dict = {}
# import copy
# func_call_graph2 = copy.deepcopy(func_numcall_graph)
# #ignore_modnames = []
# ignore_modnames = ['ibeis.algo.hots.multi_index', 'ibeis.algo.hots._neighbor_experiment']
# num_callers = ut.ddict(dict)
# for modname, modpath in list(zip(modnames, modpaths)):
# subdict = func_call_graph2[modname]
# for funcname in subdict.keys():
# numcall_graph_ = subdict[funcname]
# ut.delete_keys(numcall_graph_, modnames)
# ut.delete_keys(numcall_graph_, ignore_modnames)
# num_callers[modname][funcname] = sum(numcall_graph_.values())
# print(ut.repr4(num_callers[modname], sorted_=True, key_order_metric='val'))
# # Check external usage
# unused_external = []
# grep_results2 = copy.deepcopy(grep_results)
# for modname, grepres_subdict in grep_results2.items():
# for funcname, grepres_ in grepres_subdict.items():
# idxs = ut.find_list_indexes(grepres_.found_modnames, modnames)
# idxs += ut.find_list_indexes(grepres_.found_modnames, ignore_modnames)
# idxs = list(ut.filter_Nones(idxs))
# ut.delete_items_by_index(grepres_, idxs)
# ut.delete_items_by_index(grepres_.found_modnames, idxs)
# if len(grepres_) > 0:
# print(grepres_.make_resultstr())
# else:
# unused_external += [funcname]
# print('internal grep')
# # Check internal usage
# unused_internal = []
# grep_results2 = copy.deepcopy(grep_results)
# for modname, grepres_subdict in grep_results2.items():
# for funcname, grepres_ in grepres_subdict.items():
# idxs = ut.filter_Nones(ut.find_list_indexes(grepres_.found_modnames, [modname]))
# idxs_ = ut.index_complement(idxs, len(grepres_.found_modnames))
# ut.delete_items_by_index(grepres_, idxs_)
# ut.delete_items_by_index(grepres_.found_modnames, idxs_)
# grepres_.hack_remove_pystuff()
# #self = grepres_
# if len(grepres_) > 0:
# #print(modname)
# #print(funcname)
# #print(grepres_.extended_regex_list)
# print(grepres_.make_resultstr())
# else:
# unused_internal += [funcname]
# # HACK: how to write ut.parfor
# # returns a 0 lenth iterator so the for loop is never run. Then uses code
# # introspection to determine the content of the for loop body executes code
# # using the values of the local variables in a parallel / distributed
# # context.
# for modname, modpath in zip(modnames, modpaths):
# pattern = '\\b' + modname + '\\b',
# grepres = ut.grep_projects(pattern, new=True, verbose=False, cache=cache)
# parent_modnames = ut.lmap(ut.get_modname_from_modpath, grepres.found_fpath_list)
# parent_numlines = ut.lmap(len, grepres.found_lines_list)
# importance = dict(zip(parent_modnames, parent_numlines))
# ut.delete_keys(importance, modnames)
# importance_dict[modname] = importance
# print('importance_dict = %s' % (ut.repr3(importance_dict),))
# combo = reduce(ut.dict_union, importance_dict.values())
# print('combined %s' % (ut.repr3(combo),))
# print(ut.repr3(found_fpath_list))
pass | 0.001967 |
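At its core the search above greps for a function name with word boundaries and filters out definition and comment lines. A minimal sketch of that idea over in-memory source strings (the sample sources and the helper name are made up):

import re

# Count calls of a function across a few source snippets, skipping the
# line that defines it and commented-out lines, as the filter above does.
sources = {
    "a.py": "def helper(x):\n    return x\n\nprint(helper(1))\n",
    "b.py": "# helper(2) is disabled\nvalue = helper(3)\n",
}
funcname = "helper"
pattern = re.compile(r"\b%s\b" % re.escape(funcname))
skip = re.compile(r"^\s*(def\s|#)")
calls = {
    fname: sum(
        1 for line in text.splitlines()
        if pattern.search(line) and not skip.match(line)
    )
    for fname, text in sources.items()
}
print(calls)  # {'a.py': 1, 'b.py': 1}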
def adj_nodes_ali(ali_nodes):
"""Adjust details specific to AliCloud."""
for node in ali_nodes:
node.cloud = "alicloud"
node.cloud_disp = "AliCloud"
node.private_ips = ip_to_str(node.extra['vpc_attributes']['private_ip_address'])
node.public_ips = ip_to_str(node.public_ips)
node.zone = node.extra['zone_id']
node.size = node.extra['instance_type']
if node.size.startswith('ecs.'):
node.size = node.size[len('ecs.'):]
return ali_nodes | 0.003891 |
def output(self):
"""!
@brief Returns output dynamic of the network.
"""
if (self.__ccore_legion_dynamic_pointer is not None):
return wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer);
return self.__output; | 0.022654 |
def create_issue(project,
summary,
description,
template_engine='jinja',
context=None,
defaults=None,
saltenv='base',
issuetype='Bug',
priority='Normal',
labels=None,
assignee=None,
server=None,
username=None,
password=None,
**kwargs):
'''
Create a JIRA issue using the named settings. Return the JIRA ticket ID.
project
The name of the project to attach the JIRA ticket to.
summary
The summary (title) of the JIRA ticket. When the ``template_engine``
argument is set to a proper value of an existing Salt template engine
(e.g., ``jinja``, ``mako``, etc.) it will render the ``summary`` before
creating the ticket.
description
The full body description of the JIRA ticket. When the ``template_engine``
argument is set to a proper value of an existing Salt template engine
(e.g., ``jinja``, ``mako``, etc.) it will render the ``description`` before
creating the ticket.
template_engine: ``jinja``
The name of the template engine to be used to render the values of the
``summary`` and ``description`` arguments. Default: ``jinja``.
context: ``None``
The context to pass when rendering the ``summary`` and ``description``.
This argument is ignored when ``template_engine`` is set as ``None``
defaults: ``None``
Default values to pass to the Salt rendering pipeline for the
``summary`` and ``description`` arguments.
This argument is ignored when ``template_engine`` is set as ``None``.
saltenv: ``base``
The Salt environment name (for the rendering system).
issuetype: ``Bug``
The type of the JIRA ticket. Default: ``Bug``.
priority: ``Normal``
The priority of the JIRA ticket. Default: ``Normal``.
labels: ``None``
A list of labels to add to the ticket.
assignee: ``None``
The name of the person to assign the ticket to.
CLI Examples:
.. code-block:: bash
salt '*' jira.create_issue NET 'Ticket title' 'Ticket description'
salt '*' jira.create_issue NET 'Issue on {{ opts.id }}' 'Error detected on {{ opts.id }}' template_engine=jinja
'''
if template_engine:
summary = __salt__['file.apply_template_on_contents'](summary,
template=template_engine,
context=context,
defaults=defaults,
saltenv=saltenv)
description = __salt__['file.apply_template_on_contents'](description,
template=template_engine,
context=context,
defaults=defaults,
saltenv=saltenv)
jira_ = _get_jira(server=server,
username=username,
password=password)
if not labels:
labels = []
data = {
'project': {
'key': project
},
'summary': summary,
'description': description,
'issuetype': {
'name': issuetype
},
'priority': {
'name': priority
},
'labels': labels
}
data.update(clean_kwargs(**kwargs))
issue = jira_.create_issue(data)
issue_key = str(issue)
if assignee:
assign_issue(issue_key, assignee)
return issue_key | 0.002554 |
def format(self, maxline=0, pstyle='v', nline=None, extend=True):
""" Formats fit output details into a string for printing.
The output tabulates the ``chi**2`` per degree of freedom of the fit
(``chi2/dof``), the number of degrees of freedom, the ``Q`` value of
the fit (ie, p-value), and the logarithm of the Gaussian Bayes Factor
for the fit (``logGBF``). At the end it lists the SVD cut, the number
of eigenmodes modified by the SVD cut, the tolerances used in the fit,
and the time in seconds needed to do the fit. The tolerance used to
terminate the fit is marked with an asterisk. It also lists
information about the fitter used if it is other than the standard
choice.
Optionally, ``format`` will also list the best-fit values
for the fit parameters together with the prior for each (in ``[]`` on
each line). Lines for parameters that deviate from their prior by more
than one (prior) standard deviation are marked with asterisks, with
the number of asterisks equal to the number of standard deviations (up
to five). Lines for parameters designated as linear (see ``linear``
keyword) are marked with a minus sign after their prior.
``format`` can also list all of the data and the corresponding values
from the fit, again with asterisks on lines where there is a
significant discrepancy.
Args:
maxline (int or bool): Maximum number of data points for which
fit results and input data are tabulated. ``maxline<0``
implies that only ``chi2``, ``Q``, ``logGBF``, and ``itns``
are tabulated; no parameter values are included. Setting
``maxline=True`` prints all data points; setting it
equal ``None`` or ``False`` is the same as setting
it equal to ``-1``. Default is ``maxline=0``.
pstyle (str or None): Style used for parameter list. Supported
values are 'vv' for very verbose, 'v' for verbose, and 'm' for
minimal. When 'm' is set, only parameters whose values differ
from their prior values are listed. Setting ``pstyle=None``
implies no parameters are listed.
extend (bool): If ``True``, extend the parameter list to
include values derived from log-normal or other
non-Gaussian parameters. So values for fit parameter
``p['log(a)']``, for example, are listed together with
values ``p['a']`` for the exponential of the fit parameter.
Setting ``extend=False`` means that only the value
for ``p['log(a)']`` is listed. Default is ``True``.
Returns:
String containing detailed information about fit.
"""
# unpack arguments
if nline is not None and maxline == 0:
maxline = nline # for legacy code (old name)
if maxline is True:
# print all data
maxline = sys.maxsize
if maxline is False or maxline is None:
maxline = -1
if pstyle is not None:
if pstyle[:2] == 'vv':
pstyle = 'vv'
elif pstyle[:1] == 'v':
pstyle = 'v'
elif pstyle[:1] == 'm':
pstyle = 'm'
else:
raise ValueError("Invalid pstyle: "+str(pstyle))
def collect(v1, v2, style='v', stride=1, extend=False):
""" Collect data from v1 and v2 into table.
Returns list of [label,v1fmt,v2fmt]s for each entry in v1 and
v2. Here v1fmt and v2fmt are strings representing entries in v1
and v2, while label is assembled from the key/index of the
entry.
"""
def nstar(v1, v2):
sdev = max(v1.sdev, v2.sdev)
nstar = int(abs(v1.mean - v2.mean) / sdev)
if nstar > 5:
nstar = 5
elif nstar < 1:
nstar = 0
return ' ' + nstar * '*'
ct = 0
ans = []
width = [0,0,0]
stars = []
if v1.shape is None:
# BufferDict
keys = list(v1.keys())
if extend:
v1 = _gvar.BufferDict(v1)
v2 = _gvar.BufferDict(v2)
ekeys = v1.extension_keys()
if len(ekeys) > 0:
first_ekey = ekeys[0]
keys += ekeys
else:
extend = False
for k in keys:
if extend and k == first_ekey:
# marker indicating beginning of extra keys
stars.append(None)
ans.append(None)
ktag = str(k)
if numpy.shape(v1[k]) == ():
if ct%stride != 0:
ct += 1
continue
if style in ['v','m']:
v1fmt = v1[k].fmt(sep=' ')
v2fmt = v2[k].fmt(sep=' ')
else:
v1fmt = v1[k].fmt(-1)
v2fmt = v2[k].fmt(-1)
if style == 'm' and v1fmt == v2fmt:
ct += 1
continue
stars.append(nstar(v1[k], v2[k]))
ans.append([ktag, v1fmt, v2fmt])
w = [len(ai) for ai in ans[-1]]
for i, (wo, wn) in enumerate(zip(width, w)):
if wn > wo:
width[i] = wn
ct += 1
else:
ktag = ktag + " "
for i in numpy.ndindex(v1[k].shape):
if ct%stride != 0:
ct += 1
continue
ifmt = (len(i)*"%d,")[:-1] % i
if style in ['v','m']:
v1fmt = v1[k][i].fmt(sep=' ')
v2fmt = v2[k][i].fmt(sep=' ')
else:
v1fmt = v1[k][i].fmt(-1)
v2fmt = v2[k][i].fmt(-1)
if style == 'm' and v1fmt == v2fmt:
ct += 1
continue
stars.append(nstar(v1[k][i], v2[k][i]))
ans.append([ktag+ifmt, v1fmt, v2fmt])
w = [len(ai) for ai in ans[-1]]
for i, (wo, wn) in enumerate(zip(width, w)):
if wn > wo:
width[i] = wn
ct += 1
ktag = ""
else:
# numpy array
v2 = numpy.asarray(v2)
for k in numpy.ndindex(v1.shape):
# convert array(GVar) to GVar
v1k = v1[k] if hasattr(v1[k], 'fmt') else v1[k].flat[0]
v2k = v2[k] if hasattr(v2[k], 'fmt') else v2[k].flat[0]
if ct%stride != 0:
ct += 1
continue
kfmt = (len(k) * "%d,")[:-1] % k
if style in ['v','m']:
v1fmt = v1k.fmt(sep=' ')
v2fmt = v2k.fmt(sep=' ')
else:
v1fmt = v1k.fmt(-1)
v2fmt = v2k.fmt(-1)
if style == 'm' and v1fmt == v2fmt:
ct += 1
continue
stars.append(nstar(v1k, v2k)) ###
ans.append([kfmt, v1fmt, v2fmt])
w = [len(ai) for ai in ans[-1]]
for i, (wo, wn) in enumerate(zip(width, w)):
if wn > wo:
width[i] = wn
ct += 1
collect.width = width
collect.stars = stars
return ans
# build header
dof = self.dof
if dof > 0:
chi2_dof = self.chi2/self.dof
else:
chi2_dof = self.chi2
try:
Q = 'Q = %.2g' % self.Q
except:
Q = ''
try:
logGBF = 'logGBF = %.5g' % self.logGBF
except:
logGBF = ''
if self.prior is None:
descr = ' (no prior)'
else:
descr = ''
table = ('Least Square Fit%s:\n chi2/dof [dof] = %.2g [%d] %s'
' %s\n' % (descr, chi2_dof, dof, Q, logGBF))
if maxline < 0:
return table
# create parameter table
if pstyle is not None:
table = table + '\nParameters:\n'
prior = self.prior
if prior is None:
if self.p0.shape is None:
prior = _gvar.BufferDict(
self.p0, buf=self.p0.flatten() + _gvar.gvar(0,float('inf')))
else:
prior = self.p0 + _gvar.gvar(0,float('inf'))
data = collect(self.palt, prior, style=pstyle, stride=1, extend=extend)
w1, w2, w3 = collect.width
fst = "%%%ds%s%%%ds%s[ %%%ds ]" % (
max(w1, 15), 3 * ' ',
max(w2, 10), int(max(w2,10)/2) * ' ', max(w3,10)
)
if len(self.linear) > 0:
spacer = [' ', '-']
else:
spacer = ['', '']
for i, (di, stars) in enumerate(zip(data, collect.stars)):
if di is None:
# marker for boundary between true fit parameters and derived parameters
ndashes = (
max(w1, 15) + 3 + max(w2, 10) + int(max(w2, 10)/2)
+ 4 + max(w3, 10)
)
table += ndashes * '-' + '\n'
continue
table += (
(fst % tuple(di)) +
spacer[i in self.linear] +
stars + '\n'
)
# settings
settings = "\nSettings:"
if not self.add_svdnoise or self.svdcut is None or self.svdcut < 0:
settings += "\n svdcut/n = {svdcut:.2g}/{svdn}".format(
svdcut=self.svdcut if self.svdcut is not None else 0.0,
svdn=self.svdn
)
else:
settings += "\n svdcut/n = {svdcut:.2g}/{svdn}*".format(
svdcut=self.svdcut, svdn=self.svdn
)
criterion = self.stopping_criterion
try:
fmtstr = [
" tol = ({:.2g},{:.2g},{:.2g})",
" tol = ({:.2g}*,{:.2g},{:.2g})",
" tol = ({:.2g},{:.2g}*,{:.2g})",
" tol = ({:.2g},{:.2g},{:.2g}*)",
][criterion if criterion is not None else 0]
settings += fmtstr.format(*self.tol)
except:
pass
if criterion is not None and criterion == 0:
settings +=" (itns/time = {itns}*/{time:.1f})".format(
itns=self.nit, time=self.time
)
else:
settings +=" (itns/time = {itns}/{time:.1f})".format(
itns=self.nit, time=self.time
)
default_line = '\n fitter = gsl_multifit methods = lm/more/qr\n'
newline = "\n fitter = {} {}\n".format(
self.fitter, self.description
)
if newline != default_line:
settings += newline
else:
settings += '\n'
if maxline <= 0 or self.data is None:
return table + settings
# create table comparing fit results to data
ny = self.y.size
stride = 1 if maxline >= ny else (int(ny/maxline) + 1)
if hasattr(self, 'fcn_p'):
f = self.fcn_p
elif self.x is False:
f = self.fcn(self.p)
else:
f = self.fcn(self.x, self.p)
if hasattr(f, 'keys'):
f = _gvar.BufferDict(f)
else:
f = numpy.array(f)
data = collect(self.y, f, style='v', stride=stride, extend=False)
w1,w2,w3 = collect.width
clabels = ("key","y[key]","f(p)[key]")
if self.y.shape is not None and self.x is not False and self.x is not None:
# use x[k] to label lines in table?
try:
x = numpy.array(self.x)
xlist = []
ct = 0
for k in numpy.ndindex(x.shape):
if ct%stride != 0:
ct += 1
continue
xlist.append("%g" % x[k])
assert len(xlist) == len(data)
except:
xlist = None
if xlist is not None:
for i,(d1,d2,d3) in enumerate(data):
data[i] = (xlist[i],d2,d3)
clabels = ("x[k]","y[k]","f(x[k],p)")
w1,w2,w3 = max(9,w1+4), max(9,w2+4), max(9,w3+4)
table += "\nFit:\n"
fst = "%%%ds%%%ds%%%ds\n" % (w1, w2, w3)
table += fst % clabels
table += (w1 + w2 + w3) * "-" + "\n"
for di, stars in zip(data, collect.stars):
table += fst[:-1] % tuple(di) + stars + '\n'
return table + settings | 0.003104 |
def disable_pow_check(chain_class: Type[BaseChain]) -> Type[BaseChain]:
"""
Disable the proof of work validation check for each of the chain's vms.
This allows for block mining without generation of the proof of work seal.
.. note::
blocks mined this way will not be importable on any chain that does not
have proof of work disabled.
"""
if not chain_class.vm_configuration:
raise ValidationError("Chain class has no vm_configuration")
if issubclass(chain_class, NoChainSealValidationMixin):
# Seal validation already disabled, hence nothing to change
chain_class_without_seal_validation = chain_class
else:
chain_class_without_seal_validation = type(
chain_class.__name__,
(chain_class, NoChainSealValidationMixin),
{},
)
return chain_class_without_seal_validation.configure( # type: ignore
vm_configuration=_mix_in_disable_seal_validation(
chain_class_without_seal_validation.vm_configuration # type: ignore
),
) | 0.001854 |
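A small sketch of the mechanism used above: type() builds a new class on the fly that inherits from both the original class and a mixin while keeping the original name. The toy classes below are illustrative only and unrelated to py-evm:

# Toy stand-ins for the chain class and the mixin.
class BaseToyChain:
    pass

class NoSealValidationMixin:
    pass

# Equivalent of the type(...) call in disable_pow_check above.
ToyChainNoSeal = type(
    BaseToyChain.__name__,
    (BaseToyChain, NoSealValidationMixin),
    {},
)
print(ToyChainNoSeal.__name__)                            # BaseToyChain
print(issubclass(ToyChainNoSeal, NoSealValidationMixin))  # True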
def get_access_token_from_cli():
'''Get an Azure authentication token from CLI's cache.
Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login'
recently), or if you are running in Azure Cloud Shell (aka cloud console)
Returns:
An Azure authentication token string.
'''
# check if running in cloud shell, if so, pick up token from MSI_ENDPOINT
if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ:
endpoint = os.environ['MSI_ENDPOINT']
headers = {'Metadata': 'true'}
body = {"resource": "https://management.azure.com/"}
ret = requests.post(endpoint, headers=headers, data=body)
return ret.json()['access_token']
else: # not running cloud shell
home = os.path.expanduser('~')
sub_username = ""
# 1st identify current subscription
azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json'
if os.path.isfile(azure_profile_path) is False:
print('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path)
return None
with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd:
subs = json.load(azure_profile_fd)
for sub in subs['subscriptions']:
if sub['isDefault'] == True:
sub_username = sub['user']['name']
if sub_username == "":
print('Error from get_access_token_from_cli(): Default subscription not found in ' + \
azure_profile_path)
return None
# look for access_token
access_keys_path = home + os.sep + '.azure' + os.sep + 'accessTokens.json'
if os.path.isfile(access_keys_path) is False:
print('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path)
return None
with open(access_keys_path, 'r') as access_keys_fd:
keys = json.load(access_keys_fd)
# loop through accessTokens.json until first unexpired entry found
for key in keys:
if key['userId'] == sub_username:
if 'accessToken' not in key:
print('Error from get_access_token_from_cli(): accessToken not found in ' + \
access_keys_path)
return None
if 'tokenType' not in key:
print('Error from get_access_token_from_cli(): tokenType not found in ' + \
access_keys_path)
return None
if 'expiresOn' not in key:
print('Error from get_access_token_from_cli(): expiresOn not found in ' + \
access_keys_path)
return None
expiry_date_str = key['expiresOn']
# check date and skip past expired entries
if 'T' in expiry_date_str:
exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%dT%H:%M:%S.%fZ')
else:
exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%d %H:%M:%S.%f')
if exp_date < dt.now():
continue
else:
return key['accessToken']
# if dropped out of the loop, token expired
print('Error from get_access_token_from_cli(): token expired. Run \'az login\'')
return None | 0.007278 |
def get_duplicates(self):
"""
Extract duplicated index elements.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns a sorted list of index elements which appear more than once in
the index.
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates() # doctest: +SKIP
[2, 3]
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates() # doctest: +SKIP
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates() # doctest: +SKIP
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates() # doctest: +SKIP
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates() # doctest: +SKIP
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique() | 0.000964 |
def _check_start_timestamp(self):
"""Check that starting timestamp exists for cumulative metrics."""
if self.descriptor.type in (
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
):
for ts in self.time_series:
if ts.start_timestamp is None:
raise ValueError("time_series.start_timestamp must exist "
"for cumulative metrics") | 0.003268 |
def from_analysis_period(cls, analysis_period, clearness=1,
daylight_savings_indicator='No'):
""""Initialize a OriginalClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, clearness,
daylight_savings_indicator) | 0.008 |
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None | 0.002653 |
def Max(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Finds the maximum between two vertices
:param left: one of the vertices to find the maximum of
:param right: one of the vertices to find the maximum of
"""
return Double(context.jvm_view().MaxVertex, label, cast_to_double_vertex(left), cast_to_double_vertex(right)) | 0.014252 |
def _get_containing_contigs(self, hits_dict):
'''Given dictionary of nucmer hits (made by self._load_nucmer_hits()), returns a dictionary.
key=contig name. Value = set of contigs that contain the key.'''
containing = {}
for qry_name in hits_dict:
d = self._containing_contigs(hits_dict[qry_name])
if len(d):
containing[qry_name] = d
return containing | 0.006897 |
def find(self, query=None, func=None, labels=None, colors=None, pinned=None, archived=None, trashed=False): # pylint: disable=too-many-arguments
"""Find Notes based on the specified criteria.
Args:
query (Union[_sre.SRE_Pattern, str, None]): A str or regular expression to match against the title and text.
func (Union[callable, None]): A filter function.
labels (Union[List[str], None]): A list of label ids or objects to match. An empty list matches notes with no labels.
colors (Union[List[str], None]): A list of colors to match.
pinned (Union[bool, None]): Whether to match pinned notes.
archived (Union[bool, None]): Whether to match archived notes.
trashed (Union[bool, None]): Whether to match trashed notes.
Return:
List[gkeepapi.node.TopLevelNode]: Results.
"""
if labels is not None:
labels = [i.id if isinstance(i, _node.Label) else i for i in labels]
return (node for node in self.all() if
(query is None or (
(isinstance(query, six.string_types) and (query in node.title or query in node.text)) or
(isinstance(query, Pattern) and (
query.search(node.title) or query.search(node.text)
))
)) and
(func is None or func(node)) and \
(labels is None or \
(not labels and not node.labels.all()) or \
(any((node.labels.get(i) is not None for i in labels)))
) and \
(colors is None or node.color in colors) and \
(pinned is None or node.pinned == pinned) and \
(archived is None or node.archived == archived) and \
(trashed is None or node.trashed == trashed)
) | 0.009259 |
def apply_order(self):
'''Naively apply query orders.'''
self._ensure_modification_is_safe()
if len(self.query.orders) > 0:
self._iterable = Order.sorted(self._iterable, self.query.orders) | 0.009662 |
def _clean(cls, value, invalid):
"""Deprecated. See BlockUsageLocator._clean"""
cls._deprecation_warning()
return BlockUsageLocator._clean(value, invalid) | 0.011236 |
def main(self, argv=None, exit=True):
"""
Shortcut for running a command.
See :meth:`guacamole.recipes.Recipe.main()` for details.
"""
return CommandRecipe(self).main(argv, exit) | 0.009132 |
def document_iter(self, context):
"""
Iterates over all the elements in an iterparse context
(here: <document> elements) and yields an URMLDocumentGraph instance
for each of them. For efficiency, the elements are removed from the
DOM / main memory after processing them.
If ``self.debug`` is set to ``True`` (in the ``__init__`` method),
this method will yield <documents> elements, which can be used to
construct ``URMLDocumentGraph``s manually.
"""
for _event, elem in context:
if not self.debug:
yield URMLDocumentGraph(elem, tokenize=self.tokenize,
precedence=self.precedence)
else:
yield elem
# removes element (and references to it) from memory after processing it
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
del context | 0.003018 |
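The clear-after-yield pattern above keeps memory bounded while walking a large XML file. A self-contained sketch of the same idea with the standard-library iterparse (the lxml-specific getprevious/getparent cleanup is omitted, and the input document is invented):

import io
import xml.etree.ElementTree as ET

xml_data = b"<corpus><document id='d1'/><document id='d2'/></corpus>"

def document_iter(fileobj):
    # Yield each <document> element's id, then drop the element from memory.
    for _event, elem in ET.iterparse(fileobj, events=("end",)):
        if elem.tag == "document":
            yield elem.get("id")
            elem.clear()

print(list(document_iter(io.BytesIO(xml_data))))  # ['d1', 'd2']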
def parse(self, declaration):
"""
Parse sql type declaration, e.g. varchar(10) and return instance of corresponding type class,
e.g. VarCharType(10)
@param declaration: Sql declaration to parse, e.g. varchar(10)
@return: instance of SqlTypeMetaclass
"""
declaration = declaration.strip()
for regex, constructor in self._compiled:
m = regex.match(declaration)
if m:
return constructor(*m.groups())
raise ValueError('Unable to parse type declaration', declaration) | 0.005217 |
def log_K(Z, X, t):
"""
Log K
Log of the proposal probability density function for gnm
Inputs :
Z :
proposed to
X :
proposed from
Outputs :
log of the probability density function
"""
m, L = update_params(X, t)
z = Z['x']
return np.log(det(L))-la.norm(np.dot(L.T,z-m))**2/2. | 0.013624 |
def debug_log(
self, no_tail=False, exclude_module=None, include_module=None,
include=None, level=None, limit=0, lines=10, replay=False,
exclude=None):
"""Get log messages for this model.
:param bool no_tail: Stop after returning existing log messages
:param list exclude_module: Do not show log messages for these logging
modules
:param list include_module: Only show log messages for these logging
modules
:param list include: Only show log messages for these entities
:param str level: Log level to show, valid options are 'TRACE',
'DEBUG', 'INFO', 'WARNING', and 'ERROR'
:param int limit: Return this many of the most recent (possibly
filtered) lines are shown
:param int lines: Yield this many of the most recent lines, and keep
yielding
:param bool replay: Yield the entire log, and keep yielding
:param list exclude: Do not show log messages for these entities
"""
raise NotImplementedError() | 0.001837 |
def remove(self, iterable, data=None, index=0):
"""Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to be removed
Returns:
bool:
True: if it was removed
False: if it was not removed
"""
if index == len(iterable):
if self.is_terminal:
if data:
self.data.remove(data)
if len(self.data) == 0:
self.is_terminal = False
else:
self.data.clear()
self.is_terminal = False
return True
else:
return False
elif iterable[index] in self.children:
return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
else:
return False | 0.003018 |
def _create_and_send_json_bulk(self, payload, req_url, request_type="POST"):
"""Create a json, do a request to the URL and process the response.
:param list payload: contains the information necessary for the action.
It is a list of dictionaries.
:param str req_url: URL to request with the payload.
:param str request_type: type of request, either "POST" or "DELETE".
:default request_type: "POST".
:return: response of the request.
:rtype: list of dict.
:raises CraftAiBadRequestError: if the payload doesn't have the
correct form to be transformed into a json or request_type is
neither "POST" or "DELETE".
"""
# Extra header in addition to the main session's
ct_header = {"Content-Type": "application/json; charset=utf-8"}
try:
json_pl = json.dumps(payload)
except TypeError as err:
raise CraftAiBadRequestError("Error while dumping the payload into json"
"format when converting it for the bulk request. {}"
.format(err.__str__()))
if request_type == "POST":
resp = self._requests_session.post(req_url, headers=ct_header, data=json_pl)
elif request_type == "DELETE":
resp = self._requests_session.delete(req_url, headers=ct_header, data=json_pl)
else:
raise CraftAiBadRequestError("Request for the bulk API should be either a POST or DELETE"
"request")
agents = self._decode_response(resp)
agents = self._decode_response_bulk(agents)
return agents | 0.006325 |
def parse_vtrgb(path='/etc/vtrgb'):
''' Parse the color table for the Linux console. '''
palette = ()
table = []
try:
with open(path) as infile:
for i, line in enumerate(infile):
row = tuple(int(val) for val in line.split(','))
table.append(row)
if i == 2: # failsafe
break
palette = tuple(zip(*table)) # swap rows to columns
except IOError as err:
palette = color_tables.vga_palette4
return palette | 0.00188 |
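The row-to-column swap at the end of parse_vtrgb is a plain zip(*rows). A tiny sketch with a shortened, made-up table standing in for /etc/vtrgb (three lines, one per RGB channel, as the parser above expects):

# Shortened fake table: one line each for the red, green and blue
# components of three console colors.
lines = ["0,170,0", "0,0,170", "0,0,0"]
table = [tuple(int(v) for v in line.split(",")) for line in lines]
palette = tuple(zip(*table))  # one (r, g, b) tuple per color
print(palette)  # ((0, 0, 0), (170, 0, 0), (0, 170, 0))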
def to_api_repr(self):
"""Construct JSON API representation for the parameter.
:rtype: dict
:returns: JSON mapping
"""
s_types = {}
values = {}
for name, value in self.struct_values.items():
type_ = self.struct_types[name]
if type_ in ("STRUCT", "ARRAY"):
repr_ = value.to_api_repr()
s_types[name] = {"name": name, "type": repr_["parameterType"]}
values[name] = repr_["parameterValue"]
else:
s_types[name] = {"name": name, "type": {"type": type_}}
converter = _SCALAR_VALUE_TO_JSON_PARAM.get(type_)
if converter is not None:
value = converter(value)
values[name] = {"value": value}
resource = {
"parameterType": {
"type": "STRUCT",
"structTypes": [s_types[key] for key in self.struct_types],
},
"parameterValue": {"structValues": values},
}
if self.name is not None:
resource["name"] = self.name
return resource | 0.001741 |
def remove_comments(tokens):
"""
Removes comments from *tokens* which is expected to be a list equivalent of
tokenize.generate_tokens() (so we can update in-place).
.. note::
* If the comment makes up the whole line, the newline will also be removed (so you don't end up with lots of blank lines).
* Preserves shebangs and encoding strings.
"""
preserved_shebang = ""
preserved_encoding = ""
# This (short) loop preserves shebangs and encoding strings:
for tok in tokens[0:4]: # Will always be in the first four tokens
line = tok[4]
# Save the first comment line if it starts with a shebang
# (e.g. '#!/usr/bin/env python')
if analyze.shebang.match(line): # Must be first line
preserved_shebang = line
# Save the encoding string (must be first or second line in file)
# (e.g. '# -*- coding: utf-8 -*-')
elif analyze.encoding.match(line):
preserved_encoding = line
# Now remove comments:
prev_tok_type = 0
for index, tok in enumerate(tokens):
token_type = tok[0]
if token_type == tokenize.COMMENT:
tokens[index][1] = '' # Making it an empty string removes it
# TODO: Figure out a way to make this work
#elif prev_tok_type == tokenize.COMMENT:
#if token_type == tokenize.NL:
#tokens[index][1] = '' # Remove trailing newline
prev_tok_type = token_type
# Prepend our preserved items back into the token list:
if preserved_shebang: # Have to re-tokenize them
io_obj = io.StringIO(preserved_shebang + preserved_encoding)
preserved = [list(a) for a in tokenize.generate_tokens(io_obj.readline)]
preserved.pop() # Get rid of ENDMARKER
preserved.reverse() # Round and round we go!
for item in preserved:
tokens.insert(0, item) | 0.006831 |
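A self-contained sketch of comment stripping with the tokenize module: blank out COMMENT tokens and reassemble the source, a simpler round trip than the in-place list editing above:

import io
import tokenize

def strip_comments(source):
    # Blank out COMMENT tokens and rebuild the source text.
    result = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.COMMENT:
            tok = tok._replace(string="")
        result.append(tok)
    return tokenize.untokenize(result)

src = "x = 1  # set x\nprint(x)\n"
print(strip_comments(src))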
def run(self, steps=None, ipyclient=None, force=False, quiet=False):
"""
Submits an ordered list of jobs to a load-balancer to complete
the following tasks, and reports a progress bar:
(1) Write nexus files for each locus
(2) Run mrBayes on each locus to get a posterior of gene trees
(3) Run mbsum (a bucky tool) on the posterior set of trees
(4) Run Bucky on the summarized set of trees for all alpha values.
Parameters:
-----------
ipyclient (ipyparallel.Client())
A connected ipyparallel Client object used to distribute jobs
force (bool):
Whether to overwrite existing files with the same name and workdir
if they exist. Default is False.
quiet (bool):
Whether to suppress progress information. Default is False.
steps (list):
A list of integers of steps to perform. This is useful if a
job was interrupted, or you created a new bucky object copy,
or you wish to run an analysis under a new set of parameters,
after having run it once. For example, if you finished running
steps 1 and 2 (write nexus files and infer mrbayes posteriors),
but you want to rerun steps 3 and 4 with new settings, then you
could enter `steps=[3,4]` and also `force=True` to run steps 3
and 4 with a new set of parameters. Default argument is None
which means run all steps.
"""
## require ipyclient
if not ipyclient:
raise IPyradWarningExit("an ipyclient object is required")
## check the steps argument
if not steps:
steps = [1, 2, 3, 4]
if isinstance(steps, (int, str)):
steps = [int(i) for i in [steps]]
if isinstance(steps, list):
if not all(isinstance(i, int) for i in steps):
raise IPyradWarningExit("steps must be a list of integers")
## run steps ------------------------------------------------------
## TODO: wrap this function so it plays nice when interrupted.
if 1 in steps:
self.write_nexus_files(force=force, quiet=quiet)
if 2 in steps:
self.run_mrbayes(force=force, quiet=quiet, ipyclient=ipyclient)
if 3 in steps:
self.run_mbsum(force=force, quiet=quiet, ipyclient=ipyclient)
if 4 in steps:
self.run_bucky(force=force, quiet=quiet, ipyclient=ipyclient)
## make sure jobs are done if waiting (TODO: maybe make this optional)
ipyclient.wait() | 0.005671 |
def call_method_async(self, method_name_or_object, params=None):
"""
Calls the ``method_name`` method from the given service asynchronously
and returns a :py:class:`gemstone.client.structs.AsyncMethodCall` instance.
:param method_name_or_object: The name of the called method or a ``MethodCall`` instance
:param params: A list of dict representing the parameters for the request
:return: a :py:class:`gemstone.client.structs.AsyncMethodCall` instance.
"""
thread_pool = self._get_thread_pool()
if isinstance(method_name_or_object, MethodCall):
req_obj = method_name_or_object
else:
req_obj = MethodCall(method_name_or_object, params)
async_result_mp = thread_pool.apply_async(self.handle_single_request, args=(req_obj,))
return AsyncMethodCall(req_obj=req_obj, async_resp_object=async_result_mp) | 0.008724 |
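The async call above is a thin wrapper around a thread pool's apply_async. A minimal stand-alone sketch of that pattern with the standard library; slow_call is an invented stand-in for handle_single_request:

from multiprocessing.pool import ThreadPool

def slow_call(method, params):
    # Stand-in for the blocking request handler used above.
    return {"method": method, "result": sum(params)}

pool = ThreadPool(processes=2)
async_result = pool.apply_async(slow_call, args=("sum", [1, 2, 3]))
# ... do other work while the call runs in a worker thread ...
print(async_result.get(timeout=5))  # {'method': 'sum', 'result': 6}
pool.close()
pool.join()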
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts | 0.002257 |
def open(server=None, url=None, ip=None, port=None, name=None, https=None, auth=None, verify_ssl_certificates=True,
proxy=None, cookies=None, verbose=True, _msgs=None):
r"""
Establish connection to an existing H2O server.
The connection is not kept alive, so what this method actually does is it attempts to connect to the
specified server, and checks that the server is healthy and responds to REST API requests. If the H2O server
cannot be reached, an :class:`H2OConnectionError` will be raised. On success this method returns a new
:class:`H2OConnection` object, and it is the only "official" way to create instances of this class.
There are 3 ways to specify the target to connect to (these settings are mutually exclusive):
* pass a ``server`` option,
* pass the full ``url`` for the connection,
* provide a triple of parameters ``ip``, ``port``, ``https``.
:param H2OLocalServer server: connect to the specified local server instance. There is a slight difference
between connecting to a local server by specifying its ip and address, and connecting through
an H2OLocalServer instance: if the server becomes unresponsive, then having access to its process handle
will allow us to query the server status through OS, and potentially provide snapshot of the server's
error log in the exception information.
:param url: full url of the server to connect to.
:param ip: target server's IP address or hostname (default "localhost").
:param port: H2O server's port (default 54321).
:param name: H2O cluster name.
:param https: if True then connect using https instead of http (default False).
:param verify_ssl_certificates: if False then SSL certificate checking will be disabled (default True). This
setting should rarely be disabled, as it makes your connection vulnerable to man-in-the-middle attacks. When
used, it will generate a warning from the requests library. Has no effect when ``https`` is False.
:param auth: authentication token for connecting to the remote server. This can be either a
(username, password) tuple, or an authenticator (AuthBase) object. Please refer to the documentation in
the ``requests.auth`` module.
:param proxy: url address of a proxy server. If you do not specify the proxy, then the requests module
will attempt to use a proxy specified in the environment (in HTTP_PROXY / HTTPS_PROXY variables). We
check for the presence of these variables and issue a warning if they are found. In order to suppress
that warning and use proxy from the environment, pass ``proxy="(default)"``.
:param cookies: Cookie (or list of) to add to requests
:param verbose: if True, then connection progress info will be printed to the stdout.
:param _msgs: custom messages to display during connection. This is a tuple (initial message, success message,
failure message).
:returns: A new :class:`H2OConnection` instance.
:raises H2OConnectionError: if the server cannot be reached.
:raises H2OServerError: if the server is in an unhealthy state (although this might be a recoverable error, the
client itself should decide whether it wants to retry or not).
"""
if server is not None:
assert_is_type(server, H2OLocalServer)
assert_is_type(ip, None, "`ip` should be None when `server` parameter is supplied")
assert_is_type(url, None, "`url` should be None when `server` parameter is supplied")
assert_is_type(name, None, "`name` should be None when `server` parameter is supplied")
if not server.is_running():
raise H2OConnectionError("Unable to connect to server because it is not running")
ip = server.ip
port = server.port
scheme = server.scheme
context_path = ''
elif url is not None:
assert_is_type(url, str)
assert_is_type(ip, None, "`ip` should be None when `url` parameter is supplied")
assert_is_type(name, str, None)
# We don't allow any Unicode characters in the URL. Maybe some day we will...
match = assert_matches(url, H2OConnection.url_pattern)
scheme = match.group(1)
ip = match.group(2)
port = int(match.group(3))
context_path = '' if match.group(4) is None else "%s" % (match.group(4))
else:
if ip is None: ip = str("localhost")
if port is None: port = 54321
if https is None: https = False
if is_type(port, str) and port.isdigit(): port = int(port)
assert_is_type(ip, str)
assert_is_type(port, int)
assert_is_type(name, str, None)
assert_is_type(https, bool)
assert_matches(ip, r"(?:[\w-]+\.)*[\w-]+")
assert_satisfies(port, 1 <= port <= 65535)
scheme = "https" if https else "http"
context_path = ''
if verify_ssl_certificates is None: verify_ssl_certificates = True
assert_is_type(verify_ssl_certificates, bool)
assert_is_type(proxy, str, None)
assert_is_type(auth, AuthBase, (str, str), None)
assert_is_type(cookies, str, [str], None)
assert_is_type(_msgs, None, (str, str, str))
conn = H2OConnection()
conn._verbose = bool(verbose)
conn._local_server = server
conn._base_url = "%s://%s:%d%s" % (scheme, ip, port, context_path)
conn._name = server.name if server else name
conn._verify_ssl_cert = bool(verify_ssl_certificates)
conn._auth = auth
conn._cookies = cookies
conn._proxies = None
if proxy and proxy != "(default)":
conn._proxies = {scheme: proxy}
elif not proxy:
# Give user a warning if there are any "*_proxy" variables in the environment. [PUBDEV-2504]
# To suppress the warning pass proxy = "(default)".
for name in os.environ:
if name.lower() == scheme + "_proxy":
warn("Proxy is defined in the environment: %s. "
"This may interfere with your H2O Connection." % name)
try:
retries = 20 if server else 5
conn._stage = 1
conn._timeout = 3.0
conn._cluster = conn._test_connection(retries, messages=_msgs)
# If a server is unable to respond within 1s, it should be considered a bug. However we disable this
# setting for now, for no good reason other than to ignore all those bugs :(
conn._timeout = None
# This is a good one! On the surface it registers a callback to be invoked when the script is about
# to finish, but it also has a side effect in that the reference to current connection will be held
# by the ``atexit`` service till the end -- which means it will never be garbage-collected.
atexit.register(lambda: conn.close())
except Exception:
# Reset _session_id so that we know the connection was not initialized properly.
conn._stage = 0
raise
return conn | 0.006174 |
def load_and_init(self, modules):
"""Import, instantiate & "init" the modules we manage
:param modules: list of the managed modules
:return: True if no errors
"""
self.load(modules)
self.get_instances()
return len(self.configuration_errors) == 0 | 0.006601 |
def walk(self, leavesonly=True, maxdepth=None, _depth = 0):
"""Depth-first search, walking through trie, returning all encounterd nodes (by default only leaves)"""
if self.children:
if not maxdepth or (maxdepth and _depth < maxdepth):
for key, child in self.children.items():
if child.leaf():
yield child
else:
for results in child.walk(leavesonly, maxdepth, _depth + 1):
yield results | 0.011009 |
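A standalone sketch of the same depth-first idea over a plain nested-dict trie (the dict representation and the helper below are assumptions for illustration; the class above additionally tracks leaf nodes and a depth limit):
def walk_trie(trie, prefix=""):
    for key, child in trie.items():
        if child:                      # non-empty dict -> internal node, recurse
            yield from walk_trie(child, prefix + key)
        else:                          # empty dict -> leaf
            yield prefix + key

trie = {"c": {"a": {"t": {}, "r": {}}}, "d": {"o": {"g": {}}}}
print(list(walk_trie(trie)))           # ['cat', 'car', 'dog']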
def sendFMS_same(self, CorpNum, PlusFriendID, Sender, Content, AltContent, AltSendType, SndDT, FilePath, ImageURL,
KakaoMessages, KakaoButtons, AdsYN=False, UserID=None, RequestNum=None):
"""
Send friend-talk (Kakao) image messages in bulk
:param CorpNum: Popbill member's business registration number
:param PlusFriendID: Plus Friend ID
:param Sender: sender phone number
:param Content: [broadcast] friend-talk message content
:param AltContent: [broadcast] substitute SMS content
:param AltSendType: substitute SMS type [blank - do not send, C - notification-talk content, A - substitute SMS content]
:param SndDT: reserved send date/time [format: yyyyMMddHHmmss]
:param FilePath: file path
:param ImageURL: image URL
:param KakaoMessages: friend-talk message list (array)
:param KakaoButtons: button list (up to 5)
:param AdsYN: whether the message is an advertisement
:param UserID: Popbill member ID
:param RequestNum : request number
:return: receiptNum (receipt number)
if PlusFriendID is None or PlusFriendID == '':
raise PopbillException(-99999999, "플러스친구 아이디가 입력되지 않았습니다.")
if Sender is None or Sender == '':
raise PopbillException(-99999999, "발신번호가 입력되지 않았습니다.")
req = {}
if PlusFriendID is not None or PlusFriendID != '':
req['plusFriendID'] = PlusFriendID
if Sender is not None or Sender != '':
req['snd'] = Sender
if Content is not None or Content != '':
req['content'] = Content
if AltContent is not None or AltContent != '':
req['altContent'] = AltContent
if AltSendType is not None or AltSendType != '':
req['altSendType'] = AltSendType
if SndDT is not None or SndDT != '':
req['sndDT'] = SndDT
if KakaoMessages is not None or KakaoMessages != '':
req['msgs'] = KakaoMessages
if ImageURL is not None or ImageURL != '':
req['imageURL'] = ImageURL
if KakaoButtons:
req['btns'] = KakaoButtons
if AdsYN:
req['adsYN'] = True
if RequestNum is not None or RequestNum != '':
req['requestNum'] = RequestNum
postData = self._stringtify(req)
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='file',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")
result = self._httppost_files('/FMS', postData, files, CorpNum, UserID)
return result.receiptNum | 0.001994 |
def sim_typo(
src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
"""Return the normalized typo similarity between two strings.
This is a wrapper for :py:meth:`Typo.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and shift, respectively (by default:
(1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless a
log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Normalized typo similarity
Examples
--------
>>> round(sim_typo('cat', 'hat'), 12)
0.472953716914
>>> round(sim_typo('Niall', 'Neil'), 12)
0.434971857071
>>> round(sim_typo('Colin', 'Cuilen'), 12)
0.430964390437
>>> sim_typo('ATCG', 'TAGC')
0.375
"""
return Typo().sim(src, tar, metric, cost, layout) | 0.00075 |
def rgb_to_ansi(color):
"""
Converts hex RGB to the 6x6x6 xterm color space
Args:
color (str): RGB color string in the format "#RRGGBB"
Returns:
str: ansi color string in the format "ansi_n", where n
is between 16 and 230
Reference:
https://github.com/chadj2/bash-ui/blob/master/COLORS.md
"""
if color[0] != '#' or len(color) != 7:
return None
try:
r = round(int(color[1:3], 16) / 51.0) # Normalize between 0-5
g = round(int(color[3:5], 16) / 51.0)
b = round(int(color[5:7], 16) / 51.0)
n = int(36 * r + 6 * g + b + 16)
return 'ansi_{0:d}'.format(n)
except ValueError:
return None | 0.002519 |
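A quick worked check of the mapping, assuming the rgb_to_ansi function above is in scope (the sample colors are arbitrary):
# "#ff8000": r = round(255/51) = 5, g = round(128/51) = 3, b = round(0/51) = 0
# n = 36*5 + 6*3 + 0 + 16 = 214
print(rgb_to_ansi("#ff8000"))   # 'ansi_214'
print(rgb_to_ansi("ff8000"))    # None (missing the leading '#')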
def tz_file(name):
"""
Open a timezone file from the zoneinfo subdir for reading.
:param name: The name of the timezone.
:type name: str
:rtype: file
"""
try:
filepath = tz_path(name)
return open(filepath, 'rb')
except TimezoneNotFound:
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
try:
return resource_stream(__name__, 'zoneinfo/' + name)
except FileNotFoundError:
return tz_path(name)
raise | 0.00123 |
def get_service():
"""Load the configured service."""
global _SERVICE_MANAGER
if _SERVICE_MANAGER is None:
_SERVICE_MANAGER = driver.DriverManager(
namespace='tvrenamer.data.services',
name=cfg.CONF.lookup_service,
invoke_on_load=True)
return _SERVICE_MANAGER.driver | 0.003058 |
def instantiate(self, params):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:type params: dict
:return:
:raise: CartoException
"""
try:
self.send(self.Meta.collection_endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | 0.004149 |
def indicate_fulfillment_fcp(self, wwpn, lun, host_port):
"""
Indicate completion of :term:`fulfillment` for this FCP storage volume
and provide identifying information (WWPN and LUN) about the actual
storage volume on the storage subsystem.
Manually indicating fulfillment is required for storage volumes that
will be used as boot devices for a partition. The specified host
port will be used to access the storage volume during boot of the
partition.
Because the CPC discovers storage volumes automatically, the
fulfillment of non-boot volumes does not need to be manually indicated
using this function (it may be indicated though before the CPC
discovers a working communications path to the volume, but the role
of the specified host port is not clear in this case).
This method performs the "Fulfill FCP Storage Volume" HMC operation.
Upon successful completion of this operation, the "fulfillment-state"
property of this storage volume object will have been set to
"complete". That is necessary for the CPC to be able to address and
connect to the volume.
If the "fulfillment-state" properties of all storage volumes in the
owning storage group are "complete", the owning storage group's
"fulfillment-state" property will also be set to "complete".
Parameters:
wwpn (:term:`string`):
World wide port name (WWPN) of the FCP storage subsystem containing
the storage volume,
as a hexadecimal number of up to 16 characters in any lexical case.
lun (:term:`string`):
Logical Unit Number (LUN) of the storage volume within its FCP
storage subsystem,
as a hexadecimal number of up to 16 characters in any lexical case.
host_port (:class:`~zhmcclient.Port`):
Storage port on the CPC that will be used to boot from.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - Storage Administrator"
task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# The operation requires exactly 16 characters in lower case
wwpn_16 = format(int(wwpn, 16), '016x')
lun_16 = format(int(lun, 16), '016x')
body = {
'world-wide-port-name': wwpn_16,
'logical-unit-number': lun_16,
'adapter-port-uri': host_port.uri,
}
self.manager.session.post(
self.uri + '/operations/fulfill-fcp-storage-volume',
body=body) | 0.000695 |
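The 16-character normalization applied to wwpn and lun above can be checked on its own; the sample WWPN value is an arbitrary assumption:
wwpn = "5005076802100C1B"
print(format(int(wwpn, 16), '016x'))   # '5005076802100c1b' (lower case, zero padded)
print(format(int("3a", 16), '016x'))   # '000000000000003a'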
def fork(self,name):
'''
Create fork and store it in current instance
'''
fork=deepcopy(self)
self[name]=fork
return fork | 0.029586 |
def get_db(cls):
"""Return the database for the collection"""
if cls._db:
return getattr(cls._client, cls._db)
return cls._client.get_default_database() | 0.010638 |
def delete(self, story, params={}, **options):
"""Deletes a story. A user can only delete stories they have created. Returns an empty data record.
Parameters
----------
story : {Id} Globally unique identifier for the story.
"""
path = "/stories/%s" % (story)
return self.client.delete(path, params, **options) | 0.010899 |
def start(self, labels=None):
"""Start specified timer(s).
Parameters
----------
labels : string or list, optional (default None)
Specify the label(s) of the timer(s) to be started. If it is
``None``, start the default timer with label specified by the
``dfltlbl`` parameter of :meth:`__init__`.
"""
# Default label is self.dfltlbl
if labels is None:
labels = self.dfltlbl
# If label is not a list or tuple, create a singleton list
# containing it
if not isinstance(labels, (list, tuple)):
labels = [labels,]
# Iterate over specified label(s)
t = timer()
for lbl in labels:
# On first call to start for a label, set its accumulator to zero
if lbl not in self.td:
self.td[lbl] = 0.0
self.t0[lbl] = None
# Record the time at which start was called for this lbl if
# it isn't already running
if self.t0[lbl] is None:
self.t0[lbl] = t | 0.002732 |
def delete_knowledge_base(project_id, knowledge_base_id):
"""Deletes a specific Knowledge base.
Args:
project_id: The GCP project linked with the agent.
knowledge_base_id: Id of the Knowledge base."""
import dialogflow_v2beta1 as dialogflow
client = dialogflow.KnowledgeBasesClient()
knowledge_base_path = client.knowledge_base_path(
project_id, knowledge_base_id)
response = client.delete_knowledge_base(knowledge_base_path)
    print('Knowledge Base deleted: {}'.format(response))
def regex_condition(func):
"""
If a condition is given as string instead of a function, it is turned
into a regex-matching function.
"""
@wraps(func)
def regex_condition_wrapper(condition, *args, **kwargs):
if isinstance(condition, string_types):
condition = maybe | partial(re.match, condition)
return func(condition, *args, **kwargs)
return regex_condition_wrapper | 0.00237 |
def _dispatch(self, textgroup, directory):
""" Run the dispatcher over a textgroup.
:param textgroup: Textgroup object that needs to be dispatched
:param directory: Directory in which the textgroup was found
"""
if textgroup.id in self.dispatcher.collection:
self.dispatcher.collection[textgroup.id].update(textgroup)
else:
self.dispatcher.dispatch(textgroup, path=directory)
for work_urn, work in textgroup.works.items():
if work_urn in self.dispatcher.collection[textgroup.id].works:
self.dispatcher.collection[work_urn].update(work) | 0.003101 |
def handle_parse_result(self, ctx, opts, args):
""" Save value for this option in configuration
if key/value pair doesn't already exist.
If the old value in the config was deprecated,
it needs to be updated to the new value format,
but the value keeps the same "meaning".
"""
gandi = ctx.obj
needs_update = False
value, args = click.Option.handle_parse_result(self, ctx, opts, args)
if value is not None:
previous_value = gandi.get(global_=True, key=self.name)
if isinstance(self.type, GandiChoice):
if value == previous_value:
needs_update = True
value = self.type.convert_deprecated_value(value)
if not previous_value or needs_update:
gandi.configure(global_=True, key=self.name, val=value)
opts[self.name] = value
value, args = click.Option.handle_parse_result(self, ctx, opts, args)
return value, args | 0.001994 |
def _config_parser_to_defaultdict(config_parser):
"""Convert a ConfigParser to a defaultdict.
Args:
config_parser (ConfigParser): A ConfigParser.
"""
config = defaultdict(defaultdict)
for section, section_content in config_parser.items():
if section != 'DEFAULT':
for option, option_value in section_content.items():
config[section][option] = option_value
return config | 0.002283 |
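A small usage sketch, assuming the helper above together with its implied imports (from collections import defaultdict); the section and option names are made up:
from configparser import ConfigParser

parser = ConfigParser()
parser.read_string("[server]\nhost = localhost\nport = 8080\n")
config = _config_parser_to_defaultdict(parser)
print(config["server"]["host"])   # 'localhost'
print(config["server"]["port"])   # '8080' (ConfigParser values stay strings)
print("missing" in config)        # False, yet config["missing"] would not raise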
def __loadindcomps(self):
''' import industry comps '''
csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
check_words = re.compile(r'^[\d]{2,}[\w]?')
for i in csv_data:
if check_words.match(i[2]):
try:
result[i[2]].append(i[0].decode('utf-8'))
except (ValueError, KeyError):
try:
result[i[2]] = [i[0].decode('utf-8')]
except KeyError:
pass
return result | 0.002778 |
def search_tagged_source_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_tagged_source_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_tagged_source_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data | 0.001919 |
def main():
""" Main entry point of the CLI. """
try:
args = sys.argv[1:]
try:
_, args = getopt.getopt(args, MAIN_OPTS, MAIN_LONG_OPTS)
except getopt.GetoptError as e:
error(str(e))
sys.exit(1)
if args[0] == 'prompt':
try:
from topydo.ui.prompt.Prompt import PromptApplication
PromptApplication().run()
except ImportError:
error("Some additional dependencies for prompt mode were not installed, please install with 'pip3 install topydo[prompt]'")
elif args[0] == 'columns':
try:
from topydo.ui.columns.Main import UIApplication
UIApplication().run()
except ImportError:
error("Some additional dependencies for column mode were not installed, please install with 'pip3 install topydo[columns]'")
except NameError as err:
if _WINDOWS:
error("Column mode is not supported on Windows.")
else:
error("Could not load column mode: {}".format(err))
else:
CLIApplication().run()
except IndexError:
CLIApplication().run() | 0.002387 |
def on_message(self, opcode, message):
"""
The primary dispatch function to handle incoming WebSocket messages.
:param int opcode: The opcode of the message that was received.
:param bytes message: The data contained within the message.
"""
self.logger.debug("processing {0} (opcode: 0x{1:02x}) message".format(self._opcode_names.get(opcode, 'UNKNOWN'), opcode))
if opcode == self._opcode_close:
self.close()
elif opcode == self._opcode_ping:
if len(message) > 125:
self.close()
return
self.send_message(self._opcode_pong, message)
elif opcode == self._opcode_pong:
pass
elif opcode == self._opcode_binary:
self.on_message_binary(message)
elif opcode == self._opcode_text:
try:
message = self._decode_string(message)
except UnicodeDecodeError:
self.logger.warning('closing connection due to invalid unicode within a text message')
self.close()
else:
self.on_message_text(message)
elif opcode == self._opcode_continue:
self.close()
else:
self.logger.warning("received unknown opcode: {0} (0x{0:02x})".format(opcode))
self.close() | 0.031475 |
def pair_tree_creator(meta_id):
"""Splits string into a pairtree path."""
chunks = []
for x in range(0, len(meta_id)):
if x % 2:
continue
if (len(meta_id) - 1) == x:
chunk = meta_id[x]
else:
chunk = meta_id[x: x + 2]
chunks.append(chunk)
return os.sep + os.sep.join(chunks) + os.sep | 0.002732 |
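Example output on a POSIX system (os.sep == '/'), assuming the function above is in scope; the identifiers are arbitrary:
print(pair_tree_creator("abcde"))     # '/ab/cd/e/'
print(pair_tree_creator("coda1234"))  # '/co/da/12/34/'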
def rolling_max(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the maximum value of different subsets over
this SArray.
The subset that the maximum is calculated over is defined as an
inclusive range relative to the position to each value in the SArray,
using `window_start` and `window_end`. For a better understanding of
this, see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the maximum relative to the
current value.
window_end : int
The end of the subset to calculate the maximum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the maximum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling max with a window including the previous 2 entries including
the current:
>>> sa.rolling_max(-2,0)
dtype: int
Rows: 5
[None, None, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3)
0 NaN
1 NaN
2 3
3 4
4 5
dtype: float64
Same rolling max operation, but 2 minimum observations:
>>> sa.rolling_max(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 2, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, min_periods=2)
0 NaN
1 2
2 3
3 4
4 5
dtype: float64
A rolling max with a size of 3, centered around the current:
>>> sa.rolling_max(-1,1)
dtype: int
Rows: 5
[None, 3, 4, 5, None]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, center=True)
0 NaN
1 3
2 4
3 5
4 NaN
dtype: float64
A rolling max with a window including the current and the 2 entries
following:
>>> sa.rolling_max(0,2)
dtype: int
Rows: 5
[3, 4, 5, None, None]
A rolling max with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_max(-2,-1)
dtype: int
Rows: 5
[None, None, 2, 3, 4]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__max__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations)) | 0.001655 |
def post_address_subcommand(search_terms, vcard_list, parsable):
"""Print a contact table. with all postal / mailing addresses
:param search_terms: used as search term to filter the contacts before
printing
:type search_terms: str
:param vcard_list: the vcards to search for matching entries which should
be printed
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:returns: None
:rtype: None
"""
all_post_address_list = []
matching_post_address_list = []
for vcard in vcard_list:
# vcard name
if config.display_by_name() == "first_name":
name = vcard.get_first_name_last_name()
else:
name = vcard.get_last_name_first_name()
# create post address line list
post_address_line_list = []
if parsable:
for type, post_address_list in sorted(vcard.get_post_addresses().items(),
key=lambda k: k[0].lower()):
for post_address in post_address_list:
post_address_line_list.append(
"\t".join([str(post_address), name, type]))
else:
for type, post_address_list in sorted(vcard.get_formatted_post_addresses().items(),
key=lambda k: k[0].lower()):
for post_address in sorted(post_address_list):
post_address_line_list.append(
"\t".join([name, type, post_address]))
# add to matching and all post address lists
for post_address_line in post_address_line_list:
if re.search(search_terms,
"%s\n%s" % (post_address_line, post_address_line),
re.IGNORECASE | re.DOTALL):
matching_post_address_list.append(post_address_line)
# collect all post addresses in a different list as fallback
all_post_address_list.append(post_address_line)
if matching_post_address_list:
if parsable:
print('\n'.join(matching_post_address_list))
else:
list_post_addresses(matching_post_address_list)
elif all_post_address_list:
if parsable:
print('\n'.join(all_post_address_list))
else:
list_post_addresses(all_post_address_list)
else:
if not parsable:
print("Found no post adresses")
sys.exit(1) | 0.001937 |
def generate_s3_bucket():
"""Create the blockade bucket if not already there."""
logger.debug("[#] Setting up S3 bucket")
client = boto3.client("s3", region_name=PRIMARY_REGION)
buckets = client.list_buckets()
matches = [x for x in buckets.get('Buckets', list())
if x['Name'].startswith(S3_BUCKET)]
if len(matches) > 0:
logger.debug("[*] Bucket already exists")
return matches.pop()
response = client.create_bucket(
Bucket=S3_BUCKET,
CreateBucketConfiguration={
'LocationConstraint': PRIMARY_REGION
}
)
logger.info("[#] Successfully setup the S3 bucket")
return response | 0.001473 |
def selected_objects(self):
"""Filter out objects outside table boundaries"""
return [
obj
for obj in self.text_objects
if contains_or_overlap(self.table_bbox, obj.bbox)
] | 0.008621 |
def add(self, member_name, collection_name='', parent=None, uid='',
**kwargs):
"""
:param member_name: singular name of the resource. It should be the
appropriate singular version of the resource given your locale
and used with members of the collection.
:param collection_name: plural name of the resource. It will be used
to refer to the resource collection methods and should be a
plural version of the ``member_name`` argument.
Note: if collection_name is empty, it means resource is singular
:param parent: parent resource name or object.
:param uid: unique name for the resource
:param kwargs:
view: custom view to overwrite the default one.
the rest of the keyword arguments are passed to
add_resource_routes call.
:return: ResourceMap object
"""
# self is the parent resource on which this method is called.
parent = (self.resource_map.get(parent) if type(parent)
is str else parent or self)
prefix = kwargs.pop('prefix', '')
uid = (uid or
':'.join(filter(bool, [parent.uid, prefix, member_name])))
if uid in self.resource_map:
raise ValueError('%s already exists in resource map' % uid)
# Use id_name of parent for singular views to make url generation
# easier
id_name = kwargs.get('id_name', '')
if not id_name and parent:
id_name = parent.id_name
new_resource = Resource(self.config, member_name=member_name,
collection_name=collection_name,
parent=parent, uid=uid,
id_name=id_name,
prefix=prefix)
view = maybe_dotted(
kwargs.pop('view', None) or get_default_view_path(new_resource))
for name, val in kwargs.pop('view_args', {}).items():
setattr(view, name, val)
root_resource = self.config.get_root_resource()
view.root_resource = root_resource
new_resource.view = view
path_segs = []
kwargs['path_prefix'] = ''
for res in new_resource.ancestors:
if not res.is_singular:
if res.id_name:
id_full = res.id_name
else:
id_full = "%s_%s" % (res.member_name, DEFAULT_ID_NAME)
path_segs.append('%s/{%s}' % (res.collection_name, id_full))
else:
path_segs.append(res.member_name)
if path_segs:
kwargs['path_prefix'] = '/'.join(path_segs)
if prefix:
kwargs['path_prefix'] += '/' + prefix
name_segs = [a.member_name for a in new_resource.ancestors]
name_segs.insert(1, prefix)
name_segs = [seg for seg in name_segs if seg]
if name_segs:
kwargs['name_prefix'] = '_'.join(name_segs) + ':'
new_resource.renderer = kwargs.setdefault(
'renderer', view._default_renderer)
kwargs.setdefault('auth', root_resource.auth)
kwargs.setdefault('factory', root_resource.default_factory)
_factory = maybe_dotted(kwargs['factory'])
kwargs['auth'] = kwargs.get('auth', root_resource.auth)
kwargs['http_cache'] = kwargs.get(
'http_cache', root_resource.http_cache)
new_resource.action_route_map = add_resource_routes(
self.config, view, member_name, collection_name,
**kwargs)
self.resource_map[uid] = new_resource
# add all route names for this resource as keys in the dict,
# so it's easy to find it in the view.
self.resource_map.update(dict.fromkeys(
list(new_resource.action_route_map.values()),
new_resource))
# Store resources in {modelName: resource} map if:
# * Its view has Model defined
# * It's not singular
# * Its parent is root or it's not already stored
model = new_resource.view.Model
is_collection = model is not None and not new_resource.is_singular
if is_collection:
is_needed = (model.__name__ not in self.model_collections or
new_resource.parent is root_resource)
if is_needed:
self.model_collections[model.__name__] = new_resource
parent.children.append(new_resource)
view._resource = new_resource
view._factory = _factory
return new_resource | 0.000648 |
def state_length(state, size):
"""Check that the state is the given size."""
if len(state) != size:
raise ValueError('Invalid state: there must be one entry per '
'node in the network; this state has {} entries, but '
'there are {} nodes.'.format(len(state), size))
return True | 0.002882 |
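Usage is straightforward, assuming the validator above is in scope:
print(state_length((0, 1, 0), 3))   # True
try:
    state_length((0, 1), 3)
except ValueError as err:
    print(err)                      # reports 2 entries for 3 nodes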
def create_action_token(self, action, expires_in):
"""
Create a url safe action token attached to the user
:param action:
:param expires_in:
:return:
"""
return utils.sign_url_safe(self.user.id,
secret_key=get_jwt_secret(),
salt=action,
expires_in=expires_in) | 0.004762 |
def update(self, E=None, **F):
"""
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k in F: D[k] = F[k]
"""
if hasattr(E, 'keys'):
self.extend(E)
else:
for key, value in E:
self._set_key(key, value)
self.extend(F) | 0.007533 |
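The method mirrors the standard dict.update protocol described in the docstring; the three branches can be illustrated with a plain dict:
d = {}
d.update({"a": 1})       # E has .keys(): copy key/value pairs
d.update([("b", 2)])     # E without .keys(): iterate (key, value) tuples
d.update(c=3)            # keyword arguments F
print(d)                 # {'a': 1, 'b': 2, 'c': 3}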
def _publish_metrics(self, name, prev_keys, key, data):
"""Recursively publish keys"""
value = data[key]
keys = prev_keys + [key]
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(name, keys, new_key, value)
elif isinstance(value, (float, int, long)):
joined_keys = '.'.join(keys)
if name:
publish_key = '{}.{}'.format(name, joined_keys)
else:
publish_key = joined_keys
if isinstance(value, bool):
value = int(value)
self.publish(publish_key, value) | 0.003086 |
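The recursion above can be pictured with a standalone flattener that turns nested dicts into dotted metric names (a simplified sketch, not the collector itself; the sample data is invented):
def flatten_metrics(data, prefix=""):
    for key, value in data.items():
        name = "{}.{}".format(prefix, key) if prefix else key
        if isinstance(value, dict):
            for item in flatten_metrics(value, name):
                yield item
        elif isinstance(value, (int, float)):
            yield name, int(value) if isinstance(value, bool) else value

stats = {"mem": {"used": 1024, "free": 512}, "uptime": 3600.5}
for metric, value in flatten_metrics(stats):
    print(metric, value)   # mem.used 1024 / mem.free 512 / uptime 3600.5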
def with_labels(self, selectable, base=None, **mapper_args):
"""Map a selectable directly, wrapping the
selectable in a subquery with labels.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
"""
# TODO give meaningful aliases
return self.map(
expression._clause_element_as_expr(selectable).
select(use_labels=True).
alias('foo'), base=base, **mapper_args) | 0.005061 |
def add_ignored(self, ignored):
"""Add ignored text to the node. This will add the length of the ignored text to the node's
consumed property.
"""
if ignored:
if self.ignored:
self.ignored = ignored + self.ignored
else:
self.ignored = ignored
self.consumed += len(ignored) | 0.012461 |
def parse(readDataInstance, sectionHeadersInstance):
"""
Returns a new L{Sections} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.
@type sectionHeadersInstance: instance
@param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.
@rtype: L{Sections}
@return: A new L{Sections} object.
"""
sData = Sections()
for sectionHdr in sectionHeadersInstance:
if sectionHdr.sizeOfRawData.value > len(readDataInstance.data):
print "Warning: SizeOfRawData is larger than file."
if sectionHdr.pointerToRawData.value > len(readDataInstance.data):
print "Warning: PointerToRawData points beyond the end of the file."
if sectionHdr.misc.value > 0x10000000:
print "Warning: VirtualSize is extremely large > 256MiB."
if sectionHdr.virtualAddress.value > 0x10000000:
print "Warning: VirtualAddress is beyond 0x10000000"
# skip sections with pointerToRawData == 0. According to PECOFF, it contains uninitialized data
if sectionHdr.pointerToRawData.value:
sData.append(readDataInstance.read(sectionHdr.sizeOfRawData.value))
return sData | 0.011229 |
def has_role(self, identifiers, role_s, log_results=True):
"""
:param identifiers: a collection of identifiers
:type identifiers: subject_abcs.IdentifierCollection
:param role_s: a collection of 1..N Role identifiers
:type role_s: Set of String(s)
:param log_results: states whether to log results (True) or allow the
calling method to do so instead (False)
:type log_results: bool
:returns: a set of tuple(s), containing the role and a Boolean
indicating whether the user is a member of the Role
"""
self.assert_realms_configured()
results = collections.defaultdict(bool) # defaults to False
for role, has_role in self._has_role(identifiers, role_s):
# checkrole expected format is: (role, Boolean)
# As long as one realm returns True for a role, a subject is
# considered a member of that Role.
# Given that (True or False == True), assign accordingly:
results[role] = results[role] or has_role
if log_results:
self.notify_event(identifiers,
list(results.items()),
'AUTHORIZATION.RESULTS') # before freezing
results = set(results.items())
return results | 0.001465 |
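The role-membership merge boils down to OR-ing per-role answers across realms; a minimal illustration of that accumulation pattern (the realm answers are invented):
import collections

results = collections.defaultdict(bool)            # missing roles default to False
realm_answers = [("admin", False), ("editor", True), ("admin", True)]
for role, has_role in realm_answers:
    results[role] = results[role] or has_role      # any True wins
print(set(results.items()))                        # {('admin', True), ('editor', True)}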
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PuppetDBCollector, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8080,
'path': 'PuppetDB',
})
return config | 0.006309 |
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) | 0.006108 |
def pop(self, queue_name):
"""
Pops a task off the queue.
:param queue_name: The name of the queue. Usually handled by the
``Gator`` instance.
:type queue_name: string
:returns: The data for the task.
:rtype: string
"""
task_id = self.conn.lpop(queue_name)
data = self.conn.get(task_id)
self.conn.delete(task_id)
return data | 0.004695 |
def endpoint_update(**kwargs):
"""
Executor for `globus endpoint update`
"""
# validate params. Requires a get call to check the endpoint type
client = get_client()
endpoint_id = kwargs.pop("endpoint_id")
get_res = client.get_endpoint(endpoint_id)
if get_res["host_endpoint_id"]:
endpoint_type = "shared"
elif get_res["is_globus_connect"]:
endpoint_type = "personal"
elif get_res["s3_url"]:
endpoint_type = "s3"
else:
endpoint_type = "server"
validate_endpoint_create_and_update_params(
endpoint_type, get_res["subscription_id"], kwargs
)
# make the update
ep_doc = assemble_generic_doc("endpoint", **kwargs)
res = client.update_endpoint(endpoint_id, ep_doc)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message") | 0.001188 |
def set_pay_mch(self, mchid, s_pappid):
"""
Bind a merchant ID to the invoicing platform and enable invoicing after payment.
For details, see
https://mp.weixin.qq.com/wiki?id=mp1496561731_2Z55U
:param mchid: WeChat Pay merchant ID
:param s_pappid: the invoicing platform's identifier on WeChat; merchants must obtain it from the invoicing platform
"""
return self._post(
'setbizattr',
params={
'action': 'set_pay_mch',
},
data={
'paymch_info': {
'mchid': mchid,
's_pappid': s_pappid,
},
},
) | 0.00365 |
def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
"""Adds a batch id to the invalid cache along with the id of the
transaction that was rejected and any error message or extended data.
Removes that batch id from the pending set. The cache is only
temporary, and the batch info will be purged after one hour.
Args:
txn_id (str): The id of the invalid transaction
message (str, optional): Message explaining why batch is invalid
extended_data (bytes, optional): Additional error data
"""
invalid_txn_info = {'id': txn_id}
if message is not None:
invalid_txn_info['message'] = message
if extended_data is not None:
invalid_txn_info['extended_data'] = extended_data
with self._lock:
for batch_id, txn_ids in self._batch_info.items():
if txn_id in txn_ids:
if batch_id not in self._invalid:
self._invalid[batch_id] = [invalid_txn_info]
else:
self._invalid[batch_id].append(invalid_txn_info)
self._pending.discard(batch_id)
self._update_observers(batch_id, ClientBatchStatus.INVALID)
return | 0.001517 |
def output(self, context, *args, **kwargs):
"""
Allow all readers to eventually use either the output_fields or the output_type option (but not both).
"""
output_fields = self.output_fields
output_type = self.output_type
if output_fields and output_type:
raise UnrecoverableError("Cannot specify both output_fields and output_type option.")
if self.output_type:
context.set_output_type(self.output_type)
if self.output_fields:
context.set_output_fields(self.output_fields)
yield | 0.007042 |
def is_reserved_ip(self, ip):
"""Check if the given ip address is in a reserved ipv4 address space.
:param ip: ip address
:return: boolean
"""
theip = ipaddress(ip)
for res in self._reserved_netmasks:
if theip in ipnetwork(res):
return True
return False | 0.005917 |
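An equivalent check written against the standard library's ipaddress module (a sketch only; the original uses its own ipaddress/ipnetwork helpers and a configured reserved-netmask list):
import ipaddress

RESERVED_NETMASKS = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "127.0.0.0/8"]

def is_reserved(ip):
    addr = ipaddress.ip_address(ip)
    return any(addr in ipaddress.ip_network(block) for block in RESERVED_NETMASKS)

print(is_reserved("192.168.1.20"))   # True
print(is_reserved("8.8.8.8"))        # False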
def set_options(self, option_type, option_dict, force_options=False):
"""set plot options """
if force_options:
self.options[option_type].update(option_dict)
elif (option_type == 'yAxis' or option_type == 'xAxis') and isinstance(option_dict, list):
# For multi-Axis
self.options[option_type] = MultiAxis(option_type)
for each_dict in option_dict:
self.options[option_type].update(**each_dict)
elif option_type == 'colors':
self.options["colors"].set_colors(option_dict) # option_dict should be a list
elif option_type in ["global" , "lang"]: #Highcharts.setOptions:
self.setOptions[option_type].update_dict(**option_dict)
else:
self.options[option_type].update_dict(**option_dict) | 0.010856 |
def form_valid(self, forms):
"""
If the form is valid, save the associated model.
"""
for key, form in forms.items():
setattr(self, '{}_object'.format(key), form.save())
return super(MultipleModelFormMixin, self).form_valid(forms) | 0.007092 |
def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs):
""" Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
fit_method : string
Which method to fit the model with
intervals: boolean
Whether to return prediction intervals
Returns
----------
- pd.DataFrame with predicted values
"""
predictions = []
for t in range(0,h):
x = GARCH(p=self.p, q=self.q, data=self.data[0:-h+t])
if fit_once is False:
x.fit(method=fit_method, printer=False)
if t == 0:
if fit_once is True:
x.fit(method=fit_method, printer=False)
saved_lvs = x.latent_variables
predictions = x.predict(1, intervals=intervals)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(1, intervals=intervals)])
if intervals is True:
predictions.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True)
else:
predictions.rename(columns={0:self.data_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions | 0.008661 |
def _flush_decoder(self):
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
buf = self._decoder.decompress(b'')
return buf + self._decoder.flush()
return b'' | 0.006944 |
def _fetch_all_error_message(self, response):
"""
:type response: requests.Response
:rtype: list[str]
"""
response_content_string = response.content.decode()
try:
error_dict = converter.json_to_class(dict, response_content_string)
return self._fetch_error_descriptions(error_dict)
except ValueError:
return [response_content_string] | 0.004673 |
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source | 0.006633 |
def dim(self):
"""
NAME:
dim
PURPOSE:
return the dimension of the Orbit
INPUT:
(none)
OUTPUT:
dimension
HISTORY:
2011-02-03 - Written - Bovy (NYU)
"""
if len(self._orb.vxvv) == 2:
return 1
elif len(self._orb.vxvv) == 3 or len(self._orb.vxvv) == 4:
return 2
elif len(self._orb.vxvv) == 5 or len(self._orb.vxvv) == 6:
return 3 | 0.00396 |
def count(self, seqs, nreport=100, scan_rc=True):
"""
count the number of matches above the cutoff
returns an iterator of lists containing integer counts
"""
for matches in self.scan(seqs, nreport, scan_rc):
counts = [len(m) for m in matches]
yield counts | 0.00627 |