def proxy_uri(self, value):
"""
Set the Proxy-Uri option of a request.
:param value: the Proxy-Uri value
"""
option = Option()
option.number = defines.OptionRegistry.PROXY_URI.number
option.value = str(value)
        self.add_option(option)
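
A minimal usage sketch (illustrative, not from the source), assuming a CoAPthon-style Request class in which proxy_uri is a property backed by this setter:

req = Request()                                   # hypothetical Request exposing this setter as a property
req.proxy_uri = "coap://example.org/resource"     # adds a PROXY_URI option to the message
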
def stereo_bm_preset(self, value):
"""Set private ``_stereo_bm_preset`` and reset ``_block_matcher``."""
if value in (cv2.STEREO_BM_BASIC_PRESET,
cv2.STEREO_BM_FISH_EYE_PRESET,
cv2.STEREO_BM_NARROW_PRESET):
self._bm_preset = value
else:
raise InvalidBMPresetError("Stereo BM preset must be defined as "
"cv2.STEREO_BM_*_PRESET.")
        self._replace_bm()
def add_val(self, subj: Node, pred: URIRef, json_obj: JsonObj, json_key: str,
valuetype: Optional[URIRef] = None) -> Optional[BNode]:
"""
Add the RDF representation of val to the graph as a target of subj, pred. Note that FHIR lists are
        represented as a list of BNODE objects with a fhir:index discriminant
:param subj: graph subject
:param pred: predicate
:param json_obj: object containing json_key
:param json_key: name of the value in the JSON resource
:param valuetype: value type if NOT determinable by predicate
:return: value node if target is a BNode else None
"""
if json_key not in json_obj:
print("Expecting to find object named '{}' in JSON:".format(json_key))
print(json_obj._as_json_dumps())
print("entry skipped")
return None
val = json_obj[json_key]
if isinstance(val, List):
list_idx = 0
for lv in val:
entry_bnode = BNode()
# TODO: this is getting messy. Refactor and clean this up
if pred == FHIR.Bundle.entry:
entry_subj = URIRef(lv.fullUrl)
self.add(entry_bnode, FHIR.index, Literal(list_idx))
self.add_val(entry_bnode, FHIR.Bundle.entry.fullUrl, lv, 'fullUrl')
self.add(entry_bnode, FHIR.Bundle.entry.resource, entry_subj)
self.add(subj, pred, entry_bnode)
entry_mv = FHIRMetaVocEntry(self._vocabulary, FHIR.BundleEntryComponent)
for k, p in entry_mv.predicates().items():
if k not in ['resource', 'fullUrl'] and k in lv:
print("---> adding {}".format(k))
self.add_val(subj, p, lv, k)
FHIRResource(self._vocabulary, None, self._base_uri, lv.resource, self._g,
False, self._replace_narrative_text, False, resource_uri=entry_subj)
else:
self.add(entry_bnode, FHIR.index, Literal(list_idx))
if isinstance(lv, JsonObj):
self.add_value_node(entry_bnode, pred, lv, valuetype)
else:
vt = self._meta.predicate_type(pred)
atom_type = self._meta.primitive_datatype_nostring(vt) if vt else None
self.add(entry_bnode, FHIR.value, Literal(lv, datatype=atom_type))
self.add(subj, pred, entry_bnode)
list_idx += 1
else:
vt = self._meta.predicate_type(pred) if not valuetype else valuetype
if self._meta.is_atom(pred):
if self._replace_narrative_text and pred == FHIR.Narrative.div and len(val) > 120:
val = REPLACED_NARRATIVE_TEXT
self.add(subj, pred, Literal(val))
else:
v = BNode()
if self._meta.is_primitive(vt):
self.add(v, FHIR.value, Literal(str(val), datatype=self._meta.primitive_datatype_nostring(vt, val)))
else:
self.add_value_node(v, pred, val, valuetype)
self.add(subj, pred, v)
if pred == FHIR.Reference.reference:
self.add_reference(subj, val)
self.add_extension_val(v, json_obj, json_key)
return v
        return None
def run_dssp(pdb, path=True, outfile=None):
"""Uses DSSP to find helices and extracts helices from a pdb file or string.
Parameters
----------
pdb : str
Path to pdb file or string.
path : bool, optional
Indicates if pdb is a path or a string.
outfile : str, optional
Filepath for storing the dssp output.
Returns
-------
dssp_out : str
Std out from DSSP.
"""
if not path:
if type(pdb) == str:
pdb = pdb.encode()
try:
temp_pdb = tempfile.NamedTemporaryFile(delete=False)
temp_pdb.write(pdb)
temp_pdb.seek(0)
dssp_out = subprocess.check_output(
[global_settings['dssp']['path'], temp_pdb.name])
temp_pdb.close()
finally:
os.remove(temp_pdb.name)
else:
dssp_out = subprocess.check_output(
[global_settings['dssp']['path'], pdb])
# Python 3 string formatting.
dssp_out = dssp_out.decode()
if outfile:
with open(outfile, 'w') as outf:
outf.write(dssp_out)
    return dssp_out
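
A hedged usage sketch; it assumes the DSSP executable is configured under global_settings['dssp']['path'], and the pdb path is a placeholder:

dssp_text = run_dssp('model.pdb', path=True, outfile='model.dssp')   # 'model.pdb' is an illustrative path
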
def resolve(self, obj):
"""
Resolve a reference to an entry point or a variable in a module.
If ``obj`` is a ``module:varname`` reference to an object, :func:`resolve_reference` is
used to resolve it. If it is a string of any other kind, the named entry point is loaded
from this container's namespace. Otherwise, ``obj`` is returned as is.
:param obj: an entry point identifier, an object reference or an arbitrary object
:return: the loaded entry point, resolved object or the unchanged input value
:raises LookupError: if ``obj`` was a string but the named entry point was not found
"""
if not isinstance(obj, str):
return obj
if ':' in obj:
return resolve_reference(obj)
value = self._entrypoints.get(obj)
if value is None:
raise LookupError('no such entry point in {}: {}'.format(self.namespace, obj))
if isinstance(value, EntryPoint):
value = self._entrypoints[obj] = value.load()
        return value
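
A usage sketch of the three resolution paths; the container class and entry-point names are assumptions, not from the source:

container = ComponentContainer('myapp.plugins')       # hypothetical container over an entry-point namespace
encoder_cls = container.resolve('json:JSONEncoder')    # 'module:varname' -> resolve_reference()
plugin = container.resolve('csv_exporter')             # bare name -> entry point lookup in the namespace
obj = container.resolve(42)                            # non-string input is returned unchanged
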
def get_amount_of_tweets(self):
""" Returns current amount of tweets available within this instance
:returns: The amount of tweets currently available
:raises: TwitterSearchException
"""
if not self.__response:
raise TwitterSearchException(1013)
return (len(self.__response['content']['statuses'])
if self.__order_is_search
                else len(self.__response['content']))
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
        return self
def archive(self):
"""
Archives (soft delete) all the records matching the query.
This assumes that the model allows archiving (not many do - especially
transactional documents).
Internal implementation sets the active field to False.
"""
ids = self.rpc_model.search(self.domain, context=self.context)
if ids:
            self.rpc_model.write(ids, {'active': False})
def get_window(window, Nx, fftbins=True):
'''Compute a window function.
This is a wrapper for `scipy.signal.get_window` that additionally
supports callable or pre-computed windows.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window specification:
- If string, it's the name of the window function (e.g., `'hann'`)
- If tuple, it's the name of the window function and any parameters
(e.g., `('kaiser', 4.0)`)
- If numeric, it is treated as the beta parameter of the `'kaiser'`
window, as in `scipy.signal.get_window`.
- If callable, it's a function that accepts one integer argument
(the window length)
- If list-like, it's a pre-computed window of the correct length `Nx`
Nx : int > 0
The length of the window
fftbins : bool, optional
If True (default), create a periodic window for use with FFT
If False, create a symmetric window for filter design applications.
Returns
-------
get_window : np.ndarray
A window of length `Nx` and type `window`
See Also
--------
scipy.signal.get_window
Notes
-----
This function caches at level 10.
Raises
------
ParameterError
If `window` is supplied as a vector of length != `n_fft`,
or is otherwise mis-specified.
'''
if six.callable(window):
return window(Nx)
elif (isinstance(window, (six.string_types, tuple)) or
np.isscalar(window)):
# TODO: if we add custom window functions in librosa, call them here
return scipy.signal.get_window(window, Nx, fftbins=fftbins)
elif isinstance(window, (np.ndarray, list)):
if len(window) == Nx:
return np.asarray(window)
raise ParameterError('Window size mismatch: '
'{:d} != {:d}'.format(len(window), Nx))
else:
        raise ParameterError('Invalid window specification: {}'.format(window))
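
A short usage sketch of the three accepted specification styles (assuming numpy is available as np):

import numpy as np

win_name = get_window('hann', 1024)                    # string name, resolved via scipy.signal
win_call = get_window(lambda n: np.ones(n), 1024)      # callable taking the window length
win_pre = get_window(np.hanning(1024), 1024)           # pre-computed vector of matching length
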
def _read_opt_ilnp(self, code, *, desc):
"""Read HOPOPT ILNP Nonce option.
Structure of HOPOPT ILNP Nonce option [RFC 6744]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Option Type | Option Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ Nonce Value /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.ilnp.type Option Type
0 0 hopopt.ilnp.type.value Option Number
0 0 hopopt.ilnp.type.action Action (10)
0 2 hopopt.ilnp.type.change Change Flag (0)
1 8 hopopt.ilnp.length Length of Option Data
2 16 hopopt.ilnp.value Nonce Value
"""
_type = self._read_opt_type(code)
_size = self._read_unpack(1)
_nval = self._read_fileng(_size)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
value=_nval,
)
        return opt
def _populate_user(self):
"""
Populates our User object with information from the LDAP directory.
"""
self._populate_user_from_attributes()
self._populate_user_from_group_memberships()
self._populate_user_from_dn_regex()
        self._populate_user_from_dn_regex_negation()
def etree_write(tree, stream):
"""
Write XML ElementTree 'root' content into 'stream'.
:param tree: XML ElementTree object
:param stream: File or file-like object can write to
"""
try:
tree.write(stream, encoding="utf-8", xml_declaration=True)
except TypeError:
tree.write(stream, encoding="unicode", xml_declaration=True) | Write XML ElementTree 'root' content into 'stream'.
:param tree: XML ElementTree object
:param stream: File or file-like object can write to |
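
A runnable sketch showing both code paths: a binary stream accepts encoding="utf-8", while a text stream raises TypeError and falls back to encoding="unicode":

import io
import xml.etree.ElementTree as ET

root = ET.Element("config")
ET.SubElement(root, "item", name="alpha")
tree = ET.ElementTree(root)

with open("config.xml", "wb") as fileobj:   # binary file: the first write() succeeds
    etree_write(tree, fileobj)

buf = io.StringIO()                         # text stream: falls back to encoding="unicode"
etree_write(tree, buf)
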
def artist_update(self, artist_id, name=None, urls=None, alias=None,
group=None):
"""Function to update artists (Requires Login) (UNTESTED).
Only the artist_id parameter is required. The other parameters are
optional.
Parameters:
            artist_id (int): The id of the artist to update.
name (str): The artist's name.
urls (str): A list of URLs associated with the artist, whitespace
delimited.
alias (str): The artist that this artist is an alias for. Simply
enter the alias artist's name.
            group (str): The group or circle that this artist is a member of.
Simply enter the group's name.
"""
params = {
'id': artist_id,
'artist[name]': name,
'artist[urls]': urls,
'artist[alias]': alias,
'artist[group]': group
}
        return self._get('artist/update', params, method='PUT')
def get_connection_info(connection_file=None, unpack=False, profile=None):
"""Return the connection information for the current Kernel.
Parameters
----------
connection_file : str [optional]
The connection file to be used. Can be given by absolute path, or
IPython will search in the security directory of a given profile.
If unspecified, the connection file for the currently running
IPython Kernel will be used, which is only allowed from inside a kernel.
unpack : bool [default: False]
if True, return the unpacked dict, otherwise just the string contents
of the file.
profile : str [optional]
The name of the profile to use when searching for the connection file,
if different from the current IPython session or 'default'.
Returns
-------
The connection dictionary of the current kernel, as string or dict,
depending on `unpack`.
"""
if connection_file is None:
# get connection file from current kernel
cf = get_connection_file()
else:
# connection file specified, allow shortnames:
cf = find_connection_file(connection_file, profile=profile)
with open(cf) as f:
info = f.read()
if unpack:
info = json.loads(info)
# ensure key is bytes:
info['key'] = str_to_bytes(info.get('key', ''))
    return info
def process(self, now):
"""Perform connection state processing."""
if self._pn_connection is None:
LOG.error("Connection.process() called on destroyed connection!")
return 0
# do nothing until the connection has been opened
if self._pn_connection.state & proton.Endpoint.LOCAL_UNINIT:
return 0
if self._pn_sasl and not self._sasl_done:
# wait until SASL has authenticated
if (_PROTON_VERSION < (0, 10)):
if self._pn_sasl.state not in (proton.SASL.STATE_PASS,
proton.SASL.STATE_FAIL):
LOG.debug("SASL in progress. State=%s",
str(self._pn_sasl.state))
if self._handler:
with self._callback_lock:
self._handler.sasl_step(self, self._pn_sasl)
return self._next_deadline
self._sasl_done = True
if self._handler:
with self._callback_lock:
self._handler.sasl_done(self, self._pn_sasl,
self._pn_sasl.outcome)
else:
if self._pn_sasl.outcome is not None:
self._sasl_done = True
if self._handler:
with self._callback_lock:
self._handler.sasl_done(self, self._pn_sasl,
self._pn_sasl.outcome)
# process timer events:
timer_deadline = self._expire_timers(now)
transport_deadline = self._pn_transport.tick(now)
if timer_deadline and transport_deadline:
self._next_deadline = min(timer_deadline, transport_deadline)
else:
self._next_deadline = timer_deadline or transport_deadline
# process events from proton:
pn_event = self._pn_collector.peek()
while pn_event:
# LOG.debug("pn_event: %s received", pn_event.type)
if _Link._handle_proton_event(pn_event, self):
pass
elif self._handle_proton_event(pn_event):
pass
elif _SessionProxy._handle_proton_event(pn_event, self):
pass
self._pn_collector.pop()
pn_event = self._pn_collector.peek()
# check for connection failure after processing all pending
# engine events:
if self._error:
if self._handler:
# nag application until connection is destroyed
self._next_deadline = now
with self._callback_lock:
self._handler.connection_failed(self, self._error)
elif (self._endpoint_state == self._CLOSED and
self._read_done and self._write_done):
# invoke closed callback after endpoint has fully closed and
# all pending I/O has completed:
if self._handler:
with self._callback_lock:
self._handler.connection_closed(self)
        return self._next_deadline
def lookupAll(data, configFields, lookupType, db, histObj={}):
"""
Return a record after having cleaning rules of specified type applied to all fields in the config
:param dict data: single record (dictionary) to which cleaning rules should be applied
:param dict configFields: "fields" object from DWM config (see DataDictionary)
:param string lookupType: Type of lookup to perform/MongoDB collection name. One of 'genericLookup', 'fieldSpecificLookup', 'normLookup', 'genericRegex', 'fieldSpecificRegex', 'normRegex', 'normIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict histObj: History object to which changes should be appended
"""
for field in data.keys():
if field in configFields.keys() and data[field]!='':
if lookupType in configFields[field]["lookup"]:
if lookupType in ['genericLookup', 'fieldSpecificLookup', 'normLookup']:
fieldValNew, histObj = DataLookup(fieldVal=data[field], db=db, lookupType=lookupType, fieldName=field, histObj=histObj)
elif lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex']:
fieldValNew, histObj = RegexLookup(fieldVal=data[field], db=db, fieldName=field, lookupType=lookupType, histObj=histObj)
elif lookupType=='normIncludes':
fieldValNew, histObj, checkMatch = IncludesLookup(fieldVal=data[field], lookupType='normIncludes', db=db, fieldName=field, histObj=histObj)
data[field] = fieldValNew
    return data, histObj
def hdb_disk_interface(self, hdb_disk_interface):
"""
        Sets the hdb disk interface for this QEMU VM.
:param hdb_disk_interface: QEMU hdb disk interface
"""
self._hdb_disk_interface = hdb_disk_interface
        log.info('QEMU VM "{name}" [{id}] has set the QEMU hdb disk interface to {interface}'.format(
            name=self._name, id=self._id, interface=self._hdb_disk_interface))
def choices(self):
""" Retrieve choices from API if possible"""
if not self._choices:
gandi = self.gandi or GandiContextHelper()
self._choices = self._get_choices(gandi)
if not self._choices:
api = gandi.get_api_connector()
gandi.echo('Please check that you are connecting to the good '
"api '%s' and that it's running." % (api.host))
sys.exit(1)
        return self._choices
def read(self, file_p):
"""Reads an OVF file into the appliance object.
This method succeeds if the OVF is syntactically valid and, by itself, without errors. The
mere fact that this method returns successfully does not mean that VirtualBox supports all
features requested by the appliance; this can only be examined after a call to :py:func:`interpret` .
in file_p of type str
Name of appliance file to open (either with an .ovf or .ova extension, depending
on whether the appliance is distributed as a set of files or as a single file, respectively).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
"""
if not isinstance(file_p, basestring):
raise TypeError("file_p can only be an instance of type basestring")
progress = self._call("read",
in_p=[file_p])
progress = IProgress(progress)
        return progress
def get_Tsys(calON_obs,calOFF_obs,calflux,calfreq,spec_in,oneflux=False,**kwargs):
'''
Returns frequency dependent system temperature given observations on and off a calibrator source
Parameters
----------
(See diode_spec())
'''
    return diode_spec(calON_obs,calOFF_obs,calflux,calfreq,spec_in,average=False,oneflux=False,**kwargs)[1]
def get_celery_app(
name=os.getenv(
"CELERY_NAME",
"worker"),
auth_url=os.getenv(
"BROKER_URL",
"redis://localhost:6379/9"),
backend_url=os.getenv(
"BACKEND_URL",
"redis://localhost:6379/10"),
include_tasks=[],
ssl_options=None,
transport_options=None,
path_to_config_module=os.getenv(
"CONFIG_MODULE_PATH",
"celery_loaders.work_tasks.celery_config"),
worker_log_format=os.getenv(
"WORKER_LOG_FORMAT",
"%(asctime)s: %(levelname)s %(message)s"),
**kwargs):
"""get_celery_app
:param name: name for this app
:param auth_url: celery broker
:param backend_url: celery backend
:param include_tasks: list of modules containing tasks to add
:param ssl_options: security options dictionary
    :param transport_options: transport options dictionary
:param path_to_config_module: config module
:param worker_log_format: format for logs
"""
if len(include_tasks) == 0:
log.error(("creating celery app={} MISSING tasks={}")
.format(
name,
include_tasks))
else:
log.info(("creating celery app={} tasks={}")
.format(
name,
include_tasks))
# get the Celery application
app = celery.Celery(
name,
broker_url=auth_url,
result_backend=backend_url,
include=include_tasks)
app.config_from_object(
path_to_config_module,
namespace="CELERY")
app.conf.update(kwargs)
if transport_options:
log.info(("loading transport_options={}")
.format(transport_options))
app.conf.update(**transport_options)
# custom tranport options
if ssl_options:
log.info(("loading ssl_options={}")
.format(ssl_options))
app.conf.update(**ssl_options)
# custom ssl options
if len(include_tasks) > 0:
app.autodiscover_tasks(include_tasks)
    return app
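
A hedged usage sketch; the task module path and broker/backend URLs below are placeholders, not values from the source:

app = get_celery_app(
    name="worker",
    auth_url="redis://localhost:6379/9",
    backend_url="redis://localhost:6379/10",
    include_tasks=["myproject.tasks"])      # hypothetical module containing @app.task functions
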
def get_option_lists(self):
"""
A hook to override the option lists used to generate option names
and defaults.
"""
return [self.get_option_list()] + \
[option_list
for name, description, option_list
             in self.get_option_groups()]
def _get_imports_h(self, data_types):
"""Emits all necessary header file imports for the given Stone data type."""
if not isinstance(data_types, list):
data_types = [data_types]
import_classes = []
for data_type in data_types:
if is_user_defined_type(data_type):
import_classes.append(fmt_class_prefix(data_type))
for field in data_type.all_fields:
data_type, _ = unwrap_nullable(field.data_type)
# unpack list or map
while is_list_type(data_type) or is_map_type(data_type):
data_type = (data_type.value_data_type if
is_map_type(data_type) else data_type.data_type)
if is_user_defined_type(data_type):
import_classes.append(fmt_class_prefix(data_type))
import_classes = list(set(import_classes))
import_classes.sort()
        return import_classes
def p_term_var(self, p):
''' term : VAR
'''
_LOGGER.debug("term -> VAR")
# TODO: determine the type of the var
if p[1] not in self._VAR_VALUES:
if self._autodefine_vars:
self._VAR_VALUES[p[1]] = TypedClass(None, TypedClass.UNKNOWN)
if p[1] in self._VAR_VALUES:
_LOGGER.debug("term -> VAR")
p[0] = self._VAR_VALUES[p[1]]
else:
            raise UndefinedVar()
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings."""
    return _compile(pattern, flags).split(string, maxsplit)
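
The behaviour mirrors re.split; for example:

split(r'[,;]\s*', 'a, b;c')            # -> ['a', 'b', 'c']
split(r'\d+', 'x1y22z', maxsplit=1)    # -> ['x', 'y22z']
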
def align_texts(source_blocks, target_blocks, params = LanguageIndependent):
"""Creates the sentence alignment of two texts.
Texts can consist of several blocks. Block boundaries cannot be crossed by sentence
alignment links.
Each block consists of a list that contains the lengths (in characters) of the sentences
in this block.
@param source_blocks: The list of blocks in the source text.
@param target_blocks: The list of blocks in the target text.
@param params: the sentence alignment parameters.
@returns: A list of sentence alignment lists
"""
if len(source_blocks) != len(target_blocks):
raise ValueError("Source and target texts do not have the same number of blocks.")
return [align_blocks(source_block, target_block, params)
            for source_block, target_block in zip(source_blocks, target_blocks)]
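
An illustrative call; the sentence lengths are made-up character counts and the alignment output shown is only indicative of the expected shape:

source_blocks = [[12, 20, 18], [40]]     # per-block sentence lengths in the source text
target_blocks = [[15, 19, 21], [38]]
alignments = align_texts(source_blocks, target_blocks)
# one alignment list per block, e.g. [[(0, 0), (1, 1), (2, 2)], [(0, 0)]]
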
def make_graph_pygraphviz(self, recs, nodecolor,
edgecolor, dpi,
draw_parents=True, draw_children=True):
"""Draw AMIGO style network, lineage containing one query record."""
import pygraphviz as pgv
grph = pgv.AGraph(name="GO tree")
edgeset = set()
for rec in recs:
if draw_parents:
edgeset.update(rec.get_all_parent_edges())
if draw_children:
edgeset.update(rec.get_all_child_edges())
edgeset = [(self.label_wrap(a), self.label_wrap(b))
for (a, b) in edgeset]
# add nodes explicitly via add_node
# adding nodes implicitly via add_edge misses nodes
# without at least one edge
for rec in recs:
grph.add_node(self.label_wrap(rec.item_id))
for src, target in edgeset:
# default layout in graphviz is top->bottom, so we invert
# the direction and plot using dir="back"
grph.add_edge(target, src)
grph.graph_attr.update(dpi="%d" % dpi)
grph.node_attr.update(shape="box", style="rounded,filled",
fillcolor="beige", color=nodecolor)
grph.edge_attr.update(shape="normal", color=edgecolor,
dir="back", label="is_a")
# highlight the query terms
for rec in recs:
try:
node = grph.get_node(self.label_wrap(rec.item_id))
node.attr.update(fillcolor="plum")
except:
continue
        return grph
def mean_min_time_distance(item_a, item_b, max_value):
"""
Calculate the mean time difference among the time steps in each object.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
times_a = item_a.times.reshape((item_a.times.size, 1))
times_b = item_b.times.reshape((1, item_b.times.size))
distance_matrix = (times_a - times_b) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
    return np.minimum(mean_min_distances, max_value) / float(max_value)
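
A worked example using a minimal stand-in for STObject, since only the `times` array is accessed here:

import numpy as np
from collections import namedtuple

Track = namedtuple("Track", "times")               # stand-in exposing just the attribute used above
a = Track(times=np.array([0, 1, 2]))
b = Track(times=np.array([1, 2, 4]))
print(mean_min_time_distance(a, b, max_value=6))   # ~0.22: the two tracks largely overlap in time
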
def do_dir(self, args, unknown):
"""List contents of current directory."""
# No arguments for this command
if unknown:
self.perror("dir does not take any positional arguments:", traceback_war=False)
self.do_help('dir')
self._last_result = cmd2.CommandResult('', 'Bad arguments')
return
# Get the contents as a list
contents = os.listdir(self.cwd)
fmt = '{} '
if args.long:
fmt = '{}\n'
for f in contents:
self.stdout.write(fmt.format(f))
self.stdout.write('\n')
        self._last_result = cmd2.CommandResult(data=contents)
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
"""resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '_'.
If the optional allow_dotted_names argument is false, dots are not
supported and this function operates similar to getattr(obj, attr).
"""
if allow_dotted_names:
attrs = attr.split('.')
else:
attrs = [attr]
for i in attrs:
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
    return obj
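
For example:

import os

resolve_dotted_attribute(os, 'path.join')                             # -> os.path.join
# resolve_dotted_attribute(os, 'path._joinrealpath')                  # raises AttributeError (private attribute)
# resolve_dotted_attribute(os, 'path.join', allow_dotted_names=False) # AttributeError: no attribute 'path.join'
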
def get_rendered_objects(self):
"""Render objects"""
objects = self.objects
if isinstance(objects, str):
objects = getattr(self.object, objects).all()
return [
self.get_rendered_object(obj)
for obj in objects
        ]
def set(self, document_data, merge=False):
"""Replace the current document in the Firestore database.
A write ``option`` can be specified to indicate preconditions of
the "set" operation. If no ``option`` is specified and this document
doesn't exist yet, this method will create it.
Overwrites all content for the document with the fields in
``document_data``. This method performs almost the same functionality
as :meth:`create`. The only difference is that this method doesn't
make any requirements on the existence of the document (unless
``option`` is used), whereas as :meth:`create` will fail if the
document already exists.
Args:
document_data (dict): Property names and values to use for
replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
write result corresponding to the committed document. A write
result contains an ``update_time`` field.
"""
batch = self._client.batch()
batch.set(self, document_data, merge=merge)
write_results = batch.commit()
        return _first_write_result(write_results)
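
A hedged usage sketch, assuming `client` is a google.cloud.firestore client; the collection, document id and field names are illustrative only:

doc_ref = client.collection('users').document('alovelace')
result = doc_ref.set({'first': 'Ada', 'born': 1815}, merge=True)
print(result.update_time)        # server timestamp of the committed write
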
def _dict_from_terse_tabular(
names: List[str],
inp: str,
transformers: Dict[str, Callable[[str], Any]] = {})\
-> List[Dict[str, Any]]:
""" Parse NMCLI terse tabular output into a list of Python dict.
``names`` is a list of strings of field names to apply to the input data,
which is assumed to be colon separated.
``inp`` is the input as a string (i.e. already decode()d) from nmcli
``transformers`` is a dict mapping field names to callables of the form
f: str -> any. If a fieldname is in transformers, that callable will be
invoked on the field matching the name and the result stored.
The return value is a list with one element per valid line of input, where
each element is a dict with keys taken from names and values from the input
"""
res = []
for n in names:
if n not in transformers:
transformers[n] = lambda s: s
for line in inp.split('\n'):
if len(line) < 3:
continue
fields = line.split(':')
res.append(dict([
(elem[0], transformers[elem[0]](elem[1]))
for elem in zip(names, fields)]))
    return res
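
A small example with fabricated nmcli-style terse output:

raw = "wlan0:wifi:connected\nlo:loopback:unmanaged\n"
rows = _dict_from_terse_tabular(
    ['device', 'type', 'state'], raw,
    transformers={'state': lambda s: s.upper()})
# -> [{'device': 'wlan0', 'type': 'wifi', 'state': 'CONNECTED'},
#     {'device': 'lo', 'type': 'loopback', 'state': 'UNMANAGED'}]
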
def resnet_v2(inputs,
block_fn,
layer_blocks,
filters,
data_format="channels_first",
is_training=False,
is_cifar=False,
use_td=False,
targeting_rate=None,
keep_prob=None):
"""Resnet model.
Args:
inputs: `Tensor` images.
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include
in each of the 3 or 4 block groups. Each group consists of blocks that
take inputs of the same resolution.
filters: list of 4 or 5 `int`s denoting the number of filter to include in
block.
data_format: `str`, "channels_first" `[batch, channels, height,
width]` or "channels_last" `[batch, height, width, channels]`.
is_training: bool, build in training mode or not.
is_cifar: bool, whether the data is CIFAR or not.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
Returns:
Pre-logit activations.
"""
inputs = block_layer(
inputs=inputs,
filters=filters[1],
block_fn=block_fn,
blocks=layer_blocks[0],
strides=1,
is_training=is_training,
name="block_layer1",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
inputs = block_layer(
inputs=inputs,
filters=filters[2],
block_fn=block_fn,
blocks=layer_blocks[1],
strides=2,
is_training=is_training,
name="block_layer2",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
inputs = block_layer(
inputs=inputs,
filters=filters[3],
block_fn=block_fn,
blocks=layer_blocks[2],
strides=2,
is_training=is_training,
name="block_layer3",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
if not is_cifar:
inputs = block_layer(
inputs=inputs,
filters=filters[4],
block_fn=block_fn,
blocks=layer_blocks[3],
strides=2,
is_training=is_training,
name="block_layer4",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
  return inputs
def help_center_articles_search(self, category=None, label_names=None, locale=None, query=None, section=None, updated_after=None, updated_before=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/search#search-articles"
api_path = "/api/v2/help_center/articles/search.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if category:
api_query.update({
"category": category,
})
if label_names:
api_query.update({
"label_names": label_names,
})
if locale:
api_query.update({
"locale": locale,
})
if query:
api_query.update({
"query": query,
})
if section:
api_query.update({
"section": section,
})
if updated_after:
api_query.update({
"updated_after": updated_after,
})
if updated_before:
api_query.update({
"updated_before": updated_before,
})
        return self.call(api_path, query=api_query, **kwargs)
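
A hedged usage sketch, assuming `client` is an authenticated Zendesk API wrapper that exposes this method; the query text is illustrative:

results = client.help_center_articles_search(query="reset password", locale="en-us")
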
def select_many_with_correspondence(
self,
collection_selector=identity,
result_selector=KeyedElement):
'''Projects each element of a sequence to an intermediate new sequence,
and flattens the resulting sequence, into one sequence and uses a
selector function to incorporate the corresponding source for each item
in the result sequence.
Note: This method uses deferred execution.
Args:
collection_selector: A unary function mapping each element of the
source iterable into an intermediate sequence. The single
argument of the collection_selector is the value of an element
from the source sequence. The return value should be an
iterable derived from that element value. The default
collection_selector, which is the identity function, assumes
that each element of the source sequence is itself iterable.
result_selector:
An optional binary function mapping the elements in the
flattened intermediate sequence together with their
corresponding source elements to elements of the result
sequence. The two positional arguments of the result_selector
are, first the source element corresponding to an element from
the intermediate sequence, and second the actual element from
the intermediate sequence. The return value should be the
corresponding value in the result sequence. If no
result_selector function is provided, the elements of the
result sequence are KeyedElement namedtuples.
Returns:
A Queryable over a generated sequence whose elements are the result
of applying the one-to-many collection_selector to each element of
the source sequence, concatenating the results into an intermediate
sequence, and then mapping each of those elements through the
result_selector which incorporates the corresponding source element
into the result sequence.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If projector or selector are not callable.
'''
if self.closed():
raise ValueError("Attempt to call "
"select_many_with_correspondence() on a closed Queryable.")
if not is_callable(collection_selector):
raise TypeError("select_many_with_correspondence() parameter "
"projector={0} is not callable".format(repr(collection_selector)))
if not is_callable(result_selector):
raise TypeError("select_many_with_correspondence() parameter "
"selector={0} is not callable".format(repr(result_selector)))
return self._create(
self._generate_select_many_with_correspondence(collection_selector,
                result_selector))
def gfuds(udfuns, udqdec, relate, refval, adjust, step, nintvls, cnfine, result):
"""
Perform a GF search on a user defined scalar quantity.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfuds_c.html
:param udfuns: Name of the routine that computes the scalar quantity of interest at some time.
:type udfuns: ctypes.CFunctionType
:param udqdec: Name of the routine that computes whether the scalar quantity is decreasing.
:type udqdec: ctypes.CFunctionType
:param relate: Operator that either looks for an extreme value (max, min, local, absolute) or compares the geometric quantity value and a number.
:type relate: str
:param refval: Value used as reference for scalar quantity condition.
:type refval: float
:param adjust: Allowed variation for absolute extremal geometric conditions.
:type adjust: float
:param step: Step size used for locating extrema and roots.
:type step: float
:param nintvls: Workspace window interval count.
:type nintvls: int
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell
:return: result
:rtype: spiceypy.utils.support_types.SpiceCell
"""
relate = stypes.stringToCharP(relate)
refval = ctypes.c_double(refval)
adjust = ctypes.c_double(adjust)
step = ctypes.c_double(step)
nintvls = ctypes.c_int(nintvls)
libspice.gfuds_c(udfuns, udqdec, relate, refval, adjust, step, nintvls, ctypes.byref(cnfine), ctypes.byref(result))
    return result
def all(self, axis=None, *args, **kwargs):
"""
Tests whether all elements evaluate True
Returns
-------
all : bool
See Also
--------
numpy.all
"""
nv.validate_all(args, kwargs)
values = self.sp_values
if len(values) != len(self) and not np.all(self.fill_value):
return False
        return values.all()
def setValues(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data):
"""
    Set the values of the local data array.
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array
"""
nxGHalf = nxG/2.
nyGHalf = nyG/2.
nxGQuart = nxGHalf/2.
nyGQuart = nyGHalf/2.
for i in range(data.shape[0]):
iG = iBeg + i
di = iG - nxG
for j in range(data.shape[1]):
jG = jBeg + j
dj = jG - 0.8*nyG
            data[i, j] = numpy.floor(1.9*numpy.exp(-di**2/nxGHalf**2 - dj**2/nyGHalf**2))
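
A small local-tile example; the index ranges are chosen so the Gaussian bump falls inside the tile and are illustrative only:

import numpy

data = numpy.zeros((4, 4))                     # local tile covering i in [4, 8) and j in [4, 8)
setValues(nxG=8, nyG=8, iBeg=4, iEnd=8, jBeg=4, jEnd=8, data=data)
print(data)                                    # non-zero entries appear near the bump centre
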
def find_children(self, linespec):
"""Find lines and immediate children that match the linespec regex.
:param linespec: regular expression of line to match
:returns: list of lines. These correspond to the lines that were
matched and their immediate children
"""
res = []
for parent in self.find_objects(linespec):
res.append(parent.line)
res.extend([child.line for child in parent.children])
        return res
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid)
        return self.page(course)
def translate_bit_for_bit(data):
""" Translates data where data["Type"]=="Bit for Bit" """
headers = sorted(data.get("Headers", []))
table = '\\FloatBarrier \n \\section{$NAME} \n'.replace('$NAME', data.get("Title", "table"))
table += '\\begin{table}[!ht] \n \\begin{center}'
# Set the number of columns
n_cols = "c"*(len(headers)+1)
table += '\n \\begin{tabular}{$NCOLS} \n'.replace("$NCOLS", n_cols)
# Put in the headers
table += " Variable &"
for header in headers:
table += ' $HEADER &'.replace('$HEADER', header).replace('%', '\%')
table = table[:-1] + ' \\\\ \n \hline \n'
# Put in the data
for k, v in data.get("Data", []).items():
table += "\n \\textbf{$VAR} & ".replace("$VAR", k)
for header in headers:
table += ' $VAL &'.replace("$VAL", str(v[header]))
table = table[:-1] + ' \\\\'
table += '\n \hline \n \end{tabular} \n \end{center} \n \end{table}\n'
    return table
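
An illustrative input (the header and variable names are made up) and the call that renders it as a LaTeX table:

data = {
    "Title": "Bit for Bit comparison",
    "Headers": ["Max Error", "RMS Error"],
    "Data": {"thk": {"Max Error": 0.0, "RMS Error": 0.0}},
}
print(translate_bit_for_bit(data))    # one LaTeX table row per variable in "Data"
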
def ring(surf, xy, r, width, color):
"""Draws a ring"""
r2 = r - width
x0, y0 = xy
x = r2
y = 0
err = 0
# collect points of the inner circle
right = {}
while x >= y:
right[x] = y
right[y] = x
right[-x] = y
right[-y] = x
y += 1
if err <= 0:
err += 2 * y + 1
if err > 0:
x -= 1
err -= 2 * x + 1
def h_fill_the_circle(surf, color, x, y, right):
if -r2 <= y <= r2:
pygame.draw.line(surf, color, (x0 + right[y], y0 + y), (x0 + x, y0 + y))
pygame.draw.line(surf, color, (x0 - right[y], y0 + y), (x0 - x, y0 + y))
else:
pygame.draw.line(surf, color, (x0 - x, y0 + y), (x0 + x, y0 + y))
x = r
y = 0
err = 0
while x >= y:
h_fill_the_circle(surf, color, x, y, right)
h_fill_the_circle(surf, color, x, -y, right)
h_fill_the_circle(surf, color, y, x, right)
h_fill_the_circle(surf, color, y, -x, right)
y += 1
if err < 0:
err += 2 * y + 1
if err >= 0:
x -= 1
err -= 2 * x + 1
gfxdraw.aacircle(surf, x0, y0, r, color)
    gfxdraw.aacircle(surf, x0, y0, r2, color)
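
A minimal pygame sketch (it assumes pygame.gfxdraw is imported as gfxdraw in the defining module, and the geometry and colour are illustrative):

import pygame

pygame.init()
screen = pygame.display.set_mode((200, 200))
ring(screen, (100, 100), r=80, width=10, color=(200, 40, 40))
pygame.display.flip()
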
def put_property(elt, key, value, ttl=None, ctx=None):
"""Put properties in elt.
:param elt: properties elt to put. Not None methods.
    :param number ttl: If not None, property time to live.
:param ctx: elt ctx from where put properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
    :param dict properties: properties to put in elt. elt and ttl are excluded.
:return: Timer if ttl is not None.
:rtype: Timer
"""
    return put_properties(elt=elt, properties={key: value}, ttl=ttl, ctx=ctx)
def _request(self, lat_min, lon_min, lat_max, lon_max, start, end, picture_size=None, set_=None, map_filter=None):
"""
Internal method to send requests to the Panoramio data API.
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param start:
            Index of the first photo to retrieve, where 0 is the most popular picture
:type start: int
:param end:
            Index of the last photo to retrieve, where 0 is the most popular picture
:type end: int
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
This can be: public, popular or user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: JSON response of the request formatted as a dictionary.
"""
if not isinstance(lat_min, float):
raise PynoramioException(
'{0}._request requires the lat_min parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lon_min, float):
raise PynoramioException(
'{0}._request requires the lon_min parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lat_max, float):
raise PynoramioException(
'{0}._request requires the lat_max parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lon_max, float):
raise PynoramioException(
'{0}._request requires the lon_max parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(start, int):
raise PynoramioException(
'{0}._request requires the start parameter to be an int.'.format(self.__class__.__name__))
if not isinstance(end, int):
raise PynoramioException(
'{0}._request requires the end parameter to be an int.'.format(self.__class__.__name__))
url = self.base_url + '&minx={0}&miny={1}&maxx={2}&maxy={3}&from={4}&to={5}'.format(lon_min, lat_min,
lon_max, lat_max,
start, end)
if picture_size is not None and isinstance(picture_size, basestring) \
and picture_size in ['original', 'medium', 'small', 'thumbnail', 'square', 'mini_square']:
url += '&size={0}'.format(picture_size)
if set_ is not None and (isinstance(set_, basestring) and set_ in ['public', 'full']) \
or (isinstance(set_, int)):
url += '&set={0}'.format(set_)
else:
url += '&set=public'
if map_filter is not None and isinstance(map_filter, bool) and not map_filter:
url += '&map_filter=false'
r = requests.get(url)
try:
return r.json()
except ValueError:
# add your debugging lines here, for example, print(r.url)
raise PynoramioException(
'An invalid or malformed url was passed to {0}._request'.format(self.__class__.__name__)) | Internal method to send requests to the Panoramio data API.
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param start:
Start index of the photos to retrieve, where 0 is the most popular picture
:type start: int
:param end:
End index of the photos to retrieve, where 0 is the most popular picture
:type end: int
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
This can be: public, popular or user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: JSON response of the request formatted as a dictionary. |
def send_custom_host_notification(self, host, options, author, comment):
"""DOES NOTHING (Should send a custom notification)
Format of the line that triggers function call::
SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment>
:param host: host to send notif for
:type host: alignak.object.host.Host
:param options: notification options
:type options:
:param author: notification author
:type author: str
:param comment: notification text
:type comment: str
:return: None
"""
logger.warning("The external command 'SEND_CUSTOM_HOST_NOTIFICATION' "
"is not currently implemented in Alignak. If you really need it, "
"request for its implementation in the project repository: "
"https://github.com/Alignak-monitoring/alignak")
self.send_an_element(make_monitoring_log(
'warning', 'SEND_CUSTOM_HOST_NOTIFICATION: this command is not implemented!')) | DOES NOTHING (Should send a custom notification)
Format of the line that triggers function call::
SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment>
:param host: host to send notif for
:type host: alignak.object.host.Host
:param options: notification options
:type options:
:param author: notification author
:type author: str
:param comment: notification text
:type comment: str
:return: None |
def delta(self, signature):
"Generates delta for remote file via API using local file's signature."
return self.api.post('path/sync/delta', self.path, signature=signature) | Generates delta for remote file via API using local file's signature. |
def run_gatk(self, params, tmp_dir=None, log_error=True,
data=None, region=None, memscale=None, parallel_gc=False, ld_preload=False):
"""Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow
"""
needs_java7 = LooseVersion(self.get_gatk_version()) < LooseVersion("3.6")
# For old Java requirements use global java 7
if needs_java7:
setpath.remove_bcbiopath()
with tx_tmpdir(self._config) as local_tmp_dir:
if tmp_dir is None:
tmp_dir = local_tmp_dir
cl = self.cl_gatk(params, tmp_dir, memscale=memscale, parallel_gc=parallel_gc)
atype_index = params.index("-T") if params.count("-T") > 0 \
else params.index("--analysis_type")
prog = params[atype_index + 1]
cl = fix_missing_spark_user(cl, prog, params)
if ld_preload:
cl = "export LD_PRELOAD=%s/lib/libopenblas.so && %s" % (os.path.dirname(utils.get_bcbio_bin()), cl)
do.run(cl, "GATK: {0}".format(prog), data, region=region,
log_error=log_error)
if needs_java7:
setpath.prepend_bcbiopath() | Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow |
def reset(self, document, parent, level):
"""Reset the state of state machine.
        After reset, self and self.state can be passed to
        docutils.parsers.rst.Directive.run
Parameters
----------
document: docutils document
Current document of the node.
parent: parent node
Parent node that will be used to interpret role and directives.
level: int
Current section level.
"""
self.language = languages.get_language(
document.settings.language_code)
# setup memo
self.memo.document = document
self.memo.reporter = document.reporter
self.memo.language = self.language
self.memo.section_level = level
# setup inliner
if self.memo.inliner is None:
self.memo.inliner = Inliner()
self.memo.inliner.init_customizations(document.settings)
inliner = self.memo.inliner
inliner.reporter = document.reporter
inliner.document = document
inliner.language = self.language
inliner.parent = parent
# setup self
self.document = document
self.reporter = self.memo.reporter
self.node = parent
self.state.runtime_init()
self.input_lines = document['source'] | Reset the state of state machine.
After reset, self and self.state can be passed to
docutils.parsers.rst.Directive.run
Parameters
----------
document: docutils document
Current document of the node.
parent: parent node
Parent node that will be used to interpret role and directives.
level: int
Current section level. |
def _writeGpoScript(psscript=False):
'''
helper function to write local GPO startup/shutdown script
scripts are stored in scripts.ini and psscripts.ini files in
``WINDIR\\System32\\GroupPolicy\\Machine|User\\Scripts``
these files have the hidden attribute set
    files have the following format:
empty line
[Startup]
0CmdLine=<path to script 0>
0Parameters=<script 0 parameters>
[Shutdown]
0CmdLine=<path to shutdown script 0>
0Parameters=<shutdown script 0 parameters>
Number is incremented for each script added
psscript file also has the option of a [ScriptsConfig] section, which has
the following two parameters:
StartExecutePSFirst
EndExecutePSFirst
these can be set to True/False to denote if the powershell startup/shutdown
scripts execute first (True) or last (False), if the value isn't set, then
it is 'Not Configured' in the GUI
'''
_machineScriptPolicyPath = os.path.join(os.getenv('WINDIR'),
'System32',
'GroupPolicy',
'Machine',
'Scripts',
'scripts.ini')
_machinePowershellScriptPolicyPath = os.path.join(os.getenv('WINDIR'),
'System32',
'GroupPolicy',
'Machine',
'Scripts',
'psscripts.ini')
_userScriptPolicyPath = os.path.join(os.getenv('WINDIR'),
'System32',
'GroupPolicy',
'User',
'Scripts',
'scripts.ini')
_userPowershellScriptPolicyPath = os.path.join(os.getenv('WINDIR'),
'System32',
'GroupPolicy',
'User',
'Scripts',
'psscripts.ini') | helper function to write local GPO startup/shutdown script
scripts are stored in scripts.ini and psscripts.ini files in
``WINDIR\\System32\\GroupPolicy\\Machine|User\\Scripts``
these files have the hidden attribute set
files have the following format:
empty line
[Startup]
0CmdLine=<path to script 0>
0Parameters=<script 0 parameters>
[Shutdown]
0CmdLine=<path to shutdown script 0>
0Parameters=<shutdown script 0 parameters>
Number is incremented for each script added
psscript file also has the option of a [ScriptsConfig] section, which has
the following two parameters:
StartExecutePSFirst
EndExecutePSFirst
these can be set to True/False to denote if the powershell startup/shutdown
scripts execute first (True) or last (False), if the value isn't set, then
it is 'Not Configured' in the GUI |
def addResourceFile(self, pid, resource_file, resource_filename=None, progress_callback=None):
""" Add a new file to an existing resource
:param pid: The HydroShare ID of the resource
:param resource_file: a read-only binary file-like object (i.e. opened with the flag 'rb') or a string
representing path to file to be uploaded as part of the new resource
:param resource_filename: string representing the filename of the resource file. Must be specified
if resource_file is a file-like object. If resource_file is a string representing a valid file path,
and resource_filename is not specified, resource_filename will be equal to os.path.basename(resource_file).
:param progress_callback: user-defined function to provide feedback to the user about the progress
of the upload of resource_file. For more information, see:
http://toolbelt.readthedocs.org/en/latest/uploading-data.html#monitoring-your-streaming-multipart-upload
:return: Dictionary containing 'resource_id' the ID of the resource to which the file was added, and
'file_name' the filename of the file added.
:raises: HydroShareNotAuthorized if user is not authorized to perform action.
:raises: HydroShareNotFound if the resource was not found.
:raises: HydroShareHTTPException if an unexpected HTTP response code is encountered.
"""
url = "{url_base}/resource/{pid}/files/".format(url_base=self.url_base,
pid=pid)
params = {}
close_fd = self._prepareFileForUpload(params, resource_file, resource_filename)
encoder = MultipartEncoder(params)
if progress_callback is None:
progress_callback = default_progress_callback
monitor = MultipartEncoderMonitor(encoder, progress_callback)
r = self._request('POST', url, data=monitor, headers={'Content-Type': monitor.content_type})
if close_fd:
fd = params['file'][1]
fd.close()
if r.status_code != 201:
if r.status_code == 403:
raise HydroShareNotAuthorized(('POST', url))
elif r.status_code == 404:
raise HydroShareNotFound((pid,))
else:
raise HydroShareHTTPException((url, 'POST', r.status_code))
response = r.json()
# assert(response['resource_id'] == pid)
return response | Add a new file to an existing resource
:param pid: The HydroShare ID of the resource
:param resource_file: a read-only binary file-like object (i.e. opened with the flag 'rb') or a string
representing path to file to be uploaded as part of the new resource
:param resource_filename: string representing the filename of the resource file. Must be specified
if resource_file is a file-like object. If resource_file is a string representing a valid file path,
and resource_filename is not specified, resource_filename will be equal to os.path.basename(resource_file).
:param progress_callback: user-defined function to provide feedback to the user about the progress
of the upload of resource_file. For more information, see:
http://toolbelt.readthedocs.org/en/latest/uploading-data.html#monitoring-your-streaming-multipart-upload
:return: Dictionary containing 'resource_id' the ID of the resource to which the file was added, and
'file_name' the filename of the file added.
:raises: HydroShareNotAuthorized if user is not authorized to perform action.
:raises: HydroShareNotFound if the resource was not found.
:raises: HydroShareHTTPException if an unexpected HTTP response code is encountered. |
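A minimal usage sketch for the method above, assuming the hs_restclient package layout (HydroShare and HydroShareAuthBasic classes); the credentials, resource id, and file path below are placeholders.
from hs_restclient import HydroShare, HydroShareAuthBasic

auth = HydroShareAuthBasic(username='myuser', password='mypassword')  # placeholders
hs = HydroShare(auth=auth)
# Upload a local CSV to an existing resource; both arguments are illustrative.
result = hs.addResourceFile('b5f58460abcd4db1b1234567890abcde', '/tmp/observations.csv')
print(result['resource_id'], result['file_name'])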
def _repos_checked(self, worker, output, error):
"""Callback for _check_repos."""
if worker.repo in self._checking_repos:
self._checking_repos.remove(worker.repo)
if output:
self._valid_repos.append(worker.repo)
if len(self._checking_repos) == 0:
self._download_repodata(self._valid_repos) | Callback for _check_repos. |
def do_update(pool,request,models):
"unlike *_check() below, update doesn't worry about missing children"
return {k:fkapply(models,pool,process_update,missing_update,k,v) for k,v in request.items()} | unlike *_check() below, update doesn't worry about missing children |
def combinations(iterable, r):
"""Calculate combinations
>>> list(combinations('ABCD',2))
[['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']]
>>> list(combinations(range(4), 3))
[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
Args:
iterable: Any iterable object.
r: Size of combination.
Yields:
list: Combination of size r.
"""
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield list(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield list(pool[i] for i in indices) | Calculate combinations
>>> list(combinations('ABCD',2))
[['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']]
>>> list(combinations(range(4), 3))
[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
Args:
iterable: Any iterable object.
r: Size of combination.
Yields:
list: Combination of size r. |
def get_generated_project_files(self, tool):
""" Get generated project files, the content depends on a tool. Look at tool implementation """
exporter = ToolsSupported().get_tool(tool)
return exporter(self.generated_files[tool], self.settings).get_generated_project_files() | Get generated project files, the content depends on a tool. Look at tool implementation |
def destroy(self, eip_or_aid, disassociate=False):
"""Release an EIP. If the EIP was allocated for a VPC instance, an
AllocationId(aid) must be provided instead of a PublicIp. Setting
disassociate to True will attempt to disassociate the IP before
releasing it (required for associated nondefault VPC instances).
"""
if "." in eip_or_aid: # If an IP is given (Classic)
# NOTE: EIPs are automatically disassociated for Classic instances.
return "true" == self.call("ReleaseAddress",
response_data_key="return",
PublicIp=eip_or_aid)
else: # If an AID is given (VPC)
if disassociate:
self.disassociate(eip_or_aid)
return "true" == self.call("ReleaseAddress",
response_data_key="return",
AllocationId=eip_or_aid) | Release an EIP. If the EIP was allocated for a VPC instance, an
AllocationId(aid) must be provided instead of a PublicIp. Setting
disassociate to True will attempt to disassociate the IP before
releasing it (required for associated nondefault VPC instances). |
def compute_toc_line_indentation_spaces(
header_type_curr: int = 1,
header_type_prev: int = 0,
no_of_indentation_spaces_prev: int = 0,
parser: str = 'github',
ordered: bool = False,
list_marker: str = '-',
list_marker_log: list = build_list_marker_log('github', '.'),
index: int = 1) -> int:
r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
    :parameter parser: decides rules on how to compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
        as list ids, otherwise a dash character.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces.
"""
assert header_type_curr >= 1
assert header_type_prev >= 0
assert no_of_indentation_spaces_prev >= 0
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if ordered:
assert len(
list_marker_log) == md_parser['github']['header']['max_levels']
for e in list_marker_log:
assert isinstance(e, str)
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_prev == 0:
# Base case for the first toc line.
no_of_indentation_spaces_curr = 0
elif header_type_curr == header_type_prev:
# Base case for same indentation.
no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
else:
if ordered:
list_marker_prev = str(list_marker_log[header_type_curr - 1])
else:
# list_marker for unordered lists will always be 1 character.
list_marker_prev = list_marker
# Generic cases.
if header_type_curr > header_type_prev:
# More indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev + len(list_marker_prev) +
len(' '))
elif header_type_curr < header_type_prev:
# Less indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev -
(len(list_marker_prev) + len(' ')))
# Reset older nested list indices. If this is not performed then
# future nested ordered lists will rely on incorrect data to
# compute indentations.
if ordered:
for i in range((header_type_curr - 1) + 1,
md_parser['github']['header']['max_levels']):
list_marker_log[i] = str(
md_parser['github']['list']['ordered']
['min_marker_number']) + list_marker
# Update the data structure.
if ordered:
list_marker_log[header_type_curr - 1] = str(index) + list_marker
elif parser == 'redcarpet':
no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
return no_of_indentation_spaces_curr | r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how to compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces. |
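A hand-worked trace of the arithmetic in the unordered github branch above, assuming the default '-' marker (one character wide): each header one level deeper than the previous adds len('-') + len(' ') spaces, and each level shallower subtracts the same amount. This traces the computation rather than calling the function itself.
marker = '-'
step = len(marker) + len(' ')   # 2 spaces per header level

spaces_h1 = 0                   # first TOC line always starts at column 0
spaces_h2 = spaces_h1 + step    # 2
spaces_h3 = spaces_h2 + step    # 4
back_to_h2 = spaces_h3 - step   # 2
print(spaces_h1, spaces_h2, spaces_h3, back_to_h2)   # 0 2 4 2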
def realimag_files(xscript=0, yscript="d[1]+1j*d[2]", eyscript=None, exscript=None, paths=None, g=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's real and imaginary parts versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
return files(xscript, yscript, eyscript, exscript, plotter=realimag_databoxes, paths=paths, g=g, **kwargs) | This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's real and imaginary parts versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog. |
def create_cloud_integration(self, **kwargs): # noqa: E501
"""Create a cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cloud_integration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\", \"externalId\":\"wave123\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_cloud_integration_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_cloud_integration_with_http_info(**kwargs) # noqa: E501
return data | Create a cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cloud_integration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\", \"externalId\":\"wave123\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread. |
def get(self, url, params=None):
'Make a GET request for url and return the response content as a generic lxml.objectify object'
url = self.escapeUrl(url)
content = six.BytesIO(self.raw(url, params=params))
# We need to make sure that the xml fits in available memory before we parse
# with lxml.objectify.fromstring(), or else it will bomb out.
# If it is too big, we need to buffer it to disk before we run it through objectify. see #87
#
# get length of buffer
content.seek(0,2)
contentlen = content.tell()
content.seek(0)
MAX_BUFFER_SIZE=1024*1024*200 # 200MB. TODO: find a way to compute this
if contentlen > MAX_BUFFER_SIZE:
# xml is too big to parse with lxml.objectify.fromstring()
contentfile = tempfile.NamedTemporaryFile()
contentfile.write(content.read())
o = lxml.objectify.parse(contentfile)
else:
o = lxml.objectify.fromstring(content.getvalue())
if o.tag == 'error':
JFSError.raiseError(o, url)
return o | Make a GET request for url and return the response content as a generic lxml.objectify object |
def delete(self, r=None, w=None, dw=None, pr=None, pw=None,
timeout=None):
"""
Delete this object from Riak.
:param r: R-value, wait for this many partitions to read object
before performing the put
:type r: integer
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject`
"""
self.client.delete(self, r=r, w=w, dw=dw, pr=pr, pw=pw,
timeout=timeout)
self.clear()
return self | Delete this object from Riak.
:param r: R-value, wait for this many partitions to read object
before performing the put
:type r: integer
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject` |
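A brief usage sketch, assuming the Python Riak client with default connection settings; the bucket and key names are placeholders.
import riak

client = riak.RiakClient()          # defaults to a local node
bucket = client.bucket('users')
obj = bucket.get('alice')           # fetch the RiakObject to delete
obj.delete(w=2, dw=1)               # wait for 2 write acks and 1 durable write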
def saveWallet(self, wallet, fpath):
"""Save wallet into specified localtion.
Returns the canonical path for the ``fpath`` where ``wallet``
has been stored.
Error cases:
- ``fpath`` is not inside the keyrings base dir - ValueError raised
- directory part of ``fpath`` exists and it's not a directory -
NotADirectoryError raised
- ``fpath`` exists and it's a directory - IsADirectoryError raised
:param wallet: wallet to save
:param fpath: wallet file path, absolute or relative to
keyrings base dir
"""
if not fpath:
raise ValueError("empty path")
_fpath = self._normalize(fpath)
_dpath = _fpath.parent
try:
_dpath.relative_to(self._baseDir)
except ValueError:
raise ValueError(
"path {} is not is not relative to the keyrings {}".format(
fpath, self._baseDir))
self._createDirIfNotExists(_dpath)
# ensure permissions from the bottom of the directory hierarchy
while _dpath != self._baseDir:
self._ensurePermissions(_dpath, self.dmode)
_dpath = _dpath.parent
with _fpath.open("w") as wf:
self._ensurePermissions(_fpath, self.fmode)
encodedWallet = self.encode(wallet)
wf.write(encodedWallet)
logger.debug("stored wallet '{}' in {}".format(
wallet.name, _fpath))
        return str(_fpath) | Save wallet into specified location.
Returns the canonical path for the ``fpath`` where ``wallet``
has been stored.
Error cases:
- ``fpath`` is not inside the keyrings base dir - ValueError raised
- directory part of ``fpath`` exists and it's not a directory -
NotADirectoryError raised
- ``fpath`` exists and it's a directory - IsADirectoryError raised
:param wallet: wallet to save
:param fpath: wallet file path, absolute or relative to
keyrings base dir |
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
"""Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore.
"""
if not self._is_running:
raise SublemonRuntimeError(
'Attempted to spawn subprocesses from a non-started server')
subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
for sp in subprocs:
asyncio.ensure_future(sp.spawn())
return subprocs | Coroutine to spawn shell commands.
If `max_concurrency` is reached during the attempt to spawn the
specified subprocesses, excess subprocesses will block while attempting
to acquire this server's semaphore. |
def download_static_assets(doc, destination, base_url,
request_fn=make_request, url_blacklist=[], js_middleware=None,
css_middleware=None, derive_filename=_derive_filename):
"""
Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.
Args:
doc: The HTML page source as a string or BeautifulSoup instance.
destination: The folder to download the static assets to!
base_url: The base URL where assets will be downloaded from.
request_fn: The function to be called to make requests, passed to
ricecooker.utils.html.download_file(). Pass in a custom one for custom
caching logic.
url_blacklist: A list of keywords of files to not include in downloading.
Will do substring matching, so e.g. 'acorn.js' will match
'/some/path/to/acorn.js'.
        js_middleware: If specified, JS content will be passed into this callback
which is expected to return JS content with any modifications.
        css_middleware: If specified, CSS content will be passed into this callback
which is expected to return CSS content with any modifications.
Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.)
"""
if not isinstance(doc, BeautifulSoup):
doc = BeautifulSoup(doc, "html.parser")
# Helper function to download all assets for a given CSS selector.
def download_assets(selector, attr, url_middleware=None,
content_middleware=None, node_filter=None):
nodes = doc.select(selector)
for i, node in enumerate(nodes):
if node_filter:
if not node_filter(node):
src = node[attr]
node[attr] = ''
print(' Skipping node with src ', src)
continue
if node[attr].startswith('data:'):
continue
url = urljoin(base_url, node[attr])
if _is_blacklisted(url, url_blacklist):
print(' Skipping downloading blacklisted url', url)
node[attr] = ""
continue
if url_middleware:
url = url_middleware(url)
filename = derive_filename(url)
node[attr] = filename
print(" Downloading", url, "to filename", filename)
download_file(url, destination, request_fn=request_fn,
filename=filename, middleware_callbacks=content_middleware)
def js_content_middleware(content, url, **kwargs):
if js_middleware:
content = js_middleware(content, url, **kwargs)
# Polyfill localStorage and document.cookie as iframes can't access
# them
return (content
.replace("localStorage", "_localStorage")
.replace('document.cookie.split', '"".split')
.replace('document.cookie', 'window._document_cookie'))
def css_node_filter(node):
return "stylesheet" in node["rel"]
def css_content_middleware(content, url, **kwargs):
if css_middleware:
content = css_middleware(content, url, **kwargs)
file_dir = os.path.dirname(urlparse(url).path)
# Download linked fonts and images
def repl(match):
src = match.group(1)
if src.startswith('//localhost'):
return 'url()'
# Don't download data: files
if src.startswith('data:'):
return match.group(0)
src_url = urljoin(base_url, os.path.join(file_dir, src))
if _is_blacklisted(src_url, url_blacklist):
print(' Skipping downloading blacklisted url', src_url)
return 'url()'
derived_filename = derive_filename(src_url)
download_file(src_url, destination, request_fn=request_fn,
filename=derived_filename)
return 'url("%s")' % derived_filename
return _CSS_URL_RE.sub(repl, content)
# Download all linked static assets.
download_assets("img[src]", "src") # Images
download_assets("link[href]", "href",
content_middleware=css_content_middleware,
node_filter=css_node_filter) # CSS
download_assets("script[src]", "src",
content_middleware=js_content_middleware) # JS
download_assets("source[src]", "src") # Potentially audio
download_assets("source[srcset]", "srcset") # Potentially audio
# ... and also run the middleware on CSS/JS embedded in the page source to
# get linked files.
for node in doc.select('style'):
node.string = css_content_middleware(node.get_text(), url='')
for node in doc.select('script'):
if not node.attrs.get('src'):
node.string = js_content_middleware(node.get_text(), url='')
return doc | Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.
Args:
doc: The HTML page source as a string or BeautifulSoup instance.
destination: The folder to download the static assets to!
base_url: The base URL where assets will be downloaded from.
request_fn: The function to be called to make requests, passed to
ricecooker.utils.html.download_file(). Pass in a custom one for custom
caching logic.
url_blacklist: A list of keywords of files to not include in downloading.
Will do substring matching, so e.g. 'acorn.js' will match
'/some/path/to/acorn.js'.
js_middleware: If specified, JS content will be passed into this callback
which is expected to return JS content with any modifications.
css_middleware: If specified, CSS content will be passed into this callback
which is expected to return CSS content with any modifications.
Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.) |
def evaluate_ising(linear, quad, state):
"""Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function.
"""
# If we were given a numpy array cast to list
if _numpy and isinstance(state, np.ndarray):
return evaluate_ising(linear, quad, state.tolist())
# Accumulate the linear and quadratic values
energy = 0.0
for index, value in uniform_iterator(linear):
energy += state[index] * value
for (index_a, index_b), value in six.iteritems(quad):
energy += value * state[index_a] * state[index_b]
return energy | Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function. |
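A self-contained hand-check of the energy sum above on a two-spin system; the coefficient and spin values are chosen purely for illustration.
linear = {0: 1.0, 1: -0.5}    # h_i coefficients
quad = {(0, 1): 2.0}          # J_ij coefficients
state = [1, -1]               # spin configuration

energy = 0.0
for i, h in linear.items():
    energy += h * state[i]
for (a, b), j in quad.items():
    energy += j * state[a] * state[b]
print(energy)   # 1.0*1 + (-0.5)*(-1) + 2.0*1*(-1) = -0.5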
def _eq(left, right):
"""
Equality comparison that allows for equality between tuple and list types
with equivalent elements.
"""
if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
else:
return left == right | Equality comparison that allows for equality between tuple and list types
with equivalent elements. |
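A few illustrative checks, assuming the _eq helper above is in scope:
assert _eq((1, 2, 3), [1, 2, 3])      # tuple vs list with equal elements
assert _eq([('a', 1)], (('a', 1),))   # nested sequences compare recursively
assert not _eq((1, 2), [1, 2, 3])     # different lengths are never equal
print('all comparisons behave as expected')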
def _set_base_dn(self):
"""Get Base DN from LDAP"""
results = self._search(
'cn=config',
'(objectClass=*)',
['nsslapd-defaultnamingcontext'],
scope=ldap.SCOPE_BASE
)
if results and type(results) is list:
dn, attrs = results[0]
r = attrs['nsslapd-defaultnamingcontext'][0].decode('utf-8')
else:
raise Exception
self._base_dn = r
self._active_user_base = 'cn=users,cn=accounts,' + self._base_dn
self._stage_user_base = 'cn=staged users,cn=accounts,cn=provisioning,' + self._base_dn
self._preserved_user_base = 'cn=deleted users,cn=accounts,cn=provisioning,' + self._base_dn
self._groups_base = 'cn=groups,cn=accounts,' + self._base_dn
log.debug('Base DN: %s' % self._base_dn) | Get Base DN from LDAP |
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
"""Inherit docs."""
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False)
# Determine parameters
key = cls._generate_filename(writer_spec, mr_spec.name,
mr_spec.mapreduce_id,
shard_number, shard_attempt)
status = _ConsistentStatus()
status.writer_spec = writer_spec
status.mainfile = cls._open_file(writer_spec, key)
status.mapreduce_id = mr_spec.mapreduce_id
status.shard = shard_number
return cls(status) | Inherit docs. |
def set_multivar(self, section, option, value=''):
'''
This function is unique to the GitConfigParser. It will add another
value for the option if it already exists, converting the option's
value to a list if applicable.
If "value" is a list, then any existing values for the specified
section and option will be replaced with the list being passed.
'''
self._string_check(value, allow_list=True)
if not section or section == self.DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError( # pylint: disable=undefined-variable
salt.utils.stringutils.to_str(section))
key = self.optionxform(option)
self._add_option(sectdict, key, value) | This function is unique to the GitConfigParser. It will add another
value for the option if it already exists, converting the option's
value to a list if applicable.
If "value" is a list, then any existing values for the specified
section and option will be replaced with the list being passed. |
def get_context():
"""Provide the context to use.
This function takes care of creating new contexts in case of forks.
"""
pid = os.getpid()
if pid not in context:
context[pid] = zmq.Context()
logger.debug('renewed context for PID %d', pid)
return context[pid] | Provide the context to use.
This function takes care of creating new contexts in case of forks. |
def undo_sign_in(entry, session=None):
"""Delete a signed in entry.
:param entry: `models.Entry` object. The entry to delete.
:param session: (optional) SQLAlchemy session through which to access the database.
""" # noqa
    if session is None:
        session = Session()
entry_to_delete = (
session
.query(Entry)
.filter(Entry.uuid == entry.uuid)
.one_or_none()
)
if entry_to_delete:
logger.info('Undo sign in: {}'.format(entry_to_delete.user_id))
logger.debug('Undo sign in: {}'.format(entry_to_delete))
session.delete(entry_to_delete)
session.commit()
else:
error_message = 'Entry not found: {}'.format(entry)
logger.error(error_message)
raise ValueError(error_message) | Delete a signed in entry.
:param entry: `models.Entry` object. The entry to delete.
:param session: (optional) SQLAlchemy session through which to access the database. |
def submit(self, command="", blocksize=1, job_name="parsl.auto"):
''' The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot (such as IPP engine
or even Swift-T engines).
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses
'''
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
# Set job name
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
job_config = self.get_configs(command, blocksize)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
cmd = "qsub -terse {0}".format(channel_script_path)
retcode, stdout, stderr = super().execute_wait(cmd, 10)
if retcode == 0:
for line in stdout.split('\n'):
job_id = line.strip()
if not job_id:
continue
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
return job_id
else:
print("[WARNING!!] Submission of command to scale_out failed")
logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip()) | The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot (such as IPP engine
or even Swift-T engines).
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses |
def set(self, newvalue):
# type: (B) -> Callable[[S], T]
'''Set the focus to `newvalue`.
>>> from lenses import lens
>>> set_item_one_to_four = lens[1].set(4)
>>> set_item_one_to_four([1, 2, 3])
[1, 4, 3]
'''
def setter(state):
return self._optic.set(state, newvalue)
return setter | Set the focus to `newvalue`.
>>> from lenses import lens
>>> set_item_one_to_four = lens[1].set(4)
>>> set_item_one_to_four([1, 2, 3])
[1, 4, 3] |
def walk(self, address):
'''
Returns a stream of pairs of node addresses and data, raising
AddressNotInTree if ADDRESS is not in the tree.
First the ancestors of ADDRESS (including itself) are yielded,
earliest to latest, and then the descendants of ADDRESS are
yielded in an unspecified order.
Arguments:
address (str): the address to be walked
'''
for step in self._walk_to_address(address):
node = step
yield node.address, node.data
to_process = deque()
to_process.extendleft(
node.children)
while to_process:
node = to_process.pop()
yield node.address, node.data
if node.children:
to_process.extendleft(
node.children) | Returns a stream of pairs of node addresses and data, raising
AddressNotInTree if ADDRESS is not in the tree.
First the ancestors of ADDRESS (including itself) are yielded,
earliest to latest, and then the descendants of ADDRESS are
yielded in an unspecified order.
Arguments:
address (str): the address to be walked |
def is_same_channel(self, left, right):
""" Check if given nicknames are equal in the server's case mapping. """
return self.normalize(left) == self.normalize(right) | Check if given nicknames are equal in the server's case mapping. |
def memoize(func):
""" Memoization decorator for a function taking one or more arguments. """
class Memodict(dict):
""" just a dict"""
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
""" this makes it faster """
ret = self[key] = func(*key)
return ret
return Memodict().__getitem__ | Memoization decorator for a function taking one or more arguments. |
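A short usage sketch, assuming the memoize decorator above is in scope; recursive calls go through the cache because the module-level name is rebound to the memoized callable.
@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(200))   # completes instantly; each value is computed only once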
def get_queryset(qs=None, app=DEFAULT_APP, db_alias=None):
"""
>>> get_queryset('Permission', app='django.contrib.auth').count() > 0
True
"""
# app = get_app(app);
# print 'get_model' + repr(model) + ' app ' + repr(app)
if isinstance(qs, (djmodels.Manager, djmodels.query.QuerySet)):
qs = qs.all()
else:
qs = get_model(qs, app=app).objects.all()
if db_alias:
return qs.using(db_alias)
else:
return qs | >>> get_queryset('Permission', app='django.contrib.auth').count() > 0
True |
async def health_check(self) -> Iterator[HealthCheckFail]:
"""
Perform the checks. So far:
- Make a list of the unique destination states from the transitions
list, then check the health of each of them.
"""
ds_class = getattr(settings, 'DEFAULT_STATE', '')
forbidden_defaults = [None, '', 'bernard.engine.state.DefaultState']
if ds_class in forbidden_defaults:
yield HealthCheckFail(
'00005',
f'Default state (`DEFAULT_STATE` in settings) is not set. '
f'You need to set it to your own implementation. Please refer '
f'yourself to the doc. See '
f'https://github.com/BernardFW/bernard/blob/develop/doc/'
f'get_started.md#statespy'
)
try:
import_class(ds_class)
except (ImportError, KeyError, AttributeError, TypeError):
yield HealthCheckFail(
'00005',
f'Cannot import "{ds_class}", which is the value'
f' of `DEFAULT_STATE` in the configuration. This means either'
f' that your `PYTHONPATH` is wrong or that the value you gave'
f' to `DEFAULT_STATE` is wrong. You need to provide a default'
f' state class for this framework to work. Please refer'
f' yourself to the documentation for more information. See'
f' https://github.com/BernardFW/bernard/blob/develop/doc/'
f'get_started.md#statespy'
)
states = set(t.dest for t in self.transitions)
for state in states:
async for check in state.health_check():
yield check | Perform the checks. So far:
- Make a list of the unique destination states from the transitions
list, then check the health of each of them. |
def parse_share_url(share_url):
"""Return the group_id and share_token in a group's share url.
:param str share_url: the share url of a group
"""
*__, group_id, share_token = share_url.rstrip('/').split('/')
return group_id, share_token | Return the group_id and share_token in a group's share url.
:param str share_url: the share url of a group |
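An illustrative call; the exact share-link layout below is an assumption, but any URL whose last two path segments are the group id and share token behaves the same way.
group_id, share_token = parse_share_url('https://groupme.com/join_group/12345678/AbCdEf')
print(group_id, share_token)   # -> 12345678 AbCdEf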
def telegram(self) -> list:
"""Returns list of Telegram compatible states of the RichMessage
instance nested controls.
Returns:
telegram_controls: Telegram representation of RichMessage instance nested
controls.
"""
telegram_controls = [control.telegram() for control in self.controls]
return telegram_controls | Returns list of Telegram compatible states of the RichMessage
instance nested controls.
Returns:
telegram_controls: Telegram representation of RichMessage instance nested
controls. |
async def peek(self, task_id):
"""
Get task without changing its state
:param task_id: Task id
:return: Task instance
"""
args = (task_id,)
res = await self.conn.call(self.__funcs['peek'], args)
return self._create_task(res.body) | Get task without changing its state
:param task_id: Task id
:return: Task instance |
def rooms_info(self, room_id=None, room_name=None):
"""Retrieves the information about the room."""
if room_id is not None:
return self.__call_api_get('rooms.info', roomId=room_id)
elif room_name is not None:
return self.__call_api_get('rooms.info', roomName=room_name)
else:
raise RocketMissingParamException('roomId or roomName required') | Retrieves the information about the room. |
def avail_images(kwargs=None, call=None):
'''
Return a dict of all available VM images on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
if 'owner' in kwargs:
owner = kwargs['owner']
else:
provider = get_configured_provider()
owner = config.get_cloud_config_value(
'owner', provider, __opts__, default='amazon'
)
ret = {}
params = {'Action': 'DescribeImages',
'Owner': owner}
images = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for image in images:
ret[image['imageId']] = image
return ret | Return a dict of all available VM images on the cloud provider. |
def get_ip_addresses():
"""
    :return: all known IP Addresses
"""
LOGGER.debug("IPAddressService.get_ip_addresses")
args = {'http_operation': 'GET', 'operation_path': ''}
response = IPAddressService.requester.call(args)
ret = None
if response.rc == 0:
ret = []
for ipAddress in response.response_content['ipAddresses']:
ret.append(IPAddress.json_2_ip_address(ipAddress))
elif response.rc != 404:
err_msg = 'IPAddressService.get_ip_addresses - Problem while getting IP Address. ' \
'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
    return ret | :return: all known IP Addresses
def filter_backends(backends, filters=None, **kwargs):
"""Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions.
"""
def _match_all(obj, criteria):
"""Return True if all items in criteria matches items in obj."""
return all(getattr(obj, key_, None) == value_ for
key_, value_ in criteria.items())
# Inspect the backends to decide which filters belong to
# backend.configuration and which ones to backend.status, as it does
# not involve querying the API.
configuration_filters = {}
status_filters = {}
for key, value in kwargs.items():
if all(key in backend.configuration() for backend in backends):
configuration_filters[key] = value
else:
status_filters[key] = value
# 1. Apply backend.configuration filtering.
if configuration_filters:
backends = [b for b in backends if
_match_all(b.configuration(), configuration_filters)]
# 2. Apply backend.status filtering (it involves one API call for
# each backend).
if status_filters:
backends = [b for b in backends if
_match_all(b.status(), status_filters)]
# 3. Apply acceptor filter.
backends = list(filter(filters, backends))
return backends | Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions. |
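A self-contained sketch with stand-in backends; the AttrDict and FakeBackend classes are invented here purely to mimic the configuration()/status() interface the function inspects (real backends are qiskit BaseBackend instances).
class AttrDict(dict):
    # Dict whose keys are also readable as attributes; it stands in for the
    # configuration()/status() objects that filter_backends inspects.
    __getattr__ = dict.get

class FakeBackend:
    # Minimal stand-in exposing configuration() and status().
    def __init__(self, config, status):
        self._config, self._status = AttrDict(config), AttrDict(status)
    def configuration(self):
        return self._config
    def status(self):
        return self._status

backends = [
    FakeBackend({'n_qubits': 5, 'simulator': False}, {'operational': True}),
    FakeBackend({'n_qubits': 32, 'simulator': True}, {'operational': False}),
]

# 'simulator' appears in every configuration(), so it is applied as a
# configuration filter; 'operational' falls through to status() filtering.
matching = filter_backends(backends, simulator=False, operational=True)
print(len(matching))   # 1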
def collapse(self, id_user):
"""Collapse comment beloging to user."""
c = CmtCOLLAPSED(id_bibrec=self.id_bibrec, id_cmtRECORDCOMMENT=self.id,
id_user=id_user)
db.session.add(c)
        db.session.commit() | Collapse comment belonging to user.
def set_cookie(self, kaka, request):
"""Returns a http_cookiejar.Cookie based on a set-cookie header line"""
if not kaka:
return
part = urlparse(request.url)
_domain = part.hostname
logger.debug("%s: '%s'", _domain, kaka)
for cookie_name, morsel in kaka.items():
std_attr = ATTRS.copy()
std_attr["name"] = cookie_name
_tmp = morsel.coded_value
if _tmp.startswith('"') and _tmp.endswith('"'):
std_attr["value"] = _tmp[1:-1]
else:
std_attr["value"] = _tmp
std_attr["version"] = 0
# copy attributes that have values
for attr in morsel.keys():
if attr in ATTRS:
if morsel[attr]:
if attr == "expires":
std_attr[attr] = _since_epoch(morsel[attr])
elif attr == "path":
if morsel[attr].endswith(","):
std_attr[attr] = morsel[attr][:-1]
else:
std_attr[attr] = morsel[attr]
else:
std_attr[attr] = morsel[attr]
elif attr == "max-age":
if morsel["max-age"]:
std_attr["expires"] = time.time() + int(morsel["max-age"])
for att, item in PAIRS.items():
if std_attr[att]:
std_attr[item] = True
if std_attr["domain"]:
if std_attr["domain"].startswith("."):
std_attr["domain_initial_dot"] = True
else:
std_attr["domain"] = _domain
std_attr["domain_specified"] = True
if morsel["max-age"] is 0:
try:
self.cookiejar.clear(domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"])
except ValueError:
pass
elif std_attr["expires"] and std_attr["expires"] < utc_now():
try:
self.cookiejar.clear(domain=std_attr["domain"],
path=std_attr["path"],
name=std_attr["name"])
except ValueError:
pass
else:
new_cookie = http_cookiejar.Cookie(**std_attr)
self.cookiejar.set_cookie(new_cookie) | Returns a http_cookiejar.Cookie based on a set-cookie header line |
def make_private(self, recursive=False, future=False, client=None):
"""Update bucket's ACL, revoking read access for anonymous users.
:type recursive: bool
:param recursive: If True, this will make all blobs inside the bucket
private as well.
:type future: bool
:param future: If True, this will make all objects created in the
future private as well.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises ValueError:
If ``recursive`` is True, and the bucket contains more than 256
blobs. This is to prevent extremely long runtime of this
method. For such buckets, iterate over the blobs returned by
:meth:`list_blobs` and call
:meth:`~google.cloud.storage.blob.Blob.make_private`
for each blob.
"""
self.acl.all().revoke_read()
self.acl.save(client=client)
if future:
doa = self.default_object_acl
if not doa.loaded:
doa.reload(client=client)
doa.all().revoke_read()
doa.save(client=client)
if recursive:
blobs = list(
self.list_blobs(
projection="full",
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
client=client,
)
)
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
"Refusing to make private recursively with more than "
"%d objects. If you actually want to make every object "
"in this bucket private, iterate through the blobs "
"returned by 'Bucket.list_blobs()' and call "
"'make_private' on each one."
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
for blob in blobs:
blob.acl.all().revoke_read()
blob.acl.save(client=client) | Update bucket's ACL, revoking read access for anonymous users.
:type recursive: bool
:param recursive: If True, this will make all blobs inside the bucket
private as well.
:type future: bool
:param future: If True, this will make all objects created in the
future private as well.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises ValueError:
If ``recursive`` is True, and the bucket contains more than 256
blobs. This is to prevent extremely long runtime of this
method. For such buckets, iterate over the blobs returned by
:meth:`list_blobs` and call
:meth:`~google.cloud.storage.blob.Blob.make_private`
for each blob. |
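A brief usage sketch with the google-cloud-storage client; the bucket name is a placeholder and default application credentials are assumed.
from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket('my-example-bucket')   # placeholder name
# Revoke anonymous read on the bucket, every existing blob, and future objects.
bucket.make_private(recursive=True, future=True, client=client)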
def show(self, dump=False, indent=3, lvl="", label_lvl=""):
"""
Prints or returns (when "dump" is true) a hierarchical view of the
packet.
:param dump: determine if it prints or returns the string value
:param int indent: the size of indentation for each layer
:param str lvl: additional information about the layer lvl
:param str label_lvl: additional information about the layer fields
:return: return a hierarchical view if dump, else print it
"""
return self._show_or_dump(dump, indent, lvl, label_lvl) | Prints or returns (when "dump" is true) a hierarchical view of the
packet.
:param dump: determine if it prints or returns the string value
:param int indent: the size of indentation for each layer
:param str lvl: additional information about the layer lvl
:param str label_lvl: additional information about the layer fields
:return: return a hierarchical view if dump, else print it |
def is_declared(self, expression_var):
""" True if expression_var is declared in this constraint set """
if not isinstance(expression_var, Variable):
raise ValueError(f'Expression must be a Variable (not a {type(expression_var)})')
return any(expression_var is x for x in self.get_declared_variables()) | True if expression_var is declared in this constraint set |
def get_lock(lockfile):
"""
Tries to write a lockfile containing the current pid. Excepts if
the lockfile already contains the pid of a running process.
Although this should prevent a lock from being granted twice, it
can theoretically deny a lock unjustly in the unlikely event that
the original process is gone but another unrelated process has
been assigned the same pid by the OS.
"""
pidfile = open(lockfile, "a+")
# here we do some meta-locking by getting an exclusive lock on the
# pidfile before reading it, to prevent two daemons from seeing a
# stale lock at the same time, and both trying to run
try:
fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
except IOError,e:
raise RuntimeError, "failed to lock %s: %s" % (lockfile, e)
# we got the file lock, so check for a pid therein
pidfile.seek(0)
pidfile_pid = pidfile.readline().strip()
if pidfile_pid.isdigit():
if pycbc_glue.utils.pid_exists(int(pidfile_pid)):
raise RuntimeError, ("pidfile %s contains pid (%s) of a running "
"process" % (lockfile, pidfile_pid))
else:
print ("pidfile %s contains stale pid %s; writing new lock" %
(lockfile, pidfile_pid))
# the pidfile didn't exist or was stale, so grab a new lock
pidfile.truncate(0)
pidfile.write("%d\n" % os.getpid())
pidfile.close()
# should be entirely unecessary, but paranoia always served me well
confirm_lock(lockfile)
return True | Tries to write a lockfile containing the current pid. Excepts if
the lockfile already contains the pid of a running process.
Although this should prevent a lock from being granted twice, it
can theoretically deny a lock unjustly in the unlikely event that
the original process is gone but another unrelated process has
been assigned the same pid by the OS. |
async def check_permissions(self, action: str, **kwargs):
"""
Check if the action should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
for permission in await self.get_permissions(action=action, **kwargs):
if not await ensure_async(permission.has_permission)(
scope=self.scope, consumer=self, action=action, **kwargs):
raise PermissionDenied() | Check if the action should be permitted.
Raises an appropriate exception if the request is not permitted. |
def autohide(obj):
"""
Automatically hide setup() and teardown() methods, recursively.
"""
# Members on obj
for name, item in six.iteritems(vars(obj)):
if callable(item) and name in ('setup', 'teardown'):
item = hide(item)
# Recurse into class members
for name, subclass in class_members(obj):
autohide(subclass) | Automatically hide setup() and teardown() methods, recursively. |
def migrate_config(self, current_config, config_to_migrate,
always_update, update_defaults):
"""Migrate config value in current_config, updating config_to_migrate.
Given the current_config object, it will attempt to find a value
based on all the names given. If no name could be found, then it
will simply set the value to the default.
If a value is found and is in the list of previous_defaults, it will
either update or keep the old value based on if update_defaults is
set.
If a non-default value is set it will either keep this value or update
it based on if ``always_update`` is true.
Args:
current_config (dict): Current configuration.
config_to_migrate (dict): Config to update.
always_update (bool): Always update value.
update_defaults (bool): Update values found in previous_defaults
"""
value = self._search_config_for_possible_names(current_config)
self._update_config(config_to_migrate, value,
always_update, update_defaults) | Migrate config value in current_config, updating config_to_migrate.
Given the current_config object, it will attempt to find a value
based on all the names given. If no name could be found, then it
will simply set the value to the default.
If a value is found and is in the list of previous_defaults, it will
either update or keep the old value based on if update_defaults is
set.
If a non-default value is set it will either keep this value or update
it based on if ``always_update`` is true.
Args:
current_config (dict): Current configuration.
config_to_migrate (dict): Config to update.
always_update (bool): Always update value.
update_defaults (bool): Update values found in previous_defaults |
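A rough sketch of the decision rule the docstring describes; the helper name and the self.name / self.default / self.previous_defaults attributes are assumptions, not the project's actual implementation:

def _update_config_sketch(self, config_to_migrate, value,
                          always_update, update_defaults):
    if value is None:
        # No value found under any known name: fall back to the default.
        config_to_migrate[self.name] = self.default
    elif value in self.previous_defaults:
        # An old default was found: refresh it only when update_defaults is set.
        config_to_migrate[self.name] = self.default if update_defaults else value
    else:
        # A user-set value was found: overwrite it only when always_update is set.
        config_to_migrate[self.name] = self.default if always_update else value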
def get_apo(self, symbol, interval='daily', series_type='close',
fastperiod=None, slowperiod=None, matype=None):
""" Return the absolute price oscillator values in two
json objects as data and meta_data. It raises ValueError when problems
arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
            interval: time interval between two consecutive values,
                supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
                'weekly', 'monthly' (default '60min')
series_type: The desired price type in the time series. Four types
are supported: 'close', 'open', 'high', 'low' (default 'close')
fastperiod: Positive integers are accepted (default=None)
slowperiod: Positive integers are accepted (default=None)
matype : Moving average type. By default, fastmatype=0.
Integers 0 - 8 are accepted (check down the mappings) or the string
containing the math type can also be used.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
"""
_FUNCTION_KEY = "APO"
return _FUNCTION_KEY, 'Technical Analysis: APO', 'Meta Data' | Return the absolute price oscillator values in two
json objects as data and meta_data. It raises ValueError when problems
arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
            interval: time interval between two consecutive values,
                supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
                'weekly', 'monthly' (default '60min')
series_type: The desired price type in the time series. Four types
are supported: 'close', 'open', 'high', 'low' (default 'close')
fastperiod: Positive integers are accepted (default=None)
slowperiod: Positive integers are accepted (default=None)
matype : Moving average type. By default, fastmatype=0.
Integers 0 - 8 are accepted (check down the mappings) or the string
containing the math type can also be used.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA) |
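A usage sketch, assuming this method belongs to alpha_vantage's TechIndicators class, whose decorators resolve the returned keys into actual (data, meta_data) JSON; the API key, symbol, and period values are placeholders:

from alpha_vantage.techindicators import TechIndicators

ti = TechIndicators(key="YOUR_API_KEY", output_format="json")
apo_data, apo_meta = ti.get_apo(symbol="MSFT", interval="daily",
                                series_type="close", fastperiod=10,
                                slowperiod=26, matype=1)
print(apo_meta)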
def _get_method_kwargs(self):
"""
Helper method. Returns kwargs needed to filter the correct object.
Can also be used to create the correct object.
"""
method_kwargs = {
'user': self.user,
'content_type': self.ctype,
'object_id': self.content_object.pk,
}
return method_kwargs | Helper method. Returns kwargs needed to filter the correct object.
Can also be used to create the correct object. |
def set_value(self, selector, new_value, by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT):
""" This method uses JavaScript to update a text field. """
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
orginal_selector = selector
css_selector = self.convert_to_css_selector(selector, by=by)
self.__demo_mode_highlight_if_active(orginal_selector, by)
if not self.demo_mode:
self.scroll_to(orginal_selector, by=by, timeout=timeout)
value = re.escape(new_value)
value = self.__escape_quotes_if_needed(value)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""document.querySelector('%s').value='%s';"""
% (css_selector, value))
self.execute_script(script)
if new_value.endswith('\n'):
element = self.wait_for_element_present(
orginal_selector, by=by, timeout=timeout)
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active() | This method uses JavaScript to update a text field. |
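An illustrative SeleniumBase-style test using set_value(); the URL and selector are assumptions. A trailing newline in the value makes the method press RETURN after updating the field:

from seleniumbase import BaseCase

class SearchFormTest(BaseCase):
    def test_fill_search_box(self):
        self.open("https://example.com/search")       # hypothetical page
        self.set_value("input#search", "selenium\n")  # '\n' triggers RETURN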
def load_ref_spectra():
""" Pull out wl, flux, ivar from files of training spectra """
data_dir = "/Users/annaho/Data/AAOmega/ref_spectra"
# Load the files & count the number of training objects
ff = glob.glob("%s/*.txt" %data_dir)
nstars = len(ff)
print("We have %s training objects" %nstars)
# Read the first file to get the wavelength array
f = ff[0]
data = Table.read(f, format="ascii.fast_no_header")
wl = data['col1']
npix = len(wl)
print("We have %s pixels" %npix)
tr_flux = np.zeros((nstars,npix))
tr_ivar = np.zeros(tr_flux.shape)
for i,f in enumerate(ff):
data = Table.read(f, format="ascii.fast_no_header")
flux = data['col2']
tr_flux[i,:] = flux
sigma = data['col3']
tr_ivar[i,:] = 1.0 / sigma**2
return np.array(ff), wl, tr_flux, tr_ivar | Pull out wl, flux, ivar from files of training spectra |
def index_buffer(self, buffer, index_element_size=4):
"""
Set the index buffer for this VAO
Args:
buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``
Keyword Args:
index_element_size (int): Byte size of each element. 1, 2 or 4
"""
if not type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes]:
raise VAOError("buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance")
if isinstance(buffer, numpy.ndarray):
buffer = self.ctx.buffer(buffer.tobytes())
if isinstance(buffer, bytes):
buffer = self.ctx.buffer(data=buffer)
self._index_buffer = buffer
self._index_element_size = index_element_size | Set the index buffer for this VAO
Args:
buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``
Keyword Args:
index_element_size (int): Byte size of each element. 1, 2 or 4 |
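A short sketch of feeding index data to this method; it assumes vao is an instance of the surrounding VAO wrapper with its moderngl context already initialized, and the quad indices are illustrative:

import numpy

# Two triangles forming a quad, as unsigned 32-bit indices (4 bytes each).
indices = numpy.array([0, 1, 2, 2, 3, 0], dtype="u4")
vao.index_buffer(indices, index_element_size=4)  # vao: instance of the VAO class above (assumption)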
def DeserializeForImport(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
super(Block, self).Deserialize(reader)
self.Transactions = []
transaction_length = reader.ReadVarInt()
for i in range(0, transaction_length):
tx = Transaction.DeserializeFrom(reader)
self.Transactions.append(tx)
if len(self.Transactions) < 1:
raise Exception('Invalid format %s ' % self.Index) | Deserialize full object.
Args:
reader (neo.IO.BinaryReader): |
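A rough sketch of driving this deserializer from raw block bytes; the stream and reader import paths differ between neo-python versions, so the ones below are assumptions rather than a confirmed API:

from neo.IO.MemoryStream import StreamManager
from neocore.IO.BinaryReader import BinaryReader

def load_block_from_bytes(raw: bytes) -> Block:
    # raw: a serialized block, e.g. read from a chain export file
    stream = StreamManager.GetStream(raw)
    reader = BinaryReader(stream)
    block = Block()
    block.DeserializeForImport(reader)   # raises on an empty transaction list
    StreamManager.ReleaseStream(stream)
    return block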