def render(self, context):
"""
The default Django render() method for the tag.
This method resolves the filter expressions, and calls :func:`render_tag`.
"""
# Resolve token kwargs
tag_args = [expr.resolve(context) for expr in self.args] if self.compile_args else self.args
tag_kwargs = dict([(name, expr.resolve(context)) for name, expr in six.iteritems(self.kwargs)]) if self.compile_kwargs else self.kwargs
return self.render_tag(context, *tag_args, **tag_kwargs)

def _pdf(self, x, dist, cache):
"""Probability density function."""
return evaluation.evaluate_density(
dist, numpy.arctan(x), cache=cache)/(1+x*x)

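A note on the arithmetic above, inferred from the code itself rather than from any library documentation: this looks like the change-of-variables density for Y = tan(X). If X has density f_X, then f_Y(y) = f_X(arctan y) / (1 + y^2), which is exactly the form returned here (evaluate the wrapped density at arctan(x) and divide by 1 + x*x).
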
def _finalize_nonblock_blob(self, sd, metadata, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict,
# str) -> None
"""Finalize Non-Block blob
:param SyncCopy self: this
:param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor
:param dict metadata: metadata dict
:param str digest: md5 digest
"""
# set md5 page blob property if required
if (blobxfer.util.is_not_empty(digest) or
sd.dst_entity.cache_control is not None):
self._set_blob_properties(sd, digest)
# set metadata if needed
if blobxfer.util.is_not_empty(metadata):
self._set_blob_metadata(sd, metadata)

def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile,
storeStFeatures=False, storeToCSV=False, PLOT=False):
"""
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
"""
[fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
if storeStFeatures:
[mtF, stF, _] = mtFeatureExtraction(x, fs,
round(fs * midTermSize),
round(fs * midTermStep),
round(fs * shortTermSize),
round(fs * shortTermStep))
else:
[mtF, _, _] = mtFeatureExtraction(x, fs, round(fs*midTermSize),
round(fs * midTermStep),
round(fs * shortTermSize),
round(fs * shortTermStep))
# save mt features to numpy file
numpy.save(outPutFile, mtF)
if PLOT:
print("Mid-term numpy file: " + outPutFile + ".npy saved")
if storeToCSV:
numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",")
if PLOT:
print("Mid-term CSV file: " + outPutFile + ".csv saved")
if storeStFeatures:
# save st features to numpy file
numpy.save(outPutFile+"_st", stF)
if PLOT:
print("Short-term numpy file: " + outPutFile + "_st.npy saved")
if storeToCSV:
# store st features to CSV file
numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",")
if PLOT:
print("Short-term CSV file: " + outPutFile + "_st.csv saved") | This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file | Below is the the instruction that describes the task:
### Input:
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
### Response:
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile,
storeStFeatures=False, storeToCSV=False, PLOT=False):
"""
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
"""
[fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
if storeStFeatures:
[mtF, stF, _] = mtFeatureExtraction(x, fs,
round(fs * midTermSize),
round(fs * midTermStep),
round(fs * shortTermSize),
round(fs * shortTermStep))
else:
[mtF, _, _] = mtFeatureExtraction(x, fs, round(fs*midTermSize),
round(fs * midTermStep),
round(fs * shortTermSize),
round(fs * shortTermStep))
# save mt features to numpy file
numpy.save(outPutFile, mtF)
if PLOT:
print("Mid-term numpy file: " + outPutFile + ".npy saved")
if storeToCSV:
numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",")
if PLOT:
print("Mid-term CSV file: " + outPutFile + ".csv saved")
if storeStFeatures:
# save st features to numpy file
numpy.save(outPutFile+"_st", stF)
if PLOT:
print("Short-term numpy file: " + outPutFile + "_st.npy saved")
if storeToCSV:
# store st features to CSV file
numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",")
if PLOT:
print("Short-term CSV file: " + outPutFile + "_st.csv saved") |
def getscale(self):
"""Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale
"""
# Get dimension info. If data_type is 0, no scale has been set
# on the dimension.
status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id)
_checkErr('getscale', status, 'cannot execute')
if data_type == 0:
raise HDF4Error("no scale set on that dimension")
# dim_size is 0 for an unlimited dimension. The actual length is
# obtained through SDgetinfo.
if dim_size == 0:
dim_size = self._sds.info()[2][self._index]
# Get scale values.
if data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(dim_size)
elif data_type == SDC.INT8:
buf = _C.array_int8(dim_size)
elif data_type == SDC.INT16:
buf = _C.array_int16(dim_size)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(dim_size)
elif data_type == SDC.INT32:
buf = _C.array_int32(dim_size)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(dim_size)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(dim_size)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(dim_size)
else:
raise HDF4Error("getscale: dimension has an "\
"illegal or unsupported type %d" % data_type)
status = _C.SDgetdimscale(self._id, buf)
_checkErr('getscale', status, 'cannot execute')
return _array_to_ret(buf, dim_size)

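A hedged usage sketch for getscale(), assuming the pyhdf package and an HDF4 file containing a dataset named "temperature" (the file and dataset names are made up):

from pyhdf.SD import SD, SDC

sd = SD("example.hdf", SDC.READ)   # open the HDF4 file read-only
sds = sd.select("temperature")     # pick a scientific dataset
dim0 = sds.dim(0)                  # first dimension of that dataset
scale = dim0.getscale()            # list of scale values along the dimension
sds.endaccess()
sd.end()
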
def lang(self):
""" Languages this text is in
:return: List of available languages
"""
return str(self.graph.value(self.asNode(), DC.language))

def read_moc_json(moc, filename=None, file=None):
"""Read JSON encoded data into a MOC.
Either a filename, or an open file object can be specified.
"""
if file is not None:
obj = _read_json(file)
else:
with open(filename, 'rb') as f:
obj = _read_json(f)
for (order, cells) in obj.items():
moc.add(order, cells)

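An illustrative sketch of how this reader can be driven. The stand-in class below is hypothetical; the loop above only requires an object with an add(order, cells) method, and the file name is made up.

class _CollectingMOC:
    # Hypothetical stand-in: records cells per order, as a real MOC object would.
    def __init__(self):
        self.orders = {}

    def add(self, order, cells):
        self.orders.setdefault(int(order), []).extend(cells)

moc = _CollectingMOC()
# The JSON file is expected to map orders to cell lists, e.g. {"1": [1, 2, 4], "2": [12, 13]}.
# read_moc_json(moc, filename="example_moc.json")
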
def get_fleet(self, airline_key):
"""Get the fleet for a particular airline.
Given an airline code from the get_airlines() method output, this method returns the fleet for the airline.
Args:
airline_key (str): The code for the airline on flightradar24
Returns:
A list of dicts, one for each aircraft in the airline's fleet
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_fleet('ai-aic')
"""
url = AIRLINE_FLEET_BASE.format(airline_key)
return self._fr24.get_airline_fleet_data(url, self.AUTH_TOKEN != '')

def connect(self, dests=[], name=None, id='', props={}):
'''Connect this port to other ports.
After the connection has been made, a delayed reparse of the
connections for this and the destination port will be triggered.
@param dests A list of the destination Port objects. Must be provided.
@param name The name of the connection. If None, a suitable default
will be created based on the names of the two ports.
@param id The ID of this connection. If None, one will be generated by
the RTC implementation.
@param props Properties of the connection. Required values depend on
the type of the two ports being connected.
@raises IncompatibleDataPortConnectionPropsError, FailedToConnectError
'''
with self._mutex:
if self.porttype == 'DataInPort' or self.porttype == 'DataOutPort':
for prop in props:
if prop in self.properties:
if props[prop] not in [x.strip() for x in self.properties[prop].split(',')] and \
'any' not in self.properties[prop].lower():
# Invalid property selected
raise exceptions.IncompatibleDataPortConnectionPropsError
for d in dests:
if prop in d.properties:
if props[prop] not in [x.strip() for x in d.properties[prop].split(',')] and \
'any' not in d.properties[prop].lower():
# Invalid property selected
raise exceptions.IncompatibleDataPortConnectionPropsError
if not name:
name = self.name + '_'.join([d.name for d in dests])
props = utils.dict_to_nvlist(props)
profile = RTC.ConnectorProfile(name, id,
[self._obj] + [d._obj for d in dests], props)
return_code, profile = self._obj.connect(profile)
if return_code != RTC.RTC_OK:
raise exceptions.FailedToConnectError(return_code)
self.reparse_connections()
for d in dests:
d.reparse_connections()

def read_raw_parser_conf(data: str) -> dict:
"""We expect to have a section like this
```
[commitizen]
name = cz_jira
files = [
"commitizen/__version__.py",
"pyproject.toml"
] # this tab at the end is important
```
"""
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(data)
try:
_data: dict = dict(config["commitizen"])
if "files" in _data:
files = _data["files"]
_f = json.loads(files)
_data.update({"files": _f})
return _data
except KeyError:
return {}

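A small usage sketch with an inline sample; the section contents are hypothetical but follow the format shown in the docstring.

sample = '''
[commitizen]
name = cz_jira
files = [
    "commitizen/__version__.py",
    "pyproject.toml"
    ]
'''

conf = read_raw_parser_conf(sample)
# conf == {'name': 'cz_jira', 'files': ['commitizen/__version__.py', 'pyproject.toml']}
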
def result_subsets(self, rs):
"""Break a result set into subsets with the same keys.
:param rs: Result set, rows of a result as a list of dicts
:type rs: list of dict
:return: A set with distinct keys (tuples), and a dict, by these tuples, of max. widths for each column
"""
keyset, maxwid = set(), {}
for r in rs:
key = tuple(sorted(r.keys()))
keyset.add(key)
if key not in maxwid:
maxwid[key] = [len(k) for k in key]
for i, k in enumerate(key):
strlen = len("{}".format(r[k]))
maxwid[key][i] = max(maxwid[key][i], strlen)
return keyset, maxwid

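An illustrative call with made-up rows. The method does not touch instance state, so any instance of the containing class will do (obj below is hypothetical).

rows = [
    {'a': 1, 'b': 'xx'},
    {'a': 10, 'b': 'y'},
    {'c': 3},
]
keyset, maxwid = obj.result_subsets(rows)
# keyset == {('a', 'b'), ('c',)}
# maxwid == {('a', 'b'): [2, 2], ('c',): [1]}
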
def blockSignals( self, state ):
"""
Blocks the signals for this widget and its sub-parts.
:param state | <bool>
"""
super(XLocationWidget, self).blockSignals(state)
self._locationEdit.blockSignals(state)
self._locationButton.blockSignals(state)

def enable_call_trace():
""" Enable trace for calls to any function. """
def tracer(frame, event, arg):
if event == 'call':
co = frame.f_code
func_name = co.co_name
if func_name == 'write' or func_name == 'print':
# ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
if caller:
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print('Call to `%s` on line %s:%s from %s:%s' %
(func_name, func_filename, func_line_no,
caller_filename, caller_line_no))
return
sys.settrace(tracer)

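A quick self-contained demonstration; the traced function below is made up, and sys is already imported by the module above.

enable_call_trace()

def greet():
    return "hello"

greet()             # the tracer prints something like:
                    # Call to `greet` on line /path/to/script.py:3 from /path/to/script.py:7
sys.settrace(None)  # turn tracing back off
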
def _get_model_param_names(cls):
r"""Get parameter names for the model"""
# fetch model parameters
if hasattr(cls, 'set_model_params'):
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = getargspec_no_self(cls.set_model_params)
if varargs is not None:
raise RuntimeError("PyEMMA models should always specify their parameters in the signature"
" of their set_model_params (no varargs). %s doesn't follow this convention."
% (cls,))
return args
else:
# No parameters known
return []

def count_stages(self, matrix_name):
"""
Number of registered stages for given matrix name.
Parameters:
matrix_name (str): name of the matrix
Returns:
int: number of reported stages for given matrix name.
"""
return len(self.data[matrix_name]) if matrix_name in self.data else 0

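An illustrative use, assuming a hypothetical report object whose data attribute maps matrix names to lists of stages.

report.data = {'matrix-a': ['build', 'test', 'deploy']}
report.count_stages('matrix-a')   # -> 3
report.count_stages('unknown')    # -> 0
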
def commit_fw_db_result(self):
"""Calls routine to update the FW create/delete result in DB. """
fw_dict = self.get_fw_dict()
self.update_fw_db_result(fw_dict.get('fw_id'), fw_dict)

def _open_url(cls, url):
"""Given a CFURL Python object, return an opened ExtAudioFileRef.
"""
file_obj = ctypes.c_void_p()
check(_coreaudio.ExtAudioFileOpenURL(
url._obj, ctypes.byref(file_obj)
))
return file_obj | Given a CFURL Python object, return an opened ExtAudioFileRef. | Below is the the instruction that describes the task:
### Input:
Given a CFURL Python object, return an opened ExtAudioFileRef.
### Response:
def _open_url(cls, url):
"""Given a CFURL Python object, return an opened ExtAudioFileRef.
"""
file_obj = ctypes.c_void_p()
check(_coreaudio.ExtAudioFileOpenURL(
url._obj, ctypes.byref(file_obj)
))
return file_obj |
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a source file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
for ext in GetHeaderExtensions():
basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
headerfile = basefilename + '.' + ext
if not os.path.exists(headerfile):
continue
headername = FileInfo(headerfile).RepositoryName()
first_include = None
for section_list in include_state.include_list:
for f in section_list:
if headername in f[0] or f[0] in headername:
return
if not first_include:
first_include = f[1]
error(filename, first_include, 'build/include', 5,
'%s should include its header file %s' % (fileinfo.RepositoryName(),
headername))

def transform_26_27(inst, new_inst, i, n, offset,
instructions, new_asm):
"""Change JUMP_IF_FALSE and JUMP_IF_TRUE to
POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE"""
if inst.opname in ('JUMP_IF_FALSE', 'JUMP_IF_TRUE'):
i += 1
assert i < n
assert instructions[i].opname == 'POP_TOP'
new_inst.offset = offset
new_inst.opname = (
'POP_JUMP_IF_FALSE' if inst.opname == 'JUMP_IF_FALSE' else 'POP_JUMP_IF_TRUE'
)
new_asm.backpatch[-1].remove(inst)
new_inst.arg = 'L%d' % (inst.offset + inst.arg + 3)
new_asm.backpatch[-1].add(new_inst)
else:
xlate26_27(new_inst)
return xdis.op_size(new_inst.opcode, opcode_27)

def kernel(self, kernel):
"""
Sets the kernel.
:param kernel: the kernel to set
:type kernel: Kernel
"""
result = javabridge.static_call(
"weka/classifiers/KernelHelper", "setKernel",
"(Ljava/lang/Object;Lweka/classifiers/functions/supportVector/Kernel;)Z",
self.jobject, kernel.jobject)
if not result:
raise Exception("Failed to set kernel!") | Sets the kernel.
:param kernel: the kernel to set
:type kernel: Kernel | Below is the the instruction that describes the task:
### Input:
Sets the kernel.
:param kernel: the kernel to set
:type kernel: Kernel
### Response:
def kernel(self, kernel):
"""
Sets the kernel.
:param kernel: the kernel to set
:type kernel: Kernel
"""
result = javabridge.static_call(
"weka/classifiers/KernelHelper", "setKernel",
"(Ljava/lang/Object;Lweka/classifiers/functions/supportVector/Kernel;)Z",
self.jobject, kernel.jobject)
if not result:
raise Exception("Failed to set kernel!") |
def _create_axes_grid(length_plotters, rows, cols, **kwargs):
"""Create figure and axes for grids with multiple plots.
Parameters
----------
n_items : int
Number of panels required
rows : int
Number of rows
cols : int
Number of columns
Returns
-------
fig : matplotlib figure
ax : matplotlib axes
"""
kwargs.setdefault("constrained_layout", True)
fig, ax = plt.subplots(rows, cols, **kwargs)
ax = np.ravel(ax)
extra = (rows * cols) - length_plotters
if extra:
for i in range(1, extra + 1):
ax[-i].set_axis_off()
ax = ax[:-extra]
return fig, ax

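A hedged usage sketch; the panel count and figure size are arbitrary, and it relies on the same matplotlib/numpy imports the function itself uses.

# Five panels on a 2 x 3 grid; the unused sixth axis is switched off and dropped.
fig, axes = _create_axes_grid(5, 2, 3, figsize=(9, 6))
for i, ax in enumerate(axes):   # len(axes) == 5
    ax.set_title("panel %d" % i)
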
def update_build_configuration(id, **kwargs):
"""
Update an existing BuildConfiguration with new information
:param id: ID of BuildConfiguration to update
:param name: Name of BuildConfiguration to update
:return:
"""
data = update_build_configuration_raw(id, **kwargs)
if data:
return utils.format_json(data)

def sigma(G, corpus, featureset_name, B=None, **kwargs):
"""
Calculate sigma (from `Chen 2009 <http://arxiv.org/pdf/0904.1439.pdf>`_)
for all of the nodes in a :class:`.GraphCollection`\.
You can set parameters for burstness estimation using ``kwargs``:
========= ===============================================================
Parameter Description
========= ===============================================================
s Scaling parameter ( > 1.) that controls graininess of burst
detection. Lower values make the model more sensitive. Defaults
to 1.1.
gamma Parameter that controls the 'cost' of higher burst states.
Defaults to 1.0.
k Number of burst states. Defaults to 5.
========= ===============================================================
Parameters
----------
G : :class:`.GraphCollection`
corpus : :class:`.Corpus`
feature : str
Name of a featureset in `corpus`.
Examples
--------
Assuming that you have a :class:`.Corpus` generated from WoS data that has
been sliced by ``date``.
.. code-block:: python
>>> # Generate a co-citation graph collection.
>>> from tethne import GraphCollection
>>> kwargs = { 'threshold':2, 'topn':100 }
>>> G = GraphCollection()
>>> G.build(corpus, 'date', 'papers', 'cocitation', method_kwargs=kwargs)
>>> # Calculate sigma. This may take several minutes, depending on the
>>> # size of your co-citation graph collection.
>>> from tethne.analyze.corpus import sigma
>>> G = sigma(G, corpus, 'citations')
>>> # Visualize...
>>> from tethne.writers import collection
>>> collection.to_dxgmml(G, '~/cocitation.xgmml')
In the visualization below, node and label sizes are mapped to ``sigma``,
and border width is mapped to ``citations``.
.. figure:: _static/images/cocitation_sigma2.png
:width: 600
:align: center
"""
if 'date' not in corpus.indices:
corpus.index('date')
# Calculate burstness if not provided.
if not B:
B = burstness(corpus, featureset_name, features=G.nodes(), **kwargs)
Sigma = {} # Keys are dates (from GraphCollection), values are
# node:sigma dicts.
for key, graph in G.iteritems():
centrality = nx.betweenness_centrality(graph)
sigma = {} # Sigma values for all features in this year.
attrs = {} # Sigma values for only those features in this graph.
for n_, burst in B.iteritems():
burst = dict(list(zip(*burst))) # Reorganize for easier lookup.
# Nodes are indexed as integers in the GraphCollection.
n = G.node_lookup[n_]
# We have burstness values for years in which the feature ``n``
# occurs, and we have centrality values for years in which ``n``
# made it into the graph.
if n in graph.nodes() and key in burst:
sigma[n] = ((centrality[n] + 1.) ** burst[key]) - 1.
attrs[n] = sigma[n]
# Update graph with sigma values.
nx.set_node_attributes(graph, 'sigma', attrs)
Sigma[key] = sigma
# Invert results and update the GraphCollection.master_graph.
# TODO: is there a more efficient way to do this?
inverse = defaultdict(dict)
for gname, result in Sigma.iteritems():
if hasattr(result, '__iter__'):
for n, val in result.iteritems():
inverse[n].update({gname: val})
nx.set_node_attributes(G.master_graph, 'sigma', inverse)
# We want to return results in the same format as burstness(); with node
# labels as keys; values are tuples ([years...], [sigma...]).
return {n: list(zip(*G.node_history(G.node_lookup[n], 'sigma').items()))
for n in B.keys()}

def symbol_bollinger(symbol='GOOG',
start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='close', cleaner=clean_dataframe,
window=20, sigma=1.):
"""Calculate the Bolinger indicator value
>>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-1.8782...
"""
symbols = normalize_symbols(symbol)
prices = price_dataframe(symbols, start=start, end=end, price_type=price_type, cleaner=cleaner)
return series_bollinger(prices[symbols[0]], window=window, sigma=sigma, plot=False)

async def unixlisten(path, onlink):
'''
Start a PF_UNIX server listening on the given path.
'''
info = {'path': path, 'unix': True}
async def onconn(reader, writer):
link = await Link.anit(reader, writer, info=info)
link.schedCoro(onlink(link))
return await asyncio.start_unix_server(onconn, path=path)

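A hedged sketch of how the listener might be driven from an asyncio program; the socket path and handler body are hypothetical.

import asyncio

async def onlink(link):
    ...  # handle the newly accepted link here

async def main():
    server = await unixlisten('/tmp/demo.sock', onlink)
    async with server:
        await server.serve_forever()

# asyncio.run(main())
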
def set_comment(self, format, *args):
"""
Add comment to config item before saving to disk. You can add as many
comment lines as you like. If you use a null format, all comments are
deleted.
"""
return lib.zconfig_set_comment(self._as_parameter_, format, *args)

def _query(self, variables, operation, evidence=None, joint=True):
"""
This is a generalized query method that can be used for both query and map query.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
operation: str ('marginalize' | 'maximize')
The operation to do for passing messages between nodes.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.inference import BeliefPropagation
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = BeliefPropagation(model)
>>> phi_query = inference.query(['A', 'B'])
References
----------
Algorithm 10.4 Out-of-clique inference in clique tree
Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman.
"""
is_calibrated = self._is_converged(operation=operation)
# Calibrate the junction tree if not calibrated
if not is_calibrated:
self.calibrate()
if not isinstance(variables, (list, tuple, set)):
query_variables = [variables]
else:
query_variables = list(variables)
query_variables.extend(evidence.keys() if evidence else [])
# Find a tree T' such that query_variables are a subset of scope(T')
nodes_with_query_variables = set()
for var in query_variables:
nodes_with_query_variables.update(filter(lambda x: var in x, self.junction_tree.nodes()))
subtree_nodes = nodes_with_query_variables
# Conversion of set to tuple just for indexing
nodes_with_query_variables = tuple(nodes_with_query_variables)
# As the junction tree is a tree, there is only one path between any two nodes,
# so we can just take the path between any two nodes, regardless of their order.
for i in range(len(nodes_with_query_variables) - 1):
subtree_nodes.update(nx.shortest_path(self.junction_tree, nodes_with_query_variables[i],
nodes_with_query_variables[i + 1]))
subtree_undirected_graph = self.junction_tree.subgraph(subtree_nodes)
# Converting subtree into a junction tree
if len(subtree_nodes) == 1:
subtree = JunctionTree()
subtree.add_node(subtree_nodes.pop())
else:
subtree = JunctionTree(subtree_undirected_graph.edges())
# Select a root node. The root node will have only one neighbor.
if len(subtree.nodes()) == 1:
root_node = list(subtree.nodes())[0]
else:
root_node = tuple(filter(lambda x: len(list(subtree.neighbors(x))) == 1, subtree.nodes()))[0]
clique_potential_list = [self.clique_beliefs[root_node]]
# For other nodes in the subtree compute the clique potentials as follows
# As all the nodes are tuples, a plain set(root_node) won't work, as it would update the set with
# all the elements of the tuple; instead use set([root_node]) so that only the tuple itself is
# included, not its internal elements.
parent_nodes = set([root_node])
nodes_traversed = set()
while parent_nodes:
parent_node = parent_nodes.pop()
for child_node in set(subtree.neighbors(parent_node)) - nodes_traversed:
clique_potential_list.append(self.clique_beliefs[child_node] /
self.sepset_beliefs[frozenset([parent_node, child_node])])
parent_nodes.update([child_node])
nodes_traversed.update([parent_node])
# Add factors to the corresponding junction tree
subtree.add_factors(*clique_potential_list)
# Sum product variable elimination on the subtree
variable_elimination = VariableElimination(subtree)
if operation == 'marginalize':
return variable_elimination.query(variables=variables, evidence=evidence, joint=joint)
elif operation == 'maximize':
return variable_elimination.map_query(variables=variables, evidence=evidence)

def getCurrentFadeColor(self, bBackground):
"""Get current fade color value."""
fn = self.function_table.getCurrentFadeColor
result = fn(bBackground)
return result | Get current fade color value. | Below is the the instruction that describes the task:
### Input:
Get current fade color value.
### Response:
def getCurrentFadeColor(self, bBackground):
"""Get current fade color value."""
fn = self.function_table.getCurrentFadeColor
result = fn(bBackground)
return result |
def Execute(cmd,
args,
time_limit=-1,
bypass_whitelist=False,
daemon=False,
use_client_context=False,
cwd=None):
"""Executes commands on the client.
This function is the only place where commands will be executed
by the GRR client. This makes sure that all issued commands are compared to a
white list and no malicious commands are issued on the client machine.
Args:
cmd: The command to be executed.
args: List of arguments.
time_limit: Time in seconds the process is allowed to run.
bypass_whitelist: Allow execution of things that are not in the whitelist.
Note that this should only ever be called on a binary that passes the
VerifySignedBlob check.
daemon: Start the new process in the background.
use_client_context: Run this script in the client's context. Defaults to
system context.
cwd: Current working directory for the command.
Returns:
A tuple of stdout, stderr, return value and time taken.
"""
if not bypass_whitelist and not IsExecutionWhitelisted(cmd, args):
# Whitelist doesn't contain this cmd/arg pair
logging.info("Execution disallowed by whitelist: %s %s.", cmd,
" ".join(args))
return (b"", b"Execution disallowed by whitelist.", -1, -1)
if daemon:
pid = os.fork()
if pid == 0:
# This is the child, it will run the daemon process. We call os.setsid
# here to become the session leader of this new session and the process
# group leader of the new process group so we don't get killed when the
# main process exits.
try:
os.setsid()
except OSError:
# This only works if the process is running as root.
pass
_Execute(
cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)
os._exit(0) # pylint: disable=protected-access
else:
return _Execute(
cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd) | Executes commands on the client.
This function is the only place where commands will be executed
by the GRR client. This makes sure that all issued commands are compared to a
white list and no malicious commands are issued on the client machine.
Args:
cmd: The command to be executed.
args: List of arguments.
time_limit: Time in seconds the process is allowed to run.
bypass_whitelist: Allow execution of things that are not in the whitelist.
Note that this should only ever be called on a binary that passes the
VerifySignedBlob check.
daemon: Start the new process in the background.
use_client_context: Run this script in the client's context. Defaults to
system context.
cwd: Current working directory for the command.
Returns:
A tuple of stdout, stderr, return value and time taken. | Below is the the instruction that describes the task:
### Input:
Executes commands on the client.
This function is the only place where commands will be executed
by the GRR client. This makes sure that all issued commands are compared to a
white list and no malicious commands are issued on the client machine.
Args:
cmd: The command to be executed.
args: List of arguments.
time_limit: Time in seconds the process is allowed to run.
bypass_whitelist: Allow execution of things that are not in the whitelist.
Note that this should only ever be called on a binary that passes the
VerifySignedBlob check.
daemon: Start the new process in the background.
use_client_context: Run this script in the client's context. Defaults to
system context.
cwd: Current working directory for the command.
Returns:
A tuple of stdout, stderr, return value and time taken.
### Response:
def Execute(cmd,
args,
time_limit=-1,
bypass_whitelist=False,
daemon=False,
use_client_context=False,
cwd=None):
"""Executes commands on the client.
This function is the only place where commands will be executed
by the GRR client. This makes sure that all issued commands are compared to a
white list and no malicious commands are issued on the client machine.
Args:
cmd: The command to be executed.
args: List of arguments.
time_limit: Time in seconds the process is allowed to run.
bypass_whitelist: Allow execution of things that are not in the whitelist.
Note that this should only ever be called on a binary that passes the
VerifySignedBlob check.
daemon: Start the new process in the background.
use_client_context: Run this script in the client's context. Defaults to
system context.
cwd: Current working directory for the command.
Returns:
A tuple of stdout, stderr, return value and time taken.
"""
if not bypass_whitelist and not IsExecutionWhitelisted(cmd, args):
# Whitelist doesn't contain this cmd/arg pair
logging.info("Execution disallowed by whitelist: %s %s.", cmd,
" ".join(args))
return (b"", b"Execution disallowed by whitelist.", -1, -1)
if daemon:
pid = os.fork()
if pid == 0:
# This is the child, it will run the daemon process. We call os.setsid
# here to become the session leader of this new session and the process
# group leader of the new process group so we don't get killed when the
# main process exits.
try:
os.setsid()
except OSError:
# This only works if the process is running as root.
pass
_Execute(
cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)
os._exit(0) # pylint: disable=protected-access
else:
return _Execute(
cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd) |
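A hedged usage sketch for Execute above — the command and arguments are illustrative, and any cmd/args pair not on the whitelist is rejected before it runs:
    stdout, stderr, exit_status, time_used = Execute("/bin/ls", ["-l", "/tmp"], time_limit=30)
    if exit_status != 0:
        logging.warning("command failed: %s", stderr)   # stdout/stderr come back as bytes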
def dataset(self, ref, load_all=False, exception=True):
"""Return a dataset, given a vid or id
    :param ref: Vid or id for a dataset. If an id is provided, it will return the one with the
largest revision number
:param load_all: Use a query that eagerly loads everything.
:return: :class:`ambry.orm.Dataset`
"""
ref = str(ref)
try:
ds = self.session.query(Dataset).filter(Dataset.vid == ref).one()
except NoResultFound:
ds = None
if not ds:
try:
ds = self.session \
.query(Dataset) \
.filter(Dataset.id == ref) \
.order_by(Dataset.revision.desc()) \
.first()
except NoResultFound:
ds = None
if not ds:
try:
ds = self.session.query(Dataset).filter(Dataset.vname == ref).one()
except NoResultFound:
ds = None
if not ds:
try:
ds = self.session \
.query(Dataset) \
.filter(Dataset.name == ref) \
.order_by(Dataset.revision.desc()) \
.first()
except NoResultFound:
ds = None
if ds:
ds._database = self
return ds
elif exception:
raise NotFoundError('No dataset in library for vid : {} '.format(ref))
else:
return None | Return a dataset, given a vid or id
    :param ref: Vid or id for a dataset. If an id is provided, it will return the one with the
largest revision number
:param load_all: Use a query that eagerly loads everything.
:return: :class:`ambry.orm.Dataset` | Below is the the instruction that describes the task:
### Input:
Return a dataset, given a vid or id
    :param ref: Vid or id for a dataset. If an id is provided, it will return the one with the
largest revision number
:param load_all: Use a query that eagerly loads everything.
:return: :class:`ambry.orm.Dataset`
### Response:
def dataset(self, ref, load_all=False, exception=True):
"""Return a dataset, given a vid or id
    :param ref: Vid or id for a dataset. If an id is provided, it will return the one with the
largest revision number
:param load_all: Use a query that eagerly loads everything.
:return: :class:`ambry.orm.Dataset`
"""
ref = str(ref)
try:
ds = self.session.query(Dataset).filter(Dataset.vid == ref).one()
except NoResultFound:
ds = None
if not ds:
try:
ds = self.session \
.query(Dataset) \
.filter(Dataset.id == ref) \
.order_by(Dataset.revision.desc()) \
.first()
except NoResultFound:
ds = None
if not ds:
try:
ds = self.session.query(Dataset).filter(Dataset.vname == ref).one()
except NoResultFound:
ds = None
if not ds:
try:
ds = self.session \
.query(Dataset) \
.filter(Dataset.name == ref) \
.order_by(Dataset.revision.desc()) \
.first()
except NoResultFound:
ds = None
if ds:
ds._database = self
return ds
elif exception:
raise NotFoundError('No dataset in library for vid : {} '.format(ref))
else:
return None |
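An illustrative call sequence for the dataset() resolver above; db stands for a Database instance and the reference strings are made up:
    ds = db.dataset('example.com-demo')                    # accepts a vid, id, vname or name
    missing = db.dataset('no-such-ref', exception=False)   # returns None instead of raising
    db.dataset('no-such-ref')                              # raises NotFoundError by default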
def createCard( self, parent, record ):
"""
    Creates a new widget that will represent the card view for the input
record.
:param parent | <QWidget>
record | <orb.Table>
:return <QWidget> || None
"""
cls = self.cardClass(record)
if ( cls ):
card = cls(parent)
card.setRecord(record)
return card
    return None | Creates a new widget that will represent the card view for the input
record.
:param parent | <QWidget>
record | <orb.Table>
:return <QWidget> || None | Below is the the instruction that describes the task:
### Input:
    Creates a new widget that will represent the card view for the input
record.
:param parent | <QWidget>
record | <orb.Table>
:return <QWidget> || None
### Response:
def createCard( self, parent, record ):
"""
    Creates a new widget that will represent the card view for the input
record.
:param parent | <QWidget>
record | <orb.Table>
:return <QWidget> || None
"""
cls = self.cardClass(record)
if ( cls ):
card = cls(parent)
card.setRecord(record)
return card
return None |
def _get(self, obj):
''' Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
If the value has not been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
'''
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name] | Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
    If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|. | Below is the the instruction that describes the task:
### Input:
Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
    If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
### Response:
def _get(self, obj):
''' Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
        If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
'''
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name] |
def save_to_file(self, text, filename, name):
'''
Called by the engine to push a say command onto the queue.
@param text: Text to speak
@type text: unicode
@param name: Name to associate with the utterance
@type name: str
'''
self._push(self._driver.save_to_file, (text, filename), name) | Called by the engine to push a say command onto the queue.
@param text: Text to speak
@type text: unicode
@param name: Name to associate with the utterance
@type name: str | Below is the the instruction that describes the task:
### Input:
Called by the engine to push a say command onto the queue.
@param text: Text to speak
@type text: unicode
@param name: Name to associate with the utterance
@type name: str
### Response:
def save_to_file(self, text, filename, name):
'''
Called by the engine to push a say command onto the queue.
@param text: Text to speak
@type text: unicode
@param name: Name to associate with the utterance
@type name: str
'''
self._push(self._driver.save_to_file, (text, filename), name) |
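For context, callers normally reach this proxy through the engine front end; a sketch assuming the pyttsx3-style API:
    import pyttsx3
    engine = pyttsx3.init()
    engine.save_to_file('Hello world', 'hello.wav', name='greeting')
    engine.runAndWait()   # the queued utterance is rendered to the file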
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
data['output_format'] = 'md'
title, content = super(ServiceTrello, self).save_data(trigger_id, **data)
if len(title):
# get the data of this trigger
t = Trello.objects.get(trigger_id=trigger_id)
# footer of the card
footer = self.set_card_footer(data, t)
content += footer
# 1 - we need to search the list and board where we will
# store the card so ...
# 1.a search the board_id by its name
# by retrieving all the boards
boards = self.trello_instance.list_boards()
board_id = ''
my_list = ''
for board in boards:
if t.board_name == board.name:
board_id = board.id
break
if board_id:
# 1.b search the list_id by its name
my_board = self.trello_instance.get_board(board_id)
lists = my_board.open_lists()
            # just get the open lists; not all the archived ones
for list_in_board in lists:
# search the name of the list we set in the form
if t.list_name == list_in_board.name:
# return the (trello) list object to be able to add card at step 3
my_list = my_board.get_list(list_in_board.id)
break
            # we didn't find the list in that board -> create it
if my_list == '':
my_list = my_board.add_list(t.list_name)
else:
# 2 if board_id and/or list_id does not exist, create it/them
my_board = self.trello_instance.add_board(t.board_name)
                # add the list that didn't exist and return a (trello) list object
my_list = my_board.add_list(t.list_name)
# 3 create the card
my_list.add_card(title, content)
logger.debug(str('trello {} created').format(data['link']))
status = True
else:
sentence = "no token or link provided for trigger ID {}".format(trigger_id)
update_result(trigger_id, msg=sentence, status=False)
status = False
return status | let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean | Below is the the instruction that describes the task:
### Input:
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
### Response:
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
data['output_format'] = 'md'
title, content = super(ServiceTrello, self).save_data(trigger_id, **data)
if len(title):
# get the data of this trigger
t = Trello.objects.get(trigger_id=trigger_id)
# footer of the card
footer = self.set_card_footer(data, t)
content += footer
# 1 - we need to search the list and board where we will
# store the card so ...
# 1.a search the board_id by its name
# by retrieving all the boards
boards = self.trello_instance.list_boards()
board_id = ''
my_list = ''
for board in boards:
if t.board_name == board.name:
board_id = board.id
break
if board_id:
# 1.b search the list_id by its name
my_board = self.trello_instance.get_board(board_id)
lists = my_board.open_lists()
            # just get the open lists; not all the archived ones
for list_in_board in lists:
# search the name of the list we set in the form
if t.list_name == list_in_board.name:
# return the (trello) list object to be able to add card at step 3
my_list = my_board.get_list(list_in_board.id)
break
            # we didn't find the list in that board -> create it
if my_list == '':
my_list = my_board.add_list(t.list_name)
else:
# 2 if board_id and/or list_id does not exist, create it/them
my_board = self.trello_instance.add_board(t.board_name)
                # add the list that didn't exist and return a (trello) list object
my_list = my_board.add_list(t.list_name)
# 3 create the card
my_list.add_card(title, content)
logger.debug(str('trello {} created').format(data['link']))
status = True
else:
sentence = "no token or link provided for trigger ID {}".format(trigger_id)
update_result(trigger_id, msg=sentence, status=False)
status = False
return status |
def intersection(self):
"""Compute the intersection with the given MOC.
This command takes the name of a MOC file and forms the intersection
of the running MOC with that file.
::
pymoctool a.fits --intersection b.fits --output intersection.fits
"""
if self.moc is None:
raise CommandError('No MOC information present for intersection')
filename = self.params.pop()
self.moc = self.moc.intersection(MOC(filename=filename)) | Compute the intersection with the given MOC.
This command takes the name of a MOC file and forms the intersection
of the running MOC with that file.
::
pymoctool a.fits --intersection b.fits --output intersection.fits | Below is the the instruction that describes the task:
### Input:
Compute the intersection with the given MOC.
This command takes the name of a MOC file and forms the intersection
of the running MOC with that file.
::
pymoctool a.fits --intersection b.fits --output intersection.fits
### Response:
def intersection(self):
"""Compute the intersection with the given MOC.
This command takes the name of a MOC file and forms the intersection
of the running MOC with that file.
::
pymoctool a.fits --intersection b.fits --output intersection.fits
"""
if self.moc is None:
raise CommandError('No MOC information present for intersection')
filename = self.params.pop()
self.moc = self.moc.intersection(MOC(filename=filename)) |
def _compute_mean(self, C, rup, dists, sites, imt):
"""
Returns the mean ground motion acceleration and velocity
"""
mean = (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, rup.mag, dists.rrup) +
self._get_site_amplification_term(C, sites.vs30))
# convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV
# is already in cm/s) and also convert from base 10 to base e.
if imt.name == "PGA":
mean = np.log((10 ** mean) * ((2 * np.pi / 0.01) ** 2) *
1e-2 / g)
elif imt.name == "SA":
mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) *
1e-2 / g)
else:
mean = np.log(10 ** mean)
return mean | Returns the mean ground motion acceleration and velocity | Below is the the instruction that describes the task:
### Input:
Returns the mean ground motion acceleration and velocity
### Response:
def _compute_mean(self, C, rup, dists, sites, imt):
"""
Returns the mean ground motion acceleration and velocity
"""
mean = (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, rup.mag, dists.rrup) +
self._get_site_amplification_term(C, sites.vs30))
# convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV
# is already in cm/s) and also convert from base 10 to base e.
if imt.name == "PGA":
mean = np.log((10 ** mean) * ((2 * np.pi / 0.01) ** 2) *
1e-2 / g)
elif imt.name == "SA":
mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) *
1e-2 / g)
else:
mean = np.log(10 ** mean)
return mean |
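The conversion above is just pseudo-spectral acceleration derived from a base-10 displacement term, followed by a change to natural log; an illustrative check for SA at T = 1 s with a hypothetical regression value of 0.5:
    import numpy as np
    from scipy.constants import g
    log10_disp_cm = 0.5                                            # hypothetical value of the model term
    sa_g = (10 ** log10_disp_cm) * (2 * np.pi / 1.0) ** 2 * 1e-2 / g
    ln_sa = np.log(sa_g)                                           # natural log of SA in g, as returned above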
def _is_converged(self):
'''Determine if calculation converged; for a relaxation (static) run
we look for ionic (electronic) convergence in the output'''
if self.is_relaxed():
# relaxation run case
return self._get_line(['End of', 'Geometry Optimization'], self.outputf, return_string=False)
else:
# static run case
return self._get_line('convergence has been achieved', self.outputf, return_string=False) | Determine if calculation converged; for a relaxation (static) run
we look for ionic (electronic) convergence in the output | Below is the the instruction that describes the task:
### Input:
Determine if calculation converged; for a relaxation (static) run
we look for ionic (electronic) convergence in the output
### Response:
def _is_converged(self):
'''Determine if calculation converged; for a relaxation (static) run
we look for ionic (electronic) convergence in the output'''
if self.is_relaxed():
# relaxation run case
return self._get_line(['End of', 'Geometry Optimization'], self.outputf, return_string=False)
else:
# static run case
return self._get_line('convergence has been achieved', self.outputf, return_string=False) |
def generate_operators(name, n_vars=1, hermitian=None, commutative=False):
"""Generates a number of commutative or noncommutative operators
:param name: The prefix in the symbolic representation of the noncommuting
variables. This will be suffixed by a number from 0 to
n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables .
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
Commutative variables are Hermitian by default.
:type commutative: bool.
:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
:class:`sympy.physics.quantum.operator.HermitianOperator`
variables
:Example:
    >>> generate_operators('y', 2, commutative=True)
[y0, y1]
"""
variables = []
for i in range(n_vars):
if n_vars > 1:
var_name = '%s%s' % (name, i)
else:
var_name = '%s' % name
if hermitian is not None and hermitian:
variables.append(HermitianOperator(var_name))
else:
variables.append(Operator(var_name))
variables[-1].is_commutative = commutative
return variables | Generates a number of commutative or noncommutative operators
:param name: The prefix in the symbolic representation of the noncommuting
variables. This will be suffixed by a number from 0 to
n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables .
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
Commutative variables are Hermitian by default.
:type commutative: bool.
:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
:class:`sympy.physics.quantum.operator.HermitianOperator`
variables
:Example:
    >>> generate_operators('y', 2, commutative=True)
[y0, y1] | Below is the the instruction that describes the task:
### Input:
Generates a number of commutative or noncommutative operators
:param name: The prefix in the symbolic representation of the noncommuting
variables. This will be suffixed by a number from 0 to
n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables .
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
Commutative variables are Hermitian by default.
:type commutative: bool.
:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
:class:`sympy.physics.quantum.operator.HermitianOperator`
variables
:Example:
    >>> generate_operators('y', 2, commutative=True)
[y0, y1]
### Response:
def generate_operators(name, n_vars=1, hermitian=None, commutative=False):
"""Generates a number of commutative or noncommutative operators
:param name: The prefix in the symbolic representation of the noncommuting
variables. This will be suffixed by a number from 0 to
n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables .
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
Commutative variables are Hermitian by default.
:type commutative: bool.
:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
:class:`sympy.physics.quantum.operator.HermitianOperator`
variables
:Example:
    >>> generate_operators('y', 2, commutative=True)
[y0, y1]
"""
variables = []
for i in range(n_vars):
if n_vars > 1:
var_name = '%s%s' % (name, i)
else:
var_name = '%s' % name
if hermitian is not None and hermitian:
variables.append(HermitianOperator(var_name))
else:
variables.append(Operator(var_name))
variables[-1].is_commutative = commutative
return variables |
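The docstring example covers the commutative case; a short sketch of the noncommutative calls as well:
    >>> generate_operators('X', 2, hermitian=True)   # Hermitian, noncommutative by default
    [X0, X1]
    >>> generate_operators('P', 1)[0].is_commutative
    False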
def _stormpath_authenticate(self, username, password):
"""Check if Stormpath authentication works
:param username: Can be actual username or email
:param password: Account password
Returns an account object if successful or None otherwise.
"""
APPLICATION = get_application()
try:
result = APPLICATION.authenticate_account(username, password)
return result.account
except Error as e:
log.debug(e)
return None | Check if Stormpath authentication works
:param username: Can be actual username or email
:param password: Account password
Returns an account object if successful or None otherwise. | Below is the the instruction that describes the task:
### Input:
Check if Stormpath authentication works
:param username: Can be actual username or email
:param password: Account password
Returns an account object if successful or None otherwise.
### Response:
def _stormpath_authenticate(self, username, password):
"""Check if Stormpath authentication works
:param username: Can be actual username or email
:param password: Account password
Returns an account object if successful or None otherwise.
"""
APPLICATION = get_application()
try:
result = APPLICATION.authenticate_account(username, password)
return result.account
except Error as e:
log.debug(e)
return None |
def get_neighbors(index, lattice_length, width=0, periodic=False):
"""Get the forward neighbors of a site in a lattice.
:param index: Linear index of operator.
:type index: int.
:param lattice_length: The size of the 2D lattice in either dimension
:type lattice_length: int.
:param width: Optional parameter to define width.
:type width: int.
:param periodic: Optional parameter to indicate periodic boundary
conditions.
:type periodic: bool
:returns: list of int -- the neighbors in linear index.
"""
if width == 0:
width = lattice_length
neighbors = []
coords = divmod(index, width)
if coords[1] < width - 1:
neighbors.append(index + 1)
elif periodic and width > 1:
neighbors.append(index - width + 1)
if coords[0] < lattice_length - 1:
neighbors.append(index + width)
elif periodic:
neighbors.append(index - (lattice_length - 1) * width)
return neighbors | Get the forward neighbors of a site in a lattice.
:param index: Linear index of operator.
:type index: int.
:param lattice_length: The size of the 2D lattice in either dimension
:type lattice_length: int.
:param width: Optional parameter to define width.
:type width: int.
:param periodic: Optional parameter to indicate periodic boundary
conditions.
:type periodic: bool
:returns: list of int -- the neighbors in linear index. | Below is the the instruction that describes the task:
### Input:
Get the forward neighbors of a site in a lattice.
:param index: Linear index of operator.
:type index: int.
:param lattice_length: The size of the 2D lattice in either dimension
:type lattice_length: int.
:param width: Optional parameter to define width.
:type width: int.
:param periodic: Optional parameter to indicate periodic boundary
conditions.
:type periodic: bool
:returns: list of int -- the neighbors in linear index.
### Response:
def get_neighbors(index, lattice_length, width=0, periodic=False):
"""Get the forward neighbors of a site in a lattice.
:param index: Linear index of operator.
:type index: int.
:param lattice_length: The size of the 2D lattice in either dimension
:type lattice_length: int.
:param width: Optional parameter to define width.
:type width: int.
:param periodic: Optional parameter to indicate periodic boundary
conditions.
:type periodic: bool
:returns: list of int -- the neighbors in linear index.
"""
if width == 0:
width = lattice_length
neighbors = []
coords = divmod(index, width)
if coords[1] < width - 1:
neighbors.append(index + 1)
elif periodic and width > 1:
neighbors.append(index - width + 1)
if coords[0] < lattice_length - 1:
neighbors.append(index + width)
elif periodic:
neighbors.append(index - (lattice_length - 1) * width)
return neighbors |
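A small worked example for get_neighbors above on a 3x3 lattice (sites indexed row-major from 0):
    >>> get_neighbors(0, 3)                  # right neighbor 1 and down neighbor 3
    [1, 3]
    >>> get_neighbors(8, 3)                  # bottom-right corner: no forward neighbors
    []
    >>> get_neighbors(8, 3, periodic=True)   # wraps around both edges
    [6, 2]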
def server_hardware_types(self):
"""
Gets the ServerHardwareTypes API client.
Returns:
ServerHardwareTypes:
"""
if not self.__server_hardware_types:
self.__server_hardware_types = ServerHardwareTypes(
self.__connection)
return self.__server_hardware_types | Gets the ServerHardwareTypes API client.
Returns:
ServerHardwareTypes: | Below is the the instruction that describes the task:
### Input:
Gets the ServerHardwareTypes API client.
Returns:
ServerHardwareTypes:
### Response:
def server_hardware_types(self):
"""
Gets the ServerHardwareTypes API client.
Returns:
ServerHardwareTypes:
"""
if not self.__server_hardware_types:
self.__server_hardware_types = ServerHardwareTypes(
self.__connection)
return self.__server_hardware_types |
def _CreateShapePointFolder(self, shapes_folder, shape):
"""Create a KML Folder containing all the shape points in a shape.
The folder contains placemarks for each shapepoint.
Args:
shapes_folder: A KML Shape Folder ElementTree.Element instance
shape: The shape to plot.
Returns:
The Folder ElementTree.Element instance or None.
"""
folder_name = shape.shape_id + ' Shape Points'
folder = self._CreateFolder(shapes_folder, folder_name, visible=False)
for (index, (lat, lon, dist)) in enumerate(shape.points):
placemark = self._CreatePlacemark(folder, str(index+1))
point = ET.SubElement(placemark, 'Point')
coordinates = ET.SubElement(point, 'coordinates')
coordinates.text = '%.6f,%.6f' % (lon, lat)
return folder | Create a KML Folder containing all the shape points in a shape.
The folder contains placemarks for each shapepoint.
Args:
shapes_folder: A KML Shape Folder ElementTree.Element instance
shape: The shape to plot.
Returns:
The Folder ElementTree.Element instance or None. | Below is the the instruction that describes the task:
### Input:
Create a KML Folder containing all the shape points in a shape.
The folder contains placemarks for each shapepoint.
Args:
shapes_folder: A KML Shape Folder ElementTree.Element instance
shape: The shape to plot.
Returns:
The Folder ElementTree.Element instance or None.
### Response:
def _CreateShapePointFolder(self, shapes_folder, shape):
"""Create a KML Folder containing all the shape points in a shape.
The folder contains placemarks for each shapepoint.
Args:
shapes_folder: A KML Shape Folder ElementTree.Element instance
shape: The shape to plot.
Returns:
The Folder ElementTree.Element instance or None.
"""
folder_name = shape.shape_id + ' Shape Points'
folder = self._CreateFolder(shapes_folder, folder_name, visible=False)
for (index, (lat, lon, dist)) in enumerate(shape.points):
placemark = self._CreatePlacemark(folder, str(index+1))
point = ET.SubElement(placemark, 'Point')
coordinates = ET.SubElement(point, 'coordinates')
coordinates.text = '%.6f,%.6f' % (lon, lat)
return folder |
def _do_autopaginating_api_call(self, path, params, method, parser_func,
next_marker_xpath, next_marker_param_name,
next_type_xpath=None, parser_kwargs=None):
"""
Given an API method, the arguments passed to it, and a function to
hand parsing off to, loop through the record sets in the API call
until all records have been yielded.
:param str method: The API method on the endpoint.
:param dict params: The kwargs from the top-level API method.
:param callable parser_func: A callable that is used for parsing the
output from the API call.
        :param str next_marker_xpath: The XPath to the marker tag that
will determine whether we continue paginating.
:param str next_marker_param_name: The parameter name to manipulate
in the request data to bring up the next page on the next
request loop.
:keyword str next_type_xpath: For the
py:meth:`list_resource_record_sets_by_zone_id` method, there's
an additional paginator token. Specifying this XPath looks for it.
:keyword dict parser_kwargs: Optional dict of additional kwargs to pass
on to the parser function.
:rtype: generator
:returns: Returns a generator that may be returned by the top-level
API method.
"""
if not parser_kwargs:
parser_kwargs = {}
# We loop indefinitely since we have no idea how many "pages" of
# results we're going to have to go through.
while True:
# An lxml Element node.
root = self._send_request(path, params, method)
# Individually yield HostedZone instances after parsing/instantiating.
for record in parser_func(root, connection=self, **parser_kwargs):
yield record
# This will determine at what offset we start the next query.
next_marker = root.find(next_marker_xpath)
if next_marker is None:
# If the NextMarker tag is absent, we know we've hit the
# last page.
break
# if NextMarker is present, we'll adjust our API request params
# and query again for the next page.
params[next_marker_param_name] = next_marker.text
if next_type_xpath:
# This is a _list_resource_record_sets_by_zone_id call. Look
# for the given tag via XPath and adjust our type arg for
# the next request. Without specifying this, we loop
# infinitely.
next_type = root.find(next_type_xpath)
params['type'] = next_type.text | Given an API method, the arguments passed to it, and a function to
hand parsing off to, loop through the record sets in the API call
until all records have been yielded.
:param str method: The API method on the endpoint.
:param dict params: The kwargs from the top-level API method.
:param callable parser_func: A callable that is used for parsing the
output from the API call.
    :param str next_marker_xpath: The XPath to the marker tag that
will determine whether we continue paginating.
:param str next_marker_param_name: The parameter name to manipulate
in the request data to bring up the next page on the next
request loop.
:keyword str next_type_xpath: For the
py:meth:`list_resource_record_sets_by_zone_id` method, there's
an additional paginator token. Specifying this XPath looks for it.
:keyword dict parser_kwargs: Optional dict of additional kwargs to pass
on to the parser function.
:rtype: generator
:returns: Returns a generator that may be returned by the top-level
API method. | Below is the the instruction that describes the task:
### Input:
Given an API method, the arguments passed to it, and a function to
hand parsing off to, loop through the record sets in the API call
until all records have been yielded.
:param str method: The API method on the endpoint.
:param dict params: The kwargs from the top-level API method.
:param callable parser_func: A callable that is used for parsing the
output from the API call.
    :param str next_marker_xpath: The XPath to the marker tag that
will determine whether we continue paginating.
:param str next_marker_param_name: The parameter name to manipulate
in the request data to bring up the next page on the next
request loop.
:keyword str next_type_xpath: For the
py:meth:`list_resource_record_sets_by_zone_id` method, there's
an additional paginator token. Specifying this XPath looks for it.
:keyword dict parser_kwargs: Optional dict of additional kwargs to pass
on to the parser function.
:rtype: generator
:returns: Returns a generator that may be returned by the top-level
API method.
### Response:
def _do_autopaginating_api_call(self, path, params, method, parser_func,
next_marker_xpath, next_marker_param_name,
next_type_xpath=None, parser_kwargs=None):
"""
Given an API method, the arguments passed to it, and a function to
hand parsing off to, loop through the record sets in the API call
until all records have been yielded.
:param str method: The API method on the endpoint.
:param dict params: The kwargs from the top-level API method.
:param callable parser_func: A callable that is used for parsing the
output from the API call.
        :param str next_marker_xpath: The XPath to the marker tag that
will determine whether we continue paginating.
:param str next_marker_param_name: The parameter name to manipulate
in the request data to bring up the next page on the next
request loop.
:keyword str next_type_xpath: For the
py:meth:`list_resource_record_sets_by_zone_id` method, there's
an additional paginator token. Specifying this XPath looks for it.
:keyword dict parser_kwargs: Optional dict of additional kwargs to pass
on to the parser function.
:rtype: generator
:returns: Returns a generator that may be returned by the top-level
API method.
"""
if not parser_kwargs:
parser_kwargs = {}
# We loop indefinitely since we have no idea how many "pages" of
# results we're going to have to go through.
while True:
# An lxml Element node.
root = self._send_request(path, params, method)
# Individually yield HostedZone instances after parsing/instantiating.
for record in parser_func(root, connection=self, **parser_kwargs):
yield record
# This will determine at what offset we start the next query.
next_marker = root.find(next_marker_xpath)
if next_marker is None:
# If the NextMarker tag is absent, we know we've hit the
# last page.
break
# if NextMarker is present, we'll adjust our API request params
# and query again for the next page.
params[next_marker_param_name] = next_marker.text
if next_type_xpath:
# This is a _list_resource_record_sets_by_zone_id call. Look
# for the given tag via XPath and adjust our type arg for
# the next request. Without specifying this, we loop
# infinitely.
next_type = root.find(next_type_xpath)
params['type'] = next_type.text |
def merge(self, other):
"""
Merge properties of other into self
        Raises ValueError if either of them is a blank
"""
if self.is_blank() or other.is_blank():
raise ValueError('Cannot merge if there is a blank.')
else:
self.properties.update(other.properties) | Merge properties of other into self
        Raises ValueError if either of them is a blank
### Input:
Merge properties of other into self
        Raises ValueError if either of them is a blank
### Response:
def merge(self, other):
"""
Merge properties of other into self
        Raises ValueError if either of them is a blank
"""
if self.is_blank() or other.is_blank():
raise ValueError('Cannot merge if there is a blank.')
else:
self.properties.update(other.properties) |
def paste_template(self, template_name, template=None, deploy_dir=None):
" Paste template. "
LOGGER.debug("Paste template: %s" % template_name)
deploy_dir = deploy_dir or self.deploy_dir
template = template or self._get_template_path(template_name)
self.read([op.join(template, settings.CFGNAME)], extending=True)
for fname in gen_template_files(template):
curdir = op.join(deploy_dir, op.dirname(fname))
if not op.exists(curdir):
makedirs(curdir)
source = op.join(template, fname)
target = op.join(deploy_dir, fname)
copy2(source, target)
name, ext = op.splitext(fname)
if ext == '.tmpl':
t = Template.from_filename(target, namespace=self.as_dict())
with open(op.join(deploy_dir, name), 'w') as f:
f.write(t.substitute())
remove(target)
return deploy_dir | Paste template. | Below is the the instruction that describes the task:
### Input:
Paste template.
### Response:
def paste_template(self, template_name, template=None, deploy_dir=None):
" Paste template. "
LOGGER.debug("Paste template: %s" % template_name)
deploy_dir = deploy_dir or self.deploy_dir
template = template or self._get_template_path(template_name)
self.read([op.join(template, settings.CFGNAME)], extending=True)
for fname in gen_template_files(template):
curdir = op.join(deploy_dir, op.dirname(fname))
if not op.exists(curdir):
makedirs(curdir)
source = op.join(template, fname)
target = op.join(deploy_dir, fname)
copy2(source, target)
name, ext = op.splitext(fname)
if ext == '.tmpl':
t = Template.from_filename(target, namespace=self.as_dict())
with open(op.join(deploy_dir, name), 'w') as f:
f.write(t.substitute())
remove(target)
return deploy_dir |
def returnSplineList(dependentVar, independentVar, subsetPercentage=0.4,
cycles=10, minKnotPoints=10, initialKnots=200,
splineOrder=2, terminalExpansion=0.1
):
""" #TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring
"""
expansions = ddict(list)
expansionArea = (independentVar[-1] - independentVar[0]) * terminalExpansion
#adds 100 data points at both ends of the dependent and independent array
for i in range(100):
expansions['indUp'].append(independentVar[-1] + expansionArea/100*i)
expansions['indDown'].append(independentVar[0] -
expansionArea/100*(100-i+1)
)
expansions['depUp'].append(dependentVar[-1])
expansions['depDown'].append(dependentVar[0])
dependentVar = numpy.array(expansions['depDown'] + list(dependentVar) +
expansions['depUp'], dtype=numpy.float64
)
independentVar = numpy.array(expansions['indDown'] + list(independentVar) +
expansions['indUp'], dtype=numpy.float64
)
splineList = list()
for cycle in range(cycles):
subset = sorted(random.sample(range(len(dependentVar)),
int(len(dependentVar) * subsetPercentage)
)
)
terminalExpansion
dependentSubset = dependentVar[subset]
independentSubset = independentVar[subset]
minIndVar = independentSubset[minKnotPoints]
maxIndVar = independentSubset[-minKnotPoints]
knots = [float(i) * (maxIndVar-minIndVar) / initialKnots + minIndVar
for i in range(1, initialKnots)
]
        ## remove knots with less than minKnotPoints data points ##
lastKnot = knots[0]
newKnotList = [lastKnot]
for knotPos in range(1,len(knots)):
nextKnot = knots[knotPos]
numHits = (len(independentSubset[(independentSubset >= lastKnot) &
(independentSubset <= nextKnot)])
)
if numHits >= minKnotPoints:
newKnotList.append(nextKnot)
lastKnot = nextKnot
knots = newKnotList
spline = LSQUnivariateSpline(independentSubset, dependentSubset, knots,
k=splineOrder)
splineList.append(spline)
return splineList | #TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring | Below is the the instruction that describes the task:
### Input:
#TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring
### Response:
def returnSplineList(dependentVar, independentVar, subsetPercentage=0.4,
cycles=10, minKnotPoints=10, initialKnots=200,
splineOrder=2, terminalExpansion=0.1
):
""" #TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring
"""
expansions = ddict(list)
expansionArea = (independentVar[-1] - independentVar[0]) * terminalExpansion
#adds 100 data points at both ends of the dependent and independent array
for i in range(100):
expansions['indUp'].append(independentVar[-1] + expansionArea/100*i)
expansions['indDown'].append(independentVar[0] -
expansionArea/100*(100-i+1)
)
expansions['depUp'].append(dependentVar[-1])
expansions['depDown'].append(dependentVar[0])
dependentVar = numpy.array(expansions['depDown'] + list(dependentVar) +
expansions['depUp'], dtype=numpy.float64
)
independentVar = numpy.array(expansions['indDown'] + list(independentVar) +
expansions['indUp'], dtype=numpy.float64
)
splineList = list()
for cycle in range(cycles):
subset = sorted(random.sample(range(len(dependentVar)),
int(len(dependentVar) * subsetPercentage)
)
)
terminalExpansion
dependentSubset = dependentVar[subset]
independentSubset = independentVar[subset]
minIndVar = independentSubset[minKnotPoints]
maxIndVar = independentSubset[-minKnotPoints]
knots = [float(i) * (maxIndVar-minIndVar) / initialKnots + minIndVar
for i in range(1, initialKnots)
]
        ## remove knots with less than minKnotPoints data points ##
lastKnot = knots[0]
newKnotList = [lastKnot]
for knotPos in range(1,len(knots)):
nextKnot = knots[knotPos]
numHits = (len(independentSubset[(independentSubset >= lastKnot) &
(independentSubset <= nextKnot)])
)
if numHits >= minKnotPoints:
newKnotList.append(nextKnot)
lastKnot = nextKnot
knots = newKnotList
spline = LSQUnivariateSpline(independentSubset, dependentSubset, knots,
k=splineOrder)
splineList.append(spline)
return splineList |
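A hedged usage sketch for returnSplineList above, on synthetic sorted arrays, averaging the bootstrapped splines into one smoothed estimate:
    import numpy
    xs = numpy.linspace(0, 10, 500)                       # independent variable, already sorted
    ys = 2.0 * xs + numpy.random.normal(0, 0.5, 500)      # roughly monotonic dependent variable
    splines = returnSplineList(ys, xs, cycles=5, initialKnots=50)
    smoothed = numpy.mean([s(xs) for s in splines], axis=0)   # ensemble average of the fits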
def consume(self, event):
"""
process the current event, setup new state and teardown current state
"""
future_states = self.can(event)
new_state = future_states[0]
if len(future_states) > 1:
new_state = self.choose(event)
event.execute()
self.process(event)
new_state.setup(event, self)
self.teardown(event)
return new_state | process the current event, setup new state and teardown current state | Below is the the instruction that describes the task:
### Input:
process the current event, setup new state and teardown current state
### Response:
def consume(self, event):
"""
process the current event, setup new state and teardown current state
"""
future_states = self.can(event)
new_state = future_states[0]
if len(future_states) > 1:
new_state = self.choose(event)
event.execute()
self.process(event)
new_state.setup(event, self)
self.teardown(event)
return new_state |
def _Enum(docstring, *names):
"""Utility to generate enum classes used by annotations.
Args:
docstring: Docstring for the generated enum class.
*names: Enum names.
Returns:
A class that contains enum names as attributes.
"""
enums = dict(zip(names, range(len(names))))
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
enums['__doc__'] = docstring
return type('Enum', (object,), enums) | Utility to generate enum classes used by annotations.
Args:
docstring: Docstring for the generated enum class.
*names: Enum names.
Returns:
A class that contains enum names as attributes. | Below is the the instruction that describes the task:
### Input:
Utility to generate enum classes used by annotations.
Args:
docstring: Docstring for the generated enum class.
*names: Enum names.
Returns:
A class that contains enum names as attributes.
### Response:
def _Enum(docstring, *names):
"""Utility to generate enum classes used by annotations.
Args:
docstring: Docstring for the generated enum class.
*names: Enum names.
Returns:
A class that contains enum names as attributes.
"""
enums = dict(zip(names, range(len(names))))
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
enums['__doc__'] = docstring
return type('Enum', (object,), enums) |
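A quick illustration of the _Enum helper above (note the Python 2 iteritems in its body):
    >>> Status = _Enum('Job status values.', 'PENDING', 'RUNNING', 'DONE')
    >>> (Status.PENDING, Status.DONE)
    (0, 2)
    >>> Status.reverse_mapping[1]
    'RUNNING'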
def preview(src_path):
''' Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page. Blank list if unsupported.
'''
preview = embedded_preview(src_path)
if not is_valid_preview(preview):
preview = generator_preview(src_path)
if not is_valid_preview(preview):
preview = thumbnail_preview(src_path)
# Ensure the preview is returned in the right format
if is_valid_preview(preview):
if mimetype(preview) in [ExportMimeType.PNG]:
return [preview]
if mimetype(preview) in [ExportMimeType.PDF]:
return to_pngs(preview)
return [] | Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page. Blank list if unsupported. | Below is the the instruction that describes the task:
### Input:
Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page. Blank list if unsupported.
### Response:
def preview(src_path):
''' Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page. Blank list if unsupported.
'''
preview = embedded_preview(src_path)
if not is_valid_preview(preview):
preview = generator_preview(src_path)
if not is_valid_preview(preview):
preview = thumbnail_preview(src_path)
# Ensure the preview is returned in the right format
if is_valid_preview(preview):
if mimetype(preview) in [ExportMimeType.PNG]:
return [preview]
if mimetype(preview) in [ExportMimeType.PDF]:
return to_pngs(preview)
return [] |
def detect_os(self, ip):
"""
Runs the checker.py scripts to detect the os.
"""
process = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'checker.py'), str(ip)], stdout=subprocess.PIPE)
out = process.stdout.decode('utf-8').split('\n')
system_os = ''
for line in out:
if line.startswith('Target OS:'):
system_os = line.replace('Target OS: ', '')
break
return system_os | Runs the checker.py scripts to detect the os. | Below is the the instruction that describes the task:
### Input:
Runs the checker.py scripts to detect the os.
### Response:
def detect_os(self, ip):
"""
Runs the checker.py scripts to detect the os.
"""
process = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'checker.py'), str(ip)], stdout=subprocess.PIPE)
out = process.stdout.decode('utf-8').split('\n')
system_os = ''
for line in out:
if line.startswith('Target OS:'):
system_os = line.replace('Target OS: ', '')
break
return system_os |
def rehearse(
folders, references, handler,
repeat=0, roles=1, strict=False,
loop=None
):
"""Cast a set of objects into a sequence of scene scripts. Deliver the performance.
:param folders: A sequence of
:py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
:param references: A sequence of Python objects.
:param handler: A callable object. This will be invoked with every event from the
performance.
:param int repeat: Extra repetitions of each folder.
:param int roles: Maximum number of roles permitted each character.
:param bool strict: Only fully-cast scripts to be performed.
This function is a generator. It yields events from the performance.
"""
if isinstance(folders, SceneScript.Folder):
folders = [folders]
yield from handler(references, loop=loop)
matcher = Matcher(folders)
performer = Performer(folders, references)
while True:
folder, index, script, selection, interlude = performer.next(
folders, references, strict=strict, roles=roles
)
yield from handler(script, loop=loop)
for item in performer.run(react=False, strict=strict, roles=roles):
yield from handler(item, loop=loop)
if isinstance(interlude, Callable):
metadata = next(handler(
interlude, folder, index, references, loop=loop
), None)
yield metadata
if metadata is None:
return
branch = next(matcher.options(metadata))
if branch != folder:
performer = Performer([branch], references)
if not repeat:
break
else:
repeat -= 1 | Cast a set of objects into a sequence of scene scripts. Deliver the performance.
:param folders: A sequence of
:py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
:param references: A sequence of Python objects.
:param handler: A callable object. This will be invoked with every event from the
performance.
:param int repeat: Extra repetitions of each folder.
:param int roles: Maximum number of roles permitted each character.
:param bool strict: Only fully-cast scripts to be performed.
This function is a generator. It yields events from the performance. | Below is the the instruction that describes the task:
### Input:
Cast a set of objects into a sequence of scene scripts. Deliver the performance.
:param folders: A sequence of
:py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
:param references: A sequence of Python objects.
:param handler: A callable object. This will be invoked with every event from the
performance.
:param int repeat: Extra repetitions of each folder.
:param int roles: Maximum number of roles permitted each character.
:param bool strict: Only fully-cast scripts to be performed.
This function is a generator. It yields events from the performance.
### Response:
def rehearse(
folders, references, handler,
repeat=0, roles=1, strict=False,
loop=None
):
"""Cast a set of objects into a sequence of scene scripts. Deliver the performance.
:param folders: A sequence of
:py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
:param references: A sequence of Python objects.
:param handler: A callable object. This will be invoked with every event from the
performance.
:param int repeat: Extra repetitions of each folder.
:param int roles: Maximum number of roles permitted each character.
:param bool strict: Only fully-cast scripts to be performed.
This function is a generator. It yields events from the performance.
"""
if isinstance(folders, SceneScript.Folder):
folders = [folders]
yield from handler(references, loop=loop)
matcher = Matcher(folders)
performer = Performer(folders, references)
while True:
folder, index, script, selection, interlude = performer.next(
folders, references, strict=strict, roles=roles
)
yield from handler(script, loop=loop)
for item in performer.run(react=False, strict=strict, roles=roles):
yield from handler(item, loop=loop)
if isinstance(interlude, Callable):
metadata = next(handler(
interlude, folder, index, references, loop=loop
), None)
yield metadata
if metadata is None:
return
branch = next(matcher.options(metadata))
if branch != folder:
performer = Performer([branch], references)
if not repeat:
break
else:
repeat -= 1 |
def write(self, data):
"""Encrypt and write the given bytes"""
self._check_not_closed()
if not data:
return 0
enc_data = self.encryptor.update(data)
self.next_fp.write(enc_data)
self.offset += len(data)
return len(data) | Encrypt and write the given bytes | Below is the the instruction that describes the task:
### Input:
Encrypt and write the given bytes
### Response:
def write(self, data):
"""Encrypt and write the given bytes"""
self._check_not_closed()
if not data:
return 0
enc_data = self.encryptor.update(data)
self.next_fp.write(enc_data)
self.offset += len(data)
return len(data) |
def _set_auto_fields(self, model_obj):
"""Set the values of the auto field using counter"""
for field_name, field_obj in \
self.entity_cls.meta_.auto_fields:
counter_key = f'{self.schema_name}_{field_name}'
if not (field_name in model_obj and model_obj[field_name] is not None):
# Increment the counter and it should start from 1
counter = next(self.conn['counters'][counter_key])
if not counter:
counter = next(self.conn['counters'][counter_key])
model_obj[field_name] = counter
return model_obj | Set the values of the auto field using counter | Below is the the instruction that describes the task:
### Input:
Set the values of the auto field using counter
### Response:
def _set_auto_fields(self, model_obj):
"""Set the values of the auto field using counter"""
for field_name, field_obj in \
self.entity_cls.meta_.auto_fields:
counter_key = f'{self.schema_name}_{field_name}'
if not (field_name in model_obj and model_obj[field_name] is not None):
# Increment the counter and it should start from 1
counter = next(self.conn['counters'][counter_key])
if not counter:
counter = next(self.conn['counters'][counter_key])
model_obj[field_name] = counter
return model_obj |
def kxtrct(keywd, terms, nterms, instring, termlen=_default_len_out, stringlen=_default_len_out, substrlen=_default_len_out):
"""
Locate a keyword in a string and extract the substring from
the beginning of the first word following the keyword to the
beginning of the first subsequent recognized terminator of a list.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/kxtrct_c.html
:param keywd: Word that marks the beginning of text of interest.
:type keywd: str
:param terms: Set of words, any of which marks the end of text.
:type terms: Array of str
:param nterms: Number of terms.
:type nterms: int
:param instring: String containing a sequence of words.
:type instring: str
:param termlen: Length of strings in string array term.
:type termlen: int
:param stringlen: Available space in argument string.
:type stringlen: int
:param substrlen: Available space in output substring.
:type substrlen: int
:return:
String containing a sequence of words,
String from end of keywd to beginning of first terms item found.
:rtype: tuple
"""
assert nterms <= len(terms)
# Python strings and string arrays => to C char pointers
keywd = stypes.stringToCharP(keywd)
terms = stypes.listToCharArrayPtr([s[:termlen-1] for s in terms[:nterms]],xLen=termlen,yLen=nterms)
instring = stypes.stringToCharP(instring[:stringlen-1],inlen=stringlen)
substr = stypes.stringToCharP(substrlen)
# Python ints => to C ints
termlen = ctypes.c_int(termlen)
nterms = ctypes.c_int(nterms)
stringlen = ctypes.c_int(stringlen)
substrlen = ctypes.c_int(substrlen)
found = ctypes.c_int()
libspice.kxtrct_c(keywd, termlen, terms, nterms,
stringlen, substrlen, instring, ctypes.byref(found),
substr)
return stypes.toPythonString(instring), stypes.toPythonString(
substr), bool(found.value) | Locate a keyword in a string and extract the substring from
the beginning of the first word following the keyword to the
beginning of the first subsequent recognized terminator of a list.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/kxtrct_c.html
:param keywd: Word that marks the beginning of text of interest.
:type keywd: str
:param terms: Set of words, any of which marks the end of text.
:type terms: Array of str
:param nterms: Number of terms.
:type nterms: int
:param instring: String containing a sequence of words.
:type instring: str
:param termlen: Length of strings in string array term.
:type termlen: int
:param stringlen: Available space in argument string.
:type stringlen: int
:param substrlen: Available space in output substring.
:type substrlen: int
:return:
String containing a sequence of words,
String from end of keywd to beginning of first terms item found.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Locate a keyword in a string and extract the substring from
the beginning of the first word following the keyword to the
beginning of the first subsequent recognized terminator of a list.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/kxtrct_c.html
:param keywd: Word that marks the beginning of text of interest.
:type keywd: str
:param terms: Set of words, any of which marks the end of text.
:type terms: Array of str
:param nterms: Number of terms.
:type nterms: int
:param instring: String containing a sequence of words.
:type instring: str
:param termlen: Length of strings in string array term.
:type termlen: int
:param stringlen: Available space in argument string.
:type stringlen: int
:param substrlen: Available space in output substring.
:type substrlen: int
:return:
String containing a sequence of words,
String from end of keywd to beginning of first terms item found.
:rtype: tuple
### Response:
def kxtrct(keywd, terms, nterms, instring, termlen=_default_len_out, stringlen=_default_len_out, substrlen=_default_len_out):
"""
Locate a keyword in a string and extract the substring from
the beginning of the first word following the keyword to the
beginning of the first subsequent recognized terminator of a list.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/kxtrct_c.html
:param keywd: Word that marks the beginning of text of interest.
:type keywd: str
:param terms: Set of words, any of which marks the end of text.
:type terms: Array of str
:param nterms: Number of terms.
:type nterms: int
:param instring: String containing a sequence of words.
:type instring: str
:param termlen: Length of strings in string array term.
:type termlen: int
:param stringlen: Available space in argument string.
:type stringlen: int
:param substrlen: Available space in output substring.
:type substrlen: int
:return:
String containing a sequence of words,
String from end of keywd to beginning of first terms item found.
:rtype: tuple
"""
assert nterms <= len(terms)
# Python strings and string arrays => to C char pointers
keywd = stypes.stringToCharP(keywd)
terms = stypes.listToCharArrayPtr([s[:termlen-1] for s in terms[:nterms]],xLen=termlen,yLen=nterms)
instring = stypes.stringToCharP(instring[:stringlen-1],inlen=stringlen)
substr = stypes.stringToCharP(substrlen)
# Python ints => to C ints
termlen = ctypes.c_int(termlen)
nterms = ctypes.c_int(nterms)
stringlen = ctypes.c_int(stringlen)
substrlen = ctypes.c_int(substrlen)
found = ctypes.c_int()
libspice.kxtrct_c(keywd, termlen, terms, nterms,
stringlen, substrlen, instring, ctypes.byref(found),
substr)
return stypes.toPythonString(instring), stypes.toPythonString(
substr), bool(found.value) |
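Behaviourally, kxtrct extracts the words between the keyword and the first recognised terminator and removes them (together with the keyword) from the input string. A pure-Python sketch of that documented behaviour, useful for following the docstring without CSPICE; it approximates the semantics and is not the C implementation:

def extract_after_keyword(keywd, terms, instring):
    words = instring.split()
    if keywd not in words:
        return instring, "", False
    start = words.index(keywd) + 1
    end = len(words)
    for i in range(start, len(words)):
        if words[i] in terms:
            end = i
            break
    remainder = words[:words.index(keywd)] + words[end:]
    return " ".join(remainder), " ".join(words[start:end]), True

print(extract_after_keyword(
    "FROM", ["TO", "BEGINNING"],
    "FROM 1 October 1984 12:00:00 TO 1 January 1987"))
# ('TO 1 January 1987', '1 October 1984 12:00:00', True)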
def qos_rcv_queue_multicast_threshold_traffic_class4(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
rcv_queue = ET.SubElement(qos, "rcv-queue")
multicast = ET.SubElement(rcv_queue, "multicast")
threshold = ET.SubElement(multicast, "threshold")
traffic_class4 = ET.SubElement(threshold, "traffic-class4")
traffic_class4.text = kwargs.pop('traffic_class4')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def qos_rcv_queue_multicast_threshold_traffic_class4(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
rcv_queue = ET.SubElement(qos, "rcv-queue")
multicast = ET.SubElement(rcv_queue, "multicast")
threshold = ET.SubElement(multicast, "threshold")
traffic_class4 = ET.SubElement(threshold, "traffic-class4")
traffic_class4.text = kwargs.pop('traffic_class4')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _get_text_id(self, witness):
"""Returns the database ID of the Text record for `witness`.
This may require creating such a record.
If `text`\'s checksum does not match an existing record's
checksum, the record's checksum is updated and all associated
TextNGram and TextHasNGram records are deleted.
:param witness: witness to add a record for
:type witness: `WitnessText`
:rtype: `int`
"""
name, siglum = witness.get_names()
text_record = self._conn.execute(constants.SELECT_TEXT_SQL,
[name, siglum]).fetchone()
if text_record is None:
text_id = self._add_text_record(witness)
else:
text_id = text_record['id']
if text_record['checksum'] != witness.get_checksum():
filename = witness.get_filename()
self._logger.info('Text {} has changed since it was added to '
'the database'.format(filename))
self._update_text_record(witness, text_id)
self._logger.info('Deleting potentially out-of-date n-grams')
self._delete_text_ngrams(text_id)
return text_id | Returns the database ID of the Text record for `witness`.
This may require creating such a record.
If `text`\'s checksum does not match an existing record's
checksum, the record's checksum is updated and all associated
TextNGram and TextHasNGram records are deleted.
:param witness: witness to add a record for
:type witness: `WitnessText`
:rtype: `int` | Below is the the instruction that describes the task:
### Input:
Returns the database ID of the Text record for `witness`.
This may require creating such a record.
If `text`\'s checksum does not match an existing record's
checksum, the record's checksum is updated and all associated
TextNGram and TextHasNGram records are deleted.
:param witness: witness to add a record for
:type witness: `WitnessText`
:rtype: `int`
### Response:
def _get_text_id(self, witness):
"""Returns the database ID of the Text record for `witness`.
This may require creating such a record.
If `text`\'s checksum does not match an existing record's
checksum, the record's checksum is updated and all associated
TextNGram and TextHasNGram records are deleted.
:param witness: witness to add a record for
:type witness: `WitnessText`
:rtype: `int`
"""
name, siglum = witness.get_names()
text_record = self._conn.execute(constants.SELECT_TEXT_SQL,
[name, siglum]).fetchone()
if text_record is None:
text_id = self._add_text_record(witness)
else:
text_id = text_record['id']
if text_record['checksum'] != witness.get_checksum():
filename = witness.get_filename()
self._logger.info('Text {} has changed since it was added to '
'the database'.format(filename))
self._update_text_record(witness, text_id)
self._logger.info('Deleting potentially out-of-date n-grams')
self._delete_text_ngrams(text_id)
return text_id |
def update_variable(self, var, grad_var):
"""Update the variable and its slots."""
params = self.params
global_step = tf.to_float(self.global_step) + 1
# compute learning rate
lrate = params.learning_rate
if params.learning_rate_decay_scheme == "noam":
lrate *= tf.minimum(global_step * params.learning_rate_warmup_steps**-1.5,
global_step**-0.5)
else:
assert params.learning_rate_decay_scheme == "none"
lrate *= tf.minimum(global_step / params.learning_rate_warmup_steps, 1.0)
# compute adjustment due to second moment
slots = params.slots[var.op.name]
grad_squared = tf.square(grad_var)
beta2_pow = tf.pow(params.beta2, global_step)
if params.factored_second_moment_accumulator and len(var.shape) == 2:
vr_update = tf.assign(slots["adam_vr"], slots["adam_vr"] * params.beta2 +
tf.reduce_mean(grad_squared, 1, keepdims=True) *
(1.0 - params.beta2))
vc_update = tf.assign(slots["adam_vc"], slots["adam_vc"] * params.beta2 +
tf.reduce_mean(grad_squared, 0, keepdims=True) *
(1.0 - params.beta2))
with tf.control_dependencies([vr_update, vc_update]):
vr = tf.sqrt(slots["adam_vr"] / (1.0 - beta2_pow)) + params.epsilon
vc = tf.sqrt(slots["adam_vc"] / (1.0 - beta2_pow)) + params.epsilon
vc /= tf.reduce_mean(vc)
denom = vr * vc
else:
v_update = tf.assign(slots["adam_v"],
slots["adam_v"] * params.beta2 + grad_squared *
(1.0 - params.beta2))
with tf.control_dependencies([v_update]):
denom = tf.sqrt(slots["adam_v"] / (1.0 - beta2_pow)) + params.epsilon
# compute momentum if applicable
if params.beta1 != 0.0:
m_update = tf.assign(slots["adam_m"],
slots["adam_m"] * params.beta1 + grad_var *
(1.0 - params.beta1))
with tf.control_dependencies([m_update]):
grad_var = slots["adam_m"]
# update var
subtrahend = lrate * grad_var / denom
new_val = _quantize(_dequantize(var, params) - subtrahend, params)
return tf.assign(var, new_val) | Update the variable and its slots. | Below is the the instruction that describes the task:
### Input:
Update the variable and its slots.
### Response:
def update_variable(self, var, grad_var):
"""Update the variable and its slots."""
params = self.params
global_step = tf.to_float(self.global_step) + 1
# compute learning rate
lrate = params.learning_rate
if params.learning_rate_decay_scheme == "noam":
lrate *= tf.minimum(global_step * params.learning_rate_warmup_steps**-1.5,
global_step**-0.5)
else:
assert params.learning_rate_decay_scheme == "none"
lrate *= tf.minimum(global_step / params.learning_rate_warmup_steps, 1.0)
# compute adjustment due to second moment
slots = params.slots[var.op.name]
grad_squared = tf.square(grad_var)
beta2_pow = tf.pow(params.beta2, global_step)
if params.factored_second_moment_accumulator and len(var.shape) == 2:
vr_update = tf.assign(slots["adam_vr"], slots["adam_vr"] * params.beta2 +
tf.reduce_mean(grad_squared, 1, keepdims=True) *
(1.0 - params.beta2))
vc_update = tf.assign(slots["adam_vc"], slots["adam_vc"] * params.beta2 +
tf.reduce_mean(grad_squared, 0, keepdims=True) *
(1.0 - params.beta2))
with tf.control_dependencies([vr_update, vc_update]):
vr = tf.sqrt(slots["adam_vr"] / (1.0 - beta2_pow)) + params.epsilon
vc = tf.sqrt(slots["adam_vc"] / (1.0 - beta2_pow)) + params.epsilon
vc /= tf.reduce_mean(vc)
denom = vr * vc
else:
v_update = tf.assign(slots["adam_v"],
slots["adam_v"] * params.beta2 + grad_squared *
(1.0 - params.beta2))
with tf.control_dependencies([v_update]):
denom = tf.sqrt(slots["adam_v"] / (1.0 - beta2_pow)) + params.epsilon
# compute momentum if applicable
if params.beta1 != 0.0:
m_update = tf.assign(slots["adam_m"],
slots["adam_m"] * params.beta1 + grad_var *
(1.0 - params.beta1))
with tf.control_dependencies([m_update]):
grad_var = slots["adam_m"]
# update var
subtrahend = lrate * grad_var / denom
new_val = _quantize(_dequantize(var, params) - subtrahend, params)
return tf.assign(var, new_val) |
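The factored branch above keeps only a per-row and a per-column accumulator of grad**2 (an Adafactor-style memory saving) and rebuilds a full-size denominator by broadcasting. A NumPy sketch of just that piece, with illustrative shapes and hyperparameters:

import numpy as np

def factored_denominator(grad, vr, vc, beta2, step, eps=1e-8):
    g2 = grad ** 2
    vr = beta2 * vr + (1.0 - beta2) * g2.mean(axis=1, keepdims=True)  # row stats
    vc = beta2 * vc + (1.0 - beta2) * g2.mean(axis=0, keepdims=True)  # column stats
    bias = 1.0 - beta2 ** step
    r = np.sqrt(vr / bias) + eps
    c = np.sqrt(vc / bias) + eps
    c = c / c.mean()
    return vr, vc, r * c  # r*c broadcasts back to grad.shape

rng = np.random.default_rng(0)
grad = rng.normal(size=(4, 3))
vr, vc = np.zeros((4, 1)), np.zeros((1, 3))
vr, vc, denom = factored_denominator(grad, vr, vc, beta2=0.999, step=1)
print(denom.shape)  # (4, 3)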
def NOAJS_metric(bpmn_graph):
"""
Returns the value of the NOAJS metric (Number of Activities, joins and splits)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
"""
activities_count = all_activities_count(bpmn_graph)
gateways_count = all_gateways_count(bpmn_graph)
return activities_count + gateways_count | Returns the value of the NOAJS metric (Number of Activities, joins and splits)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model. | Below is the the instruction that describes the task:
### Input:
Returns the value of the NOAJS metric (Number of Activities, joins and splits)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
### Response:
def NOAJS_metric(bpmn_graph):
"""
Returns the value of the NOAJS metric (Number of Activities, joins and splits)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
"""
activities_count = all_activities_count(bpmn_graph)
gateways_count = all_gateways_count(bpmn_graph)
return activities_count + gateways_count |
def concatechain(*generators: types.FrameGenerator, separator: str = ''):
"""Return a generator that in each iteration takes one value from each of the
supplied generators, joins them together with the specified separator and
yields the result. Stops as soon as any iterator raises StopIteration and
returns the value contained in it.
Primarily created for chaining string generators, hence the name.
Args:
generators: Any number of generators that yield types that can be
joined together with the separator string.
separator: A separator to insert between each value yielded by
the different generators.
Returns:
A generator that yields strings that are the concatenation of one value
from each of the generators, joined together with the separator string.
"""
while True:
try:
next_ = [next(gen) for gen in generators]
yield separator.join(next_)
except StopIteration as exc:
return exc.value | Return a generator that in each iteration takes one value from each of the
supplied generators, joins them together with the specified separator and
yields the result. Stops as soon as any iterator raises StopIteration and
returns the value contained in it.
Primarily created for chaining string generators, hence the name.
Args:
generators: Any number of generators that yield types that can be
joined together with the separator string.
separator: A separator to insert between each value yielded by
the different generators.
Returns:
A generator that yields strings that are the concatenation of one value
from each of the generators, joined together with the separator string. | Below is the the instruction that describes the task:
### Input:
Return a generator that in each iteration takes one value from each of the
supplied generators, joins them together with the specified separator and
yields the result. Stops as soon as any iterator raises StopIteration and
returns the value contained in it.
Primarily created for chaining string generators, hence the name.
Args:
generators: Any number of generators that yield types that can be
joined together with the separator string.
separator: A separator to insert between each value yielded by
the different generators.
Returns:
A generator that yields strings that are the concatenation of one value
from each of the generators, joined together with the separator string.
### Response:
def concatechain(*generators: types.FrameGenerator, separator: str = ''):
"""Return a generator that in each iteration takes one value from each of the
supplied generators, joins them together with the specified separator and
yields the result. Stops as soon as any iterator raises StopIteration and
returns the value contained in it.
Primarily created for chaining string generators, hence the name.
Args:
generators: Any number of generators that yield types that can be
joined together with the separator string.
separator: A separator to insert between each value yielded by
the different generators.
Returns:
A generator that yields strings that are the concatenation of one value
from each of the generators, joined together with the separator string.
"""
while True:
try:
next_ = [next(gen) for gen in generators]
yield separator.join(next_)
except StopIteration as exc:
return exc.value |
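Since the generator above is self-contained, its behaviour is easy to check with two plain iterators (the pattern is copied here for demonstration; the inputs and separator are arbitrary):

def concatechain(*generators, separator=""):
    while True:
        try:
            values = [next(gen) for gen in generators]
        except StopIteration as exc:
            return exc.value
        yield separator.join(values)

left = iter(["|", "|", "|"])
right = iter(["-", "+", "-"])
print(list(concatechain(left, right, separator="")))  # ['|-', '|+', '|-']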
def will_tag(self):
"""
Check whether the feed should be tagged
"""
wanttags = self.retrieve_config('Tag', 'no')
if wanttags == 'yes':
if aux.staggerexists:
willtag = True
else:
willtag = False
print(("You want me to tag {0}, but you have not installed "
"the Stagger module. I cannot honour your request.").
format(self.name), file=sys.stderr, flush=True)
else:
willtag = False
return willtag | Check whether the feed should be tagged | Below is the the instruction that describes the task:
### Input:
Check whether the feed should be tagged
### Response:
def will_tag(self):
"""
Check whether the feed should be tagged
"""
wanttags = self.retrieve_config('Tag', 'no')
if wanttags == 'yes':
if aux.staggerexists:
willtag = True
else:
willtag = False
print(("You want me to tag {0}, but you have not installed "
"the Stagger module. I cannot honour your request.").
format(self.name), file=sys.stderr, flush=True)
else:
willtag = False
return willtag |
def set_output(self, outfile):
''' Set's the output file, currently only useful with context-managers.
Note:
This function is experimental and may not last.
'''
if self._orig_stdout: # restore Usted
sys.stdout = self._orig_stdout
self._stream = outfile
sys.stdout = _LineWriter(self, self._stream, self.default) | Set's the output file, currently only useful with context-managers.
Note:
This function is experimental and may not last. | Below is the the instruction that describes the task:
### Input:
Set's the output file, currently only useful with context-managers.
Note:
This function is experimental and may not last.
### Response:
def set_output(self, outfile):
''' Set's the output file, currently only useful with context-managers.
Note:
This function is experimental and may not last.
'''
if self._orig_stdout: # restore Usted
sys.stdout = self._orig_stdout
self._stream = outfile
sys.stdout = _LineWriter(self, self._stream, self.default) |
def cos_zen(utc_time, lon, lat):
"""Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*.
utc_time: datetime.datetime instance of the UTC time
lon and lat in degrees.
"""
lon = np.deg2rad(lon)
lat = np.deg2rad(lat)
r_a, dec = sun_ra_dec(utc_time)
h__ = _local_hour_angle(utc_time, lon, r_a)
return (np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__)) | Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*.
utc_time: datetime.datetime instance of the UTC time
lon and lat in degrees. | Below is the the instruction that describes the task:
### Input:
Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*.
utc_time: datetime.datetime instance of the UTC time
lon and lat in degrees.
### Response:
def cos_zen(utc_time, lon, lat):
"""Cosine of the sun-zenith angle for *lon*, *lat* at *utc_time*.
utc_time: datetime.datetime instance of the UTC time
lon and lat in degrees.
"""
lon = np.deg2rad(lon)
lat = np.deg2rad(lat)
r_a, dec = sun_ra_dec(utc_time)
h__ = _local_hour_angle(utc_time, lon, r_a)
return (np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h__)) |
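The return expression is the standard spherical-trigonometry identity cos(theta_z) = sin(lat)*sin(dec) + cos(lat)*cos(dec)*cos(h). A sketch with an assumed solar declination and hour angle (illustrative values, not derived from an ephemeris as sun_ra_dec does):

import numpy as np

def cos_zenith(lat_deg, dec_deg, hour_angle_deg):
    lat, dec, h = np.deg2rad([lat_deg, dec_deg, hour_angle_deg])
    return np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(h)

# 45 deg latitude, ~solstice declination, local solar noon (hour angle 0)
print(cos_zenith(45.0, 23.44, 0.0))  # ~0.93, i.e. zenith angle ~21.6 deg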
def configure(self, org_name, api_token=None, base_url='okta.com', ttl=None, max_ttl=None, bypass_okta_mfa=False,
mount_point=DEFAULT_MOUNT_POINT):
"""Configure the connection parameters for Okta.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param org_name: Name of the organization to be used in the Okta API.
:type org_name: str | unicode
:param api_token: Okta API token. This is required to query Okta for user group membership. If this is not
supplied only locally configured groups will be enabled.
:type api_token: str | unicode
:param base_url: If set, will be used as the base domain for API requests. Examples are okta.com,
oktapreview.com, and okta-emea.com.
:type base_url: str | unicode
:param ttl: Duration after which authentication will be expired.
:type ttl: str | unicode
:param max_ttl: Maximum duration after which authentication will be expired.
:type max_ttl: str | unicode
:param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful if using one of Vault's built-in MFA
mechanisms, but this will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED.
:type bypass_okta_mfa: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'org_name': org_name,
'api_token': api_token,
'base_url': base_url,
'ttl': ttl,
'max_ttl': max_ttl,
'bypass_okta_mfa': bypass_okta_mfa,
}
api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
) | Configure the connection parameters for Okta.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param org_name: Name of the organization to be used in the Okta API.
:type org_name: str | unicode
:param api_token: Okta API token. This is required to query Okta for user group membership. If this is not
supplied only locally configured groups will be enabled.
:type api_token: str | unicode
:param base_url: If set, will be used as the base domain for API requests. Examples are okta.com,
oktapreview.com, and okta-emea.com.
:type base_url: str | unicode
:param ttl: Duration after which authentication will be expired.
:type ttl: str | unicode
:param max_ttl: Maximum duration after which authentication will be expired.
:type max_ttl: str | unicode
:param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful if using one of Vault's built-in MFA
mechanisms, but this will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED.
:type bypass_okta_mfa: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Configure the connection parameters for Okta.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param org_name: Name of the organization to be used in the Okta API.
:type org_name: str | unicode
:param api_token: Okta API token. This is required to query Okta for user group membership. If this is not
supplied only locally configured groups will be enabled.
:type api_token: str | unicode
:param base_url: If set, will be used as the base domain for API requests. Examples are okta.com,
oktapreview.com, and okta-emea.com.
:type base_url: str | unicode
:param ttl: Duration after which authentication will be expired.
:type ttl: str | unicode
:param max_ttl: Maximum duration after which authentication will be expired.
:type max_ttl: str | unicode
:param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful if using one of Vault's built-in MFA
mechanisms, but this will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED.
:type bypass_okta_mfa: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
### Response:
def configure(self, org_name, api_token=None, base_url='okta.com', ttl=None, max_ttl=None, bypass_okta_mfa=False,
mount_point=DEFAULT_MOUNT_POINT):
"""Configure the connection parameters for Okta.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param org_name: Name of the organization to be used in the Okta API.
:type org_name: str | unicode
:param api_token: Okta API token. This is required to query Okta for user group membership. If this is not
supplied only locally configured groups will be enabled.
:type api_token: str | unicode
:param base_url: If set, will be used as the base domain for API requests. Examples are okta.com,
oktapreview.com, and okta-emea.com.
:type base_url: str | unicode
:param ttl: Duration after which authentication will be expired.
:type ttl: str | unicode
:param max_ttl: Maximum duration after which authentication will be expired.
:type max_ttl: str | unicode
:param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful if using one of Vault's built-in MFA
mechanisms, but this will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED.
:type bypass_okta_mfa: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'org_name': org_name,
'api_token': api_token,
'base_url': base_url,
'ttl': ttl,
'max_ttl': max_ttl,
'bypass_okta_mfa': bypass_okta_mfa,
}
api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
) |
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__TK_template_info__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to reraise it unchanged. XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames) | If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames. | Below is the the instruction that describes the task:
### Input:
If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
### Response:
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__TK_template_info__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to reraise it unchanged. XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames) |
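Stripped of the template bookkeeping, the core of the routine is a walk over tb.tb_next that filters frames by their code objects. A self-contained illustration of that walk:

import sys

def frame_summaries(skip_codes=frozenset()):
    tb = sys.exc_info()[2]
    frames = []
    while tb is not None:
        code = tb.tb_frame.f_code
        if code not in skip_codes:
            frames.append((code.co_name, tb.tb_lineno))
        tb = tb.tb_next
    return frames

def helper():
    raise ValueError("boom")

try:
    helper()
except ValueError:
    # pass internal code objects in skip_codes to hide them, as the real code does
    print(frame_summaries())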
def get_formatted_as_type(self, value, default=None, out_type=str):
"""Return formatted value for input value, returns as out_type.
Caveat emptor: if out_type is bool and value a string,
return will be True if str is 'True'. It will be False for all other
cases.
Args:
value: the value to format
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type
"""
if value is None:
value = default
if isinstance(value, SpecialTagDirective):
result = value.get_value(self)
return types.cast_to_type(result, out_type)
if isinstance(value, str):
result = self.get_formatted_string(value)
result_type = type(result)
if out_type is result_type:
# get_formatted_string result is already a string
return result
elif out_type is bool and result_type is str:
# casting a str to bool is always True, hence special case. If
# the str value is 'False'/'false', presumably user can
# reasonably expect a bool False response.
return result.lower() in ['true', '1', '1.0']
else:
return out_type(result)
else:
return out_type(value) | Return formatted value for input value, returns as out_type.
Caveat emptor: if out_type is bool and value a string,
return will be True if str is 'True'. It will be False for all other
cases.
Args:
value: the value to format
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type | Below is the the instruction that describes the task:
### Input:
Return formatted value for input value, returns as out_type.
Caveat emptor: if out_type is bool and value a string,
return will be True if str is 'True'. It will be False for all other
cases.
Args:
value: the value to format
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type
### Response:
def get_formatted_as_type(self, value, default=None, out_type=str):
"""Return formatted value for input value, returns as out_type.
Caveat emptor: if out_type is bool and value a string,
return will be True if str is 'True'. It will be False for all other
cases.
Args:
value: the value to format
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type
"""
if value is None:
value = default
if isinstance(value, SpecialTagDirective):
result = value.get_value(self)
return types.cast_to_type(result, out_type)
if isinstance(value, str):
result = self.get_formatted_string(value)
result_type = type(result)
if out_type is result_type:
# get_formatted_string result is already a string
return result
elif out_type is bool and result_type is str:
# casting a str to bool is always True, hence special case. If
# the str value is 'False'/'false', presumably user can
# reasonably expect a bool False response.
return result.lower() in ['true', '1', '1.0']
else:
return out_type(result)
else:
return out_type(value) |
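The bool special case exists because casting any non-empty string with bool() yields True; comparing against a small set of accepted spellings gives the intended behaviour:

print(bool("False"))  # True -- any non-empty string is truthy

def cast_str_to_bool(value):
    return value.lower() in ["true", "1", "1.0"]

print(cast_str_to_bool("False"), cast_str_to_bool("True"), cast_str_to_bool("1"))
# False True True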
def fake_KATCP_client_resource_factory(
KATCPClientResourceClass, fake_options, resource_spec, *args, **kwargs):
"""Create a fake KATCPClientResource-like class and a fake-manager
Parameters
----------
KATCPClientResourceClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResource`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resource_spec, *args, **kwargs : passed to KATCPClientResourceClass
A subclass of the passed-in KATCPClientResourceClass is created that replaces the
internal InspecingClient instances with fakes using fake_inspecting_client_factory()
based on the InspectingClient class used by KATCPClientResourceClass.
Returns
-------
(fake_katcp_client_resource, fake_katcp_client_resource_manager):
fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance
Bound to the `fake_katcp_client_resource` instance.
"""
# TODO Implement allow_any_request functionality. When True, any unknown request (even
# if there is no fake implementation) should succeed
allow_any_request = fake_options.get('allow_any_request', False)
class FakeKATCPClientResource(KATCPClientResourceClass):
def inspecting_client_factory(self, host, port, ioloop_set_to):
real_instance = (super(FakeKATCPClientResource, self)
.inspecting_client_factory(host, port, ioloop_set_to) )
fic, fic_manager = fake_inspecting_client_factory(
real_instance.__class__, fake_options, host, port,
ioloop=ioloop_set_to, auto_reconnect=self.auto_reconnect)
self.fake_inspecting_client_manager = fic_manager
return fic
fkcr = FakeKATCPClientResource(resource_spec, *args, **kwargs)
fkcr_manager = FakeKATCPClientResourceManager(fkcr)
return (fkcr, fkcr_manager) | Create a fake KATCPClientResource-like class and a fake-manager
Parameters
----------
KATCPClientResourceClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResource`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resource_spec, *args, **kwargs : passed to KATCPClientResourceClass
A subclass of the passed-in KATCPClientResourceClass is created that replaces the
internal InspecingClient instances with fakes using fake_inspecting_client_factory()
based on the InspectingClient class used by KATCPClientResourceClass.
Returns
-------
(fake_katcp_client_resource, fake_katcp_client_resource_manager):
fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance
Bound to the `fake_katcp_client_resource` instance. | Below is the the instruction that describes the task:
### Input:
Create a fake KATCPClientResource-like class and a fake-manager
Parameters
----------
KATCPClientResourceClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResource`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resource_spec, *args, **kwargs : passed to KATCPClientResourceClass
A subclass of the passed-in KATCPClientResourceClass is created that replaces the
internal InspecingClient instances with fakes using fake_inspecting_client_factory()
based on the InspectingClient class used by KATCPClientResourceClass.
Returns
-------
(fake_katcp_client_resource, fake_katcp_client_resource_manager):
fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance
Bound to the `fake_katcp_client_resource` instance.
### Response:
def fake_KATCP_client_resource_factory(
KATCPClientResourceClass, fake_options, resource_spec, *args, **kwargs):
"""Create a fake KATCPClientResource-like class and a fake-manager
Parameters
----------
KATCPClientResourceClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResource`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resource_spec, *args, **kwargs : passed to KATCPClientResourceClass
A subclass of the passed-in KATCPClientResourceClass is created that replaces the
internal InspecingClient instances with fakes using fake_inspecting_client_factory()
based on the InspectingClient class used by KATCPClientResourceClass.
Returns
-------
(fake_katcp_client_resource, fake_katcp_client_resource_manager):
fake_katcp_client_resource : instance of faked subclass of KATCPClientResourceClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceManager` instance
Bound to the `fake_katcp_client_resource` instance.
"""
# TODO Implement allow_any_request functionality. When True, any unknown request (even
# if there is no fake implementation) should succeed
allow_any_request = fake_options.get('allow_any_request', False)
class FakeKATCPClientResource(KATCPClientResourceClass):
def inspecting_client_factory(self, host, port, ioloop_set_to):
real_instance = (super(FakeKATCPClientResource, self)
.inspecting_client_factory(host, port, ioloop_set_to) )
fic, fic_manager = fake_inspecting_client_factory(
real_instance.__class__, fake_options, host, port,
ioloop=ioloop_set_to, auto_reconnect=self.auto_reconnect)
self.fake_inspecting_client_manager = fic_manager
return fic
fkcr = FakeKATCPClientResource(resource_spec, *args, **kwargs)
fkcr_manager = FakeKATCPClientResourceManager(fkcr)
return (fkcr, fkcr_manager) |
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the object.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the report.
"""
if remove_nones:
report_dict = super().to_dict(remove_nones=True)
else:
report_dict = {
'title': self.title,
'reportBody': self.body,
'timeBegan': self.time_began,
'externalUrl': self.external_url,
'distributionType': self._get_distribution_type(),
'externalTrackingId': self.external_id,
'enclaveIds': self.enclave_ids,
'created': self.created,
'updated': self.updated,
}
# id field might not be present
if self.id is not None:
report_dict['id'] = self.id
else:
report_dict['id'] = None
return report_dict | Creates a dictionary representation of the object.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the report. | Below is the the instruction that describes the task:
### Input:
Creates a dictionary representation of the object.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the report.
### Response:
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the object.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the report.
"""
if remove_nones:
report_dict = super().to_dict(remove_nones=True)
else:
report_dict = {
'title': self.title,
'reportBody': self.body,
'timeBegan': self.time_began,
'externalUrl': self.external_url,
'distributionType': self._get_distribution_type(),
'externalTrackingId': self.external_id,
'enclaveIds': self.enclave_ids,
'created': self.created,
'updated': self.updated,
}
# id field might not be present
if self.id is not None:
report_dict['id'] = self.id
else:
report_dict['id'] = None
return report_dict |
def k_to_R_value(k, SI=True):
r'''Returns the R-value of a substance given its thermal conductivity,
Will return R-value in SI units unless SI is false. SI units are
m^2 K/(W*inch); Imperial units of R-value are ft^2 deg F*h/(BTU*inch).
Parameters
----------
k : float
Thermal conductivity of a substance [W/m/K]
SI : bool, optional
Whether to use the SI conversion or not
Returns
-------
R_value : float
R-value of a substance [m^2 K/(W*inch) or ft^2 deg F*h/(BTU*inch)]
Notes
-----
Provides the reverse conversion of R_value_to_k.
Examples
--------
>>> k_to_R_value(R_value_to_k(0.12)), k_to_R_value(R_value_to_k(0.71, SI=False), SI=False)
(0.11999999999999998, 0.7099999999999999)
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
'''
r = k_to_thermal_resistivity(k)
if SI:
return r*inch
else:
return r/(foot**2*degree_Fahrenheit*hour/Btu/inch) | r'''Returns the R-value of a substance given its thermal conductivity,
Will return R-value in SI units unless SI is false. SI units are
m^2 K/(W*inch); Imperial units of R-value are ft^2 deg F*h/(BTU*inch).
Parameters
----------
k : float
Thermal conductivity of a substance [W/m/K]
SI : bool, optional
Whether to use the SI conversion or not
Returns
-------
R_value : float
R-value of a substance [m^2 K/(W*inch) or ft^2 deg F*h/(BTU*inch)]
Notes
-----
Provides the reverse conversion of R_value_to_k.
Examples
--------
>>> k_to_R_value(R_value_to_k(0.12)), k_to_R_value(R_value_to_k(0.71, SI=False), SI=False)
(0.11999999999999998, 0.7099999999999999)
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010. | Below is the the instruction that describes the task:
### Input:
r'''Returns the R-value of a substance given its thermal conductivity,
Will return R-value in SI units unless SI is false. SI units are
m^2 K/(W*inch); Imperial units of R-value are ft^2 deg F*h/(BTU*inch).
Parameters
----------
k : float
Thermal conductivity of a substance [W/m/K]
SI : bool, optional
Whether to use the SI conversion or not
Returns
-------
R_value : float
R-value of a substance [m^2 K/(W*inch) or ft^2 deg F*h/(BTU*inch)]
Notes
-----
Provides the reverse conversion of R_value_to_k.
Examples
--------
>>> k_to_R_value(R_value_to_k(0.12)), k_to_R_value(R_value_to_k(0.71, SI=False), SI=False)
(0.11999999999999998, 0.7099999999999999)
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
### Response:
def k_to_R_value(k, SI=True):
r'''Returns the R-value of a substance given its thermal conductivity,
Will return R-value in SI units unless SI is false. SI units are
m^2 K/(W*inch); Imperial units of R-value are ft^2 deg F*h/(BTU*inch).
Parameters
----------
k : float
Thermal conductivity of a substance [W/m/K]
SI : bool, optional
Whether to use the SI conversion or not
Returns
-------
R_value : float
R-value of a substance [m^2 K/(W*inch) or ft^2 deg F*h/(BTU*inch)]
Notes
-----
Provides the reverse conversion of R_value_to_k.
Examples
--------
>>> k_to_R_value(R_value_to_k(0.12)), k_to_R_value(R_value_to_k(0.71, SI=False), SI=False)
(0.11999999999999998, 0.7099999999999999)
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
'''
r = k_to_thermal_resistivity(k)
if SI:
return r*inch
else:
return r/(foot**2*degree_Fahrenheit*hour/Btu/inch) |
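Numerically the conversion is just the reciprocal of k scaled by unit factors. A standalone sketch with the constants written out (approximate values, and assuming k_to_thermal_resistivity simply returns 1/k):

inch = 0.0254                  # m
foot = 0.3048                  # m
hour = 3600.0                  # s
Btu = 1055.056                 # J, approximate
degree_Fahrenheit = 5.0 / 9.0  # K per degF (temperature difference)

def k_to_R_value_sketch(k, SI=True):
    r = 1.0 / k  # thermal resistivity, m*K/W
    if SI:
        return r * inch
    return r / (foot**2 * degree_Fahrenheit * hour / Btu / inch)

print(k_to_R_value_sketch(0.2))            # SI R-value per inch
print(k_to_R_value_sketch(0.2, SI=False))  # imperial R-value per inch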
def with_cpu(ops, model):
"""Wrap a model that should run on CPU, transferring inputs and outputs
as necessary."""
model.to_cpu()
def with_cpu_forward(inputs, drop=0.0):
cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
gpu_outputs = _to_device(ops, cpu_outputs)
def with_cpu_backprop(d_outputs, sgd=None):
cpu_d_outputs = _to_cpu(d_outputs)
return backprop(cpu_d_outputs, sgd=sgd)
return gpu_outputs, with_cpu_backprop
return wrap(with_cpu_forward, model) | Wrap a model that should run on CPU, transferring inputs and outputs
as necessary. | Below is the the instruction that describes the task:
### Input:
Wrap a model that should run on CPU, transferring inputs and outputs
as necessary.
### Response:
def with_cpu(ops, model):
"""Wrap a model that should run on CPU, transferring inputs and outputs
as necessary."""
model.to_cpu()
def with_cpu_forward(inputs, drop=0.0):
cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
gpu_outputs = _to_device(ops, cpu_outputs)
def with_cpu_backprop(d_outputs, sgd=None):
cpu_d_outputs = _to_cpu(d_outputs)
return backprop(cpu_d_outputs, sgd=sgd)
return gpu_outputs, with_cpu_backprop
return wrap(with_cpu_forward, model) |
def bookmarks_index_changed(self):
"""Update the UI when the bookmarks combobox has changed."""
index = self.bookmarks_list.currentIndex()
if index >= 0:
self.tool.reset()
rectangle = self.bookmarks_list.itemData(index)
self.tool.set_rectangle(rectangle)
self.canvas.setExtent(rectangle)
self.ok_button.setEnabled(True)
else:
self.ok_button.setDisabled(True) | Update the UI when the bookmarks combobox has changed. | Below is the the instruction that describes the task:
### Input:
Update the UI when the bookmarks combobox has changed.
### Response:
def bookmarks_index_changed(self):
"""Update the UI when the bookmarks combobox has changed."""
index = self.bookmarks_list.currentIndex()
if index >= 0:
self.tool.reset()
rectangle = self.bookmarks_list.itemData(index)
self.tool.set_rectangle(rectangle)
self.canvas.setExtent(rectangle)
self.ok_button.setEnabled(True)
else:
self.ok_button.setDisabled(True) |
def grad(func, argnum=None):
"""Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
"""
grad_with_loss_func = grad_and_loss(func, argnum)
@functools.wraps(grad_with_loss_func)
def wrapped(*args):
return grad_with_loss_func(*args)[0]
return wrapped | Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs) | Below is the the instruction that describes the task:
### Input:
Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
### Response:
def grad(func, argnum=None):
"""Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
"""
grad_with_loss_func = grad_and_loss(func, argnum)
@functools.wraps(grad_with_loss_func)
def wrapped(*args):
return grad_with_loss_func(*args)[0]
return wrapped |
def gather(self):
"""Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message]
"""
if self._multi_messages:
return self._multi_message_generator()
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
for data in self._body_gen:
message_bytes = None
try:
if not data.application_properties: # Message-like object
data.application_properties = self.application_properties
message_bytes = data.encode_message()
except AttributeError: # raw data
wrap_message = Message(body=data, application_properties=self.application_properties)
message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
raise ValueError(
"Data set too large for a single message."
"Set multi_messages to True to split data across multiple messages.")
new_message._body.append(message_bytes) # pylint: disable=protected-access
new_message.on_send_complete = self.on_send_complete
return [new_message] | Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message] | Below is the the instruction that describes the task:
### Input:
Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message]
### Response:
def gather(self):
"""Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message]
"""
if self._multi_messages:
return self._multi_message_generator()
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
for data in self._body_gen:
message_bytes = None
try:
if not data.application_properties: # Message-like object
data.application_properties = self.application_properties
message_bytes = data.encode_message()
except AttributeError: # raw data
wrap_message = Message(body=data, application_properties=self.application_properties)
message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
raise ValueError(
"Data set too large for a single message."
"Set multi_messages to True to split data across multiple messages.")
new_message._body.append(message_bytes) # pylint: disable=protected-access
new_message.on_send_complete = self.on_send_complete
return [new_message] |
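The size guard in gather is a general batching pattern: keep a running total of encoded bytes plus a fixed overhead and refuse (or roll over) once the limit would be exceeded. A stand-in sketch, not the uAMQP encoder; chunk encoding here is deliberately trivial:

def batch_single(chunks, max_len, overhead=0):
    body, size = [], overhead
    for chunk in chunks:
        encoded = bytes(chunk)
        size += len(encoded)
        if size > max_len:
            raise ValueError("Data set too large for a single message.")
        body.append(encoded)
    return body

print(batch_single([b"ab", b"cd"], max_len=10, overhead=2))  # [b'ab', b'cd']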
def best_match(supported, header):
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
:rtype: str
"""
split_header = _filter_blank(header.split(','))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = []
pos = 0
for mime_type in supported:
weighted_matches.append((
quality_and_fitness_parsed(mime_type, parsed_header),
pos,
mime_type
))
pos += 1
weighted_matches.sort()
return weighted_matches[-1][0][0] and weighted_matches[-1][2] or '' | Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
:rtype: str | Below is the the instruction that describes the task:
### Input:
Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
:rtype: str
### Response:
def best_match(supported, header):
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
:rtype: str
"""
split_header = _filter_blank(header.split(','))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = []
pos = 0
for mime_type in supported:
weighted_matches.append((
quality_and_fitness_parsed(mime_type, parsed_header),
pos,
mime_type
))
pos += 1
weighted_matches.sort()
return weighted_matches[-1][0][0] and weighted_matches[-1][2] or '' |
def _attach_arguments(self):
"""Add the registered arguments to the parser."""
for arg in self.arguments:
self.parser.add_argument(*arg[0], **arg[1]) | Add the registered arguments to the parser. | Below is the the instruction that describes the task:
### Input:
Add the registered arguments to the parser.
### Response:
def _attach_arguments(self):
"""Add the registered arguments to the parser."""
for arg in self.arguments:
self.parser.add_argument(*arg[0], **arg[1]) |
def desymbolize(self):
"""
We believe this was a pointer and symbolized it before. Now we want to desymbolize it.
The following actions are performed:
- Reload content from memory
- Mark the sort as 'unknown'
:return: None
"""
self.sort = 'unknown'
content = self.binary.fast_memory_load(self.addr, self.size, bytes)
self.content = [ content ] | We believe this was a pointer and symbolized it before. Now we want to desymbolize it.
The following actions are performed:
- Reload content from memory
- Mark the sort as 'unknown'
:return: None | Below is the the instruction that describes the task:
### Input:
We believe this was a pointer and symbolized it before. Now we want to desymbolize it.
The following actions are performed:
- Reload content from memory
- Mark the sort as 'unknown'
:return: None
### Response:
def desymbolize(self):
"""
We believe this was a pointer and symbolized it before. Now we want to desymbolize it.
The following actions are performed:
- Reload content from memory
- Mark the sort as 'unknown'
:return: None
"""
self.sort = 'unknown'
content = self.binary.fast_memory_load(self.addr, self.size, bytes)
self.content = [ content ] |
def find_rt_coefficients_tot_intens(self, depth0=None):
"""Figure out total-intensity emission and absorption coefficients for the
current parameters.
**Argument**
*depth0* (default None)
A first guess to use for a good integration depth, in cm. If None,
the most recent value is used.
**Return value**
A tuple ``(j_I, alpha_I)``, where:
*j_I*
The total intensity emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_I*
The total intensity absorption coefficient, in cm^-1.
See :meth:`find_rt_coefficients` for an explanation how this routine
works. This version merely postprocesses the results from that method
to convert the coefficients to refer to total intensity.
"""
j_O, alpha_O, j_X, alpha_X = self.find_rt_coefficients(depth0=depth0)
j_I = j_O + j_X
alpha_I = 0.5 * (alpha_O + alpha_X) # uhh... right?
return (j_I, alpha_I) | Figure out total-intensity emission and absorption coefficients for the
current parameters.
**Argument**
*depth0* (default None)
A first guess to use for a good integration depth, in cm. If None,
the most recent value is used.
**Return value**
A tuple ``(j_I, alpha_I)``, where:
*j_I*
The total intensity emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_I*
The total intensity absorption coefficient, in cm^-1.
See :meth:`find_rt_coefficients` for an explanation how this routine
works. This version merely postprocesses the results from that method
        to convert the coefficients to refer to total intensity. | Below is the instruction that describes the task:
### Input:
Figure out total-intensity emission and absorption coefficients for the
current parameters.
**Argument**
*depth0* (default None)
A first guess to use for a good integration depth, in cm. If None,
the most recent value is used.
**Return value**
A tuple ``(j_I, alpha_I)``, where:
*j_I*
The total intensity emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_I*
The total intensity absorption coefficient, in cm^-1.
See :meth:`find_rt_coefficients` for an explanation how this routine
works. This version merely postprocesses the results from that method
to convert the coefficients to refer to total intensity.
### Response:
def find_rt_coefficients_tot_intens(self, depth0=None):
"""Figure out total-intensity emission and absorption coefficients for the
current parameters.
**Argument**
*depth0* (default None)
A first guess to use for a good integration depth, in cm. If None,
the most recent value is used.
**Return value**
A tuple ``(j_I, alpha_I)``, where:
*j_I*
The total intensity emission coefficient, in erg/s/cm^3/Hz/sr.
*alpha_I*
The total intensity absorption coefficient, in cm^-1.
See :meth:`find_rt_coefficients` for an explanation how this routine
works. This version merely postprocesses the results from that method
to convert the coefficients to refer to total intensity.
"""
j_O, alpha_O, j_X, alpha_X = self.find_rt_coefficients(depth0=depth0)
j_I = j_O + j_X
alpha_I = 0.5 * (alpha_O + alpha_X) # uhh... right?
return (j_I, alpha_I) |
def open(self):
"""
Load topology elements
"""
if self._status == "opened":
return
self.reset()
self._loading = True
self._status = "opened"
path = self._topology_file()
if not os.path.exists(path):
self._loading = False
return
try:
shutil.copy(path, path + ".backup")
except OSError:
pass
try:
project_data = load_topology(path)
#load meta of project
keys_to_load = [
"auto_start",
"auto_close",
"auto_open",
"scene_height",
"scene_width",
"zoom",
"show_layers",
"snap_to_grid",
"show_grid",
"show_interface_labels"
]
for key in keys_to_load:
val = project_data.get(key, None)
if val is not None:
setattr(self, key, val)
topology = project_data["topology"]
for compute in topology.get("computes", []):
yield from self.controller.add_compute(**compute)
for node in topology.get("nodes", []):
compute = self.controller.get_compute(node.pop("compute_id"))
name = node.pop("name")
node_id = node.pop("node_id", str(uuid.uuid4()))
yield from self.add_node(compute, name, node_id, dump=False, **node)
for link_data in topology.get("links", []):
if 'link_id' not in link_data.keys():
# skip the link
continue
link = yield from self.add_link(link_id=link_data["link_id"])
if "filters" in link_data:
yield from link.update_filters(link_data["filters"])
for node_link in link_data["nodes"]:
node = self.get_node(node_link["node_id"])
port = node.get_port(node_link["adapter_number"], node_link["port_number"])
if port is None:
log.warning("Port {}/{} for {} not found".format(node_link["adapter_number"], node_link["port_number"], node.name))
continue
if port.link is not None:
log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id))
continue
yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False)
if len(link.nodes) != 2:
# a link should have 2 attached nodes, this can happen with corrupted projects
yield from self.delete_link(link.id, force_delete=True)
for drawing_data in topology.get("drawings", []):
yield from self.add_drawing(dump=False, **drawing_data)
self.dump()
# We catch all error to be able to rollback the .gns3 to the previous state
except Exception as e:
for compute in list(self._project_created_on_compute):
try:
yield from compute.post("/projects/{}/close".format(self._id))
# We don't care if a compute is down at this step
except (ComputeError, aiohttp.web.HTTPNotFound, aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError):
pass
try:
if os.path.exists(path + ".backup"):
shutil.copy(path + ".backup", path)
except (PermissionError, OSError):
pass
self._status = "closed"
self._loading = False
if isinstance(e, ComputeError):
raise aiohttp.web.HTTPConflict(text=str(e))
else:
raise e
try:
os.remove(path + ".backup")
except OSError:
pass
self._loading = False
# Should we start the nodes when project is open
if self._auto_start:
# Start all in the background without waiting for completion
# we ignore errors because we want to let the user open
# their project and fix it
            asyncio.async(self.start_all()) | Load topology elements | Below is the instruction that describes the task:
### Input:
Load topology elements
### Response:
def open(self):
"""
Load topology elements
"""
if self._status == "opened":
return
self.reset()
self._loading = True
self._status = "opened"
path = self._topology_file()
if not os.path.exists(path):
self._loading = False
return
try:
shutil.copy(path, path + ".backup")
except OSError:
pass
try:
project_data = load_topology(path)
#load meta of project
keys_to_load = [
"auto_start",
"auto_close",
"auto_open",
"scene_height",
"scene_width",
"zoom",
"show_layers",
"snap_to_grid",
"show_grid",
"show_interface_labels"
]
for key in keys_to_load:
val = project_data.get(key, None)
if val is not None:
setattr(self, key, val)
topology = project_data["topology"]
for compute in topology.get("computes", []):
yield from self.controller.add_compute(**compute)
for node in topology.get("nodes", []):
compute = self.controller.get_compute(node.pop("compute_id"))
name = node.pop("name")
node_id = node.pop("node_id", str(uuid.uuid4()))
yield from self.add_node(compute, name, node_id, dump=False, **node)
for link_data in topology.get("links", []):
if 'link_id' not in link_data.keys():
# skip the link
continue
link = yield from self.add_link(link_id=link_data["link_id"])
if "filters" in link_data:
yield from link.update_filters(link_data["filters"])
for node_link in link_data["nodes"]:
node = self.get_node(node_link["node_id"])
port = node.get_port(node_link["adapter_number"], node_link["port_number"])
if port is None:
log.warning("Port {}/{} for {} not found".format(node_link["adapter_number"], node_link["port_number"], node.name))
continue
if port.link is not None:
log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id))
continue
yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False)
if len(link.nodes) != 2:
# a link should have 2 attached nodes, this can happen with corrupted projects
yield from self.delete_link(link.id, force_delete=True)
for drawing_data in topology.get("drawings", []):
yield from self.add_drawing(dump=False, **drawing_data)
self.dump()
# We catch all error to be able to rollback the .gns3 to the previous state
except Exception as e:
for compute in list(self._project_created_on_compute):
try:
yield from compute.post("/projects/{}/close".format(self._id))
# We don't care if a compute is down at this step
except (ComputeError, aiohttp.web.HTTPNotFound, aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError):
pass
try:
if os.path.exists(path + ".backup"):
shutil.copy(path + ".backup", path)
except (PermissionError, OSError):
pass
self._status = "closed"
self._loading = False
if isinstance(e, ComputeError):
raise aiohttp.web.HTTPConflict(text=str(e))
else:
raise e
try:
os.remove(path + ".backup")
except OSError:
pass
self._loading = False
# Should we start the nodes when project is open
if self._auto_start:
# Start all in the background without waiting for completion
# we ignore errors because we want to let the user open
# their project and fix it
asyncio.async(self.start_all()) |
def load_file_to_str(path):
# type: (str) -> str
"""
Load file into a string removing newlines
Args:
path (str): Path to file
Returns:
str: String contents of file
"""
with open(path, 'rt') as f:
string = f.read().replace(linesep, '')
if not string:
raise LoadError('%s file is empty!' % path)
return string | Load file into a string removing newlines
Args:
path (str): Path to file
Returns:
        str: String contents of file | Below is the instruction that describes the task:
### Input:
Load file into a string removing newlines
Args:
path (str): Path to file
Returns:
str: String contents of file
### Response:
def load_file_to_str(path):
# type: (str) -> str
"""
Load file into a string removing newlines
Args:
path (str): Path to file
Returns:
str: String contents of file
"""
with open(path, 'rt') as f:
string = f.read().replace(linesep, '')
if not string:
raise LoadError('%s file is empty!' % path)
return string |
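A quick usage sketch for load_file_to_str() above; the file name is made up, and LoadError plus the `linesep` import come from the surrounding module rather than this row.

# Write a one-line token file, then read it back as a plain string.
with open('token.txt', 'w') as f:
    f.write('abc123')

token = load_file_to_str('token.txt')
print(token)   # -> 'abc123'
# Passing the path of an empty file raises LoadError, per the check above.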
def refresh(self):
        """Refresh the attributes of the Answer object.
        For example, if the upvote count has increased, call ``refresh()`` first,
        then access the upvote_num attribute to get the updated upvote count.
:return: None
"""
super().refresh()
self._html = None
self._upvote_num = None
self._content = None
self._collect_num = None
        self._comment_num = None | Refresh the attributes of the Answer object.
        For example, if the upvote count has increased, call ``refresh()`` first,
        then access the upvote_num attribute to get the updated upvote count.
        :return: None | Below is the instruction that describes the task:
### Input:
Refresh the attributes of the Answer object.
For example, if the upvote count has increased, call ``refresh()`` first,
then access the upvote_num attribute to get the updated upvote count.
:return: None
### Response:
def refresh(self):
        """Refresh the attributes of the Answer object.
        For example, if the upvote count has increased, call ``refresh()`` first,
        then access the upvote_num attribute to get the updated upvote count.
:return: None
"""
super().refresh()
self._html = None
self._upvote_num = None
self._content = None
self._collect_num = None
self._comment_num = None |
def transfer_options(cls, obj, new_obj, backend=None):
"""
Transfers options for all backends from one object to another.
Drops any options defined in the supplied drop list.
"""
backend = cls.current_backend if backend is None else backend
type_name = type(new_obj).__name__
group = type_name if obj.group == type(obj).__name__ else obj.group
spec = '.'.join([s for s in (type_name, group, obj.label) if s])
options = []
for group in Options._option_groups:
opts = cls.lookup_options(backend, obj, group)
if opts and opts.kwargs: options.append(Options(group, **opts.kwargs))
if options:
StoreOptions.set_options(new_obj, {spec: options}, backend) | Transfers options for all backends from one object to another.
        Drops any options defined in the supplied drop list. | Below is the instruction that describes the task:
### Input:
Transfers options for all backends from one object to another.
Drops any options defined in the supplied drop list.
### Response:
def transfer_options(cls, obj, new_obj, backend=None):
"""
Transfers options for all backends from one object to another.
Drops any options defined in the supplied drop list.
"""
backend = cls.current_backend if backend is None else backend
type_name = type(new_obj).__name__
group = type_name if obj.group == type(obj).__name__ else obj.group
spec = '.'.join([s for s in (type_name, group, obj.label) if s])
options = []
for group in Options._option_groups:
opts = cls.lookup_options(backend, obj, group)
if opts and opts.kwargs: options.append(Options(group, **opts.kwargs))
if options:
StoreOptions.set_options(new_obj, {spec: options}, backend) |
def query(scope, blueprint, debug, output, with_metadata, realtime, **description):
"""
e.g.
googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
query pageviews \
--start yesterday --limit -10 --sort -pageviews \
--dimensions pagepath \
--debug
"""
if realtime:
description['type'] = 'realtime'
if blueprint:
queries = from_blueprint(scope, blueprint)
else:
if not isinstance(scope, ga.account.Profile):
raise ValueError("Account and webproperty needed for query.")
queries = from_args(scope, **description)
for query in queries:
if debug:
click.echo(query.build())
report = query.serialize(format=output, with_metadata=with_metadata)
click.echo(report) | e.g.
googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
query pageviews \
--start yesterday --limit -10 --sort -pageviews \
--dimensions pagepath \
        --debug | Below is the instruction that describes the task:
### Input:
e.g.
googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
query pageviews \
--start yesterday --limit -10 --sort -pageviews \
--dimensions pagepath \
--debug
### Response:
def query(scope, blueprint, debug, output, with_metadata, realtime, **description):
"""
e.g.
googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
query pageviews \
--start yesterday --limit -10 --sort -pageviews \
--dimensions pagepath \
--debug
"""
if realtime:
description['type'] = 'realtime'
if blueprint:
queries = from_blueprint(scope, blueprint)
else:
if not isinstance(scope, ga.account.Profile):
raise ValueError("Account and webproperty needed for query.")
queries = from_args(scope, **description)
for query in queries:
if debug:
click.echo(query.build())
report = query.serialize(format=output, with_metadata=with_metadata)
click.echo(report) |
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args | getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
    multiple occurrences. Long and short options may be mixed. | Below is the instruction that describes the task:
### Input:
getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
### Response:
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args |
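A usage sketch matching the documented calling convention (it mirrors the standard library's getopt module); this assumes the do_longs/do_shorts helpers called by the code are available, since they are not part of this row.

argv = ['-v', '--output=out.txt', 'input.txt']
opts, args = getopt(argv, 'vo:', ['output='])
print(opts)   # -> [('-v', ''), ('--output', 'out.txt')]
print(args)   # -> ['input.txt']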
def set_system_date_time(years=None,
months=None,
days=None,
hours=None,
minutes=None,
seconds=None,
utc_offset=None):
'''
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for
that element will be used. For example, if you don't pass the year, the
current system year will be used. (Used by set_system_date and
set_system_time)
Updates hardware clock, if present, in addition to software
(kernel) clock.
:param int years: Years digit, ie: 2015
:param int months: Months digit: 1 - 12
:param int days: Days digit: 1 - 31
:param int hours: Hours digit: 0 - 23
:param int minutes: Minutes digit: 0 - 59
:param int seconds: Seconds digit: 0 - 59
:param str utc_offset: The utc offset in 4 digit (+0600) format with an
optional sign (+/-). Will default to None which will use the local
timezone. To set the time based off of UTC use "'+0000'". Note: if
being passed through the command line will need to be quoted twice to
allow negative offsets.
:return: True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_time 2015 5 12 11 37 53 "'-0500'"
'''
# Get the current date/time
date_time = _get_offset_time(utc_offset)
# Check for passed values. If not passed, use current values
if years is None:
years = date_time.year
if months is None:
months = date_time.month
if days is None:
days = date_time.day
if hours is None:
hours = date_time.hour
if minutes is None:
minutes = date_time.minute
if seconds is None:
seconds = date_time.second
try:
new_datetime = datetime(years, months, days, hours, minutes, seconds, 0,
date_time.tzinfo)
except ValueError as err:
raise SaltInvocationError(err.message)
if not _date_bin_set_datetime(new_datetime):
return False
if has_settable_hwclock():
# Now that we've successfully set the software clock, we should
        # update hardware clock for time to persist through reboot.
return _swclock_to_hwclock()
return True | Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for
that element will be used. For example, if you don't pass the year, the
current system year will be used. (Used by set_system_date and
set_system_time)
Updates hardware clock, if present, in addition to software
(kernel) clock.
:param int years: Years digit, ie: 2015
:param int months: Months digit: 1 - 12
:param int days: Days digit: 1 - 31
:param int hours: Hours digit: 0 - 23
:param int minutes: Minutes digit: 0 - 59
:param int seconds: Seconds digit: 0 - 59
:param str utc_offset: The utc offset in 4 digit (+0600) format with an
optional sign (+/-). Will default to None which will use the local
timezone. To set the time based off of UTC use "'+0000'". Note: if
being passed through the command line will need to be quoted twice to
allow negative offsets.
:return: True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
        salt '*' system.set_system_date_time 2015 5 12 11 37 53 "'-0500'" | Below is the instruction that describes the task:
### Input:
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for
that element will be used. For example, if you don't pass the year, the
current system year will be used. (Used by set_system_date and
set_system_time)
Updates hardware clock, if present, in addition to software
(kernel) clock.
:param int years: Years digit, ie: 2015
:param int months: Months digit: 1 - 12
:param int days: Days digit: 1 - 31
:param int hours: Hours digit: 0 - 23
:param int minutes: Minutes digit: 0 - 59
:param int seconds: Seconds digit: 0 - 59
:param str utc_offset: The utc offset in 4 digit (+0600) format with an
optional sign (+/-). Will default to None which will use the local
timezone. To set the time based off of UTC use "'+0000'". Note: if
being passed through the command line will need to be quoted twice to
allow negative offsets.
:return: True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_time 2015 5 12 11 37 53 "'-0500'"
### Response:
def set_system_date_time(years=None,
months=None,
days=None,
hours=None,
minutes=None,
seconds=None,
utc_offset=None):
'''
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for
that element will be used. For example, if you don't pass the year, the
current system year will be used. (Used by set_system_date and
set_system_time)
Updates hardware clock, if present, in addition to software
(kernel) clock.
:param int years: Years digit, ie: 2015
:param int months: Months digit: 1 - 12
:param int days: Days digit: 1 - 31
:param int hours: Hours digit: 0 - 23
:param int minutes: Minutes digit: 0 - 59
:param int seconds: Seconds digit: 0 - 59
:param str utc_offset: The utc offset in 4 digit (+0600) format with an
optional sign (+/-). Will default to None which will use the local
timezone. To set the time based off of UTC use "'+0000'". Note: if
being passed through the command line will need to be quoted twice to
allow negative offsets.
:return: True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_time 2015 5 12 11 37 53 "'-0500'"
'''
# Get the current date/time
date_time = _get_offset_time(utc_offset)
# Check for passed values. If not passed, use current values
if years is None:
years = date_time.year
if months is None:
months = date_time.month
if days is None:
days = date_time.day
if hours is None:
hours = date_time.hour
if minutes is None:
minutes = date_time.minute
if seconds is None:
seconds = date_time.second
try:
new_datetime = datetime(years, months, days, hours, minutes, seconds, 0,
date_time.tzinfo)
except ValueError as err:
raise SaltInvocationError(err.message)
if not _date_bin_set_datetime(new_datetime):
return False
if has_settable_hwclock():
# Now that we've successfully set the software clock, we should
        # update hardware clock for time to persist through reboot.
return _swclock_to_hwclock()
return True |
def save(self, *args, **kwargs):
"""
Update ``self.modified``.
"""
self.modified = timezone.now()
        super(AbstractBaseModel, self).save(*args, **kwargs) | Update ``self.modified``. | Below is the instruction that describes the task:
### Input:
Update ``self.modified``.
### Response:
def save(self, *args, **kwargs):
"""
Update ``self.modified``.
"""
self.modified = timezone.now()
super(AbstractBaseModel, self).save(*args, **kwargs) |
def _scroll_when_linewrapping(self, ui_content, width, height, cli):
"""
Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`.
"""
scroll_offsets_bottom = self.scroll_offsets.bottom
scroll_offsets_top = self.scroll_offsets.top
# We don't have horizontal scrolling.
self.horizontal_scroll = 0
# If the current line consumes more than the whole window height,
# then we have to scroll vertically inside this line. (We don't take
# the scroll offsets into account for this.)
# Also, ignore the scroll offsets in this case. Just set the vertical
# scroll to this line.
if ui_content.get_height_for_line(ui_content.cursor_position.y, width) > height - scroll_offsets_top:
# Calculate the height of the text before the cursor, with the line
            # containing the cursor included, and the character below the
# cursor included as well.
line = explode_tokens(ui_content.get_line(ui_content.cursor_position.y))
text_before_cursor = token_list_to_text(line[:ui_content.cursor_position.x + 1])
text_before_height = UIContent.get_height_for_text(text_before_cursor, width)
# Adjust scroll offset.
self.vertical_scroll = ui_content.cursor_position.y
self.vertical_scroll_2 = min(text_before_height - 1, self.vertical_scroll_2)
self.vertical_scroll_2 = max(0, text_before_height - height, self.vertical_scroll_2)
return
else:
self.vertical_scroll_2 = 0
# Current line doesn't consume the whole height. Take scroll offsets into account.
def get_min_vertical_scroll():
# Make sure that the cursor line is not below the bottom.
            # (Calculate how many lines can be shown between the cursor and the bottom.)
used_height = 0
prev_lineno = ui_content.cursor_position.y
for lineno in range(ui_content.cursor_position.y, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > height - scroll_offsets_bottom:
return prev_lineno
else:
prev_lineno = lineno
return 0
def get_max_vertical_scroll():
# Make sure that the cursor line is not above the top.
prev_lineno = ui_content.cursor_position.y
used_height = 0
for lineno in range(ui_content.cursor_position.y - 1, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > scroll_offsets_top:
return prev_lineno
else:
prev_lineno = lineno
return prev_lineno
def get_topmost_visible():
"""
            Calculate the uppermost line that can be visible, while the bottom
is still visible. We should not allow scroll more than this if
`allow_scroll_beyond_bottom` is false.
"""
prev_lineno = ui_content.line_count - 1
used_height = 0
for lineno in range(ui_content.line_count - 1, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > height:
return prev_lineno
else:
prev_lineno = lineno
return prev_lineno
# Scroll vertically. (Make sure that the whole line which contains the
# cursor is visible.
topmost_visible = get_topmost_visible()
# Note: the `min(topmost_visible, ...)` is to make sure that we
# don't require scrolling up because of the bottom scroll offset,
# when we are at the end of the document.
self.vertical_scroll = max(self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll()))
self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())
# Disallow scrolling beyond bottom?
if not self.allow_scroll_beyond_bottom(cli):
self.vertical_scroll = min(self.vertical_scroll, topmost_visible) | Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
        Set `self.horizontal_scroll/vertical_scroll`. | Below is the instruction that describes the task:
### Input:
Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`.
### Response:
def _scroll_when_linewrapping(self, ui_content, width, height, cli):
"""
Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`.
"""
scroll_offsets_bottom = self.scroll_offsets.bottom
scroll_offsets_top = self.scroll_offsets.top
# We don't have horizontal scrolling.
self.horizontal_scroll = 0
# If the current line consumes more than the whole window height,
# then we have to scroll vertically inside this line. (We don't take
# the scroll offsets into account for this.)
# Also, ignore the scroll offsets in this case. Just set the vertical
# scroll to this line.
if ui_content.get_height_for_line(ui_content.cursor_position.y, width) > height - scroll_offsets_top:
# Calculate the height of the text before the cursor, with the line
            # containing the cursor included, and the character below the
# cursor included as well.
line = explode_tokens(ui_content.get_line(ui_content.cursor_position.y))
text_before_cursor = token_list_to_text(line[:ui_content.cursor_position.x + 1])
text_before_height = UIContent.get_height_for_text(text_before_cursor, width)
# Adjust scroll offset.
self.vertical_scroll = ui_content.cursor_position.y
self.vertical_scroll_2 = min(text_before_height - 1, self.vertical_scroll_2)
self.vertical_scroll_2 = max(0, text_before_height - height, self.vertical_scroll_2)
return
else:
self.vertical_scroll_2 = 0
# Current line doesn't consume the whole height. Take scroll offsets into account.
def get_min_vertical_scroll():
# Make sure that the cursor line is not below the bottom.
            # (Calculate how many lines can be shown between the cursor and the bottom.)
used_height = 0
prev_lineno = ui_content.cursor_position.y
for lineno in range(ui_content.cursor_position.y, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > height - scroll_offsets_bottom:
return prev_lineno
else:
prev_lineno = lineno
return 0
def get_max_vertical_scroll():
# Make sure that the cursor line is not above the top.
prev_lineno = ui_content.cursor_position.y
used_height = 0
for lineno in range(ui_content.cursor_position.y - 1, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > scroll_offsets_top:
return prev_lineno
else:
prev_lineno = lineno
return prev_lineno
def get_topmost_visible():
"""
            Calculate the uppermost line that can be visible, while the bottom
is still visible. We should not allow scroll more than this if
`allow_scroll_beyond_bottom` is false.
"""
prev_lineno = ui_content.line_count - 1
used_height = 0
for lineno in range(ui_content.line_count - 1, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > height:
return prev_lineno
else:
prev_lineno = lineno
return prev_lineno
# Scroll vertically. (Make sure that the whole line which contains the
# cursor is visible.
topmost_visible = get_topmost_visible()
# Note: the `min(topmost_visible, ...)` is to make sure that we
# don't require scrolling up because of the bottom scroll offset,
# when we are at the end of the document.
self.vertical_scroll = max(self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll()))
self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())
# Disallow scrolling beyond bottom?
if not self.allow_scroll_beyond_bottom(cli):
self.vertical_scroll = min(self.vertical_scroll, topmost_visible) |
def abspath(self, path):
"""Return absolute path for a path relative to the current file."""
        return os.path.abspath(os.path.join(os.path.dirname(self.path), path)) | Return absolute path for a path relative to the current file. | Below is the instruction that describes the task:
### Input:
Return absolute path for a path relative to the current file.
### Response:
def abspath(self, path):
"""Return absolute path for a path relative to the current file."""
return os.path.abspath(os.path.join(os.path.dirname(self.path), path)) |
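The one-liner above resolves a path relative to the directory containing `self.path`. A standalone equivalent, with a hypothetical file path for illustration:

import os

def abspath_relative_to(file_path, rel):
    # Same expression as the method above, minus the surrounding object.
    return os.path.abspath(os.path.join(os.path.dirname(file_path), rel))

print(abspath_relative_to('/docs/guide/index.md', 'img/logo.png'))
# -> '/docs/guide/img/logo.png'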
def profile(self, name, parName, logemin=None, logemax=None,
reoptimize=False,
xvals=None, npts=None, savestate=True, **kwargs):
"""Profile the likelihood for the given source and parameter.
Parameters
----------
name : str
Source name.
parName : str
Parameter name.
reoptimize : bool
Re-fit nuisance parameters at each step in the scan. Note
that enabling this option will only re-fit parameters that
were free when the method was executed.
Returns
-------
lnlprofile : dict
Dictionary containing results of likelihood scan.
"""
# Find the source
name = self.roi.get_source_by_name(name).name
par = self.like.normPar(name)
parName = self.like.normPar(name).getName()
idx = self.like.par_index(name, parName)
bounds = self.like.model[idx].getBounds()
value = self.like.model[idx].getValue()
loge_bounds = self.loge_bounds
optimizer = kwargs.get('optimizer', self.config['optimizer'])
if savestate:
saved_state = self._latch_state()
# If parameter is fixed temporarily free it
par.setFree(True)
if optimizer['optimizer'] == 'NEWTON':
self._create_fitcache()
if logemin is not None or logemax is not None:
loge_bounds = self.set_energy_range(logemin, logemax)
else:
loge_bounds = self.loge_bounds
loglike0 = -self.like()
if xvals is None:
err = par.error()
val = par.getValue()
if err <= 0 or val <= 3 * err:
xvals = 10 ** np.linspace(-2.0, 2.0, 51)
if val < xvals[0]:
xvals = np.insert(xvals, val, 0)
else:
xvals = np.linspace(0, 1, 25)
xvals = np.concatenate((-1.0 * xvals[1:][::-1], xvals))
xvals = val * 10 ** xvals
if np.isnan(xvals).any():
raise RuntimeError(
"Parameter scan points for %s::%s include infinite value." % (name, parName))
# Update parameter bounds to encompass scan range
try:
self.like[idx].setBounds(min(min(xvals), value, bounds[0]),
max(max(xvals), value, bounds[1]))
except RuntimeError:
self.logger.warning(
"Caught failure on setBounds for %s::%s." % (name, parName))
o = {'xvals': xvals,
'npred': np.zeros(len(xvals)),
'npred_wt': np.zeros(len(xvals)),
'dnde': np.zeros(len(xvals)),
'flux': np.zeros(len(xvals)),
'eflux': np.zeros(len(xvals)),
'dloglike': np.zeros(len(xvals)),
'loglike': np.zeros(len(xvals))
}
if reoptimize and hasattr(self.like.components[0].logLike,
'setUpdateFixedWeights'):
for c in self.components:
c.like.logLike.setUpdateFixedWeights(False)
for i, x in enumerate(xvals):
try:
self.like[idx] = x
except RuntimeError:
self.logger.warning(
"Caught failure on set for %s::%s: %.2f" % (name, parName, x))
if self.like.nFreeParams() > 1 and reoptimize:
# Only reoptimize if not all frozen
self.like.freeze(idx)
fit_output = self._fit(errors=False, **optimizer)
loglike1 = fit_output['loglike']
self.like.thaw(idx)
else:
loglike1 = -self.like()
flux = self.like[name].flux(10 ** loge_bounds[0],
10 ** loge_bounds[1])
eflux = self.like[name].energyFlux(10 ** loge_bounds[0],
10 ** loge_bounds[1])
prefactor = self.like[idx]
o['dloglike'][i] = loglike1 - loglike0
o['loglike'][i] = loglike1
o['dnde'][i] = prefactor.getTrueValue()
o['flux'][i] = flux
o['eflux'][i] = eflux
cs = self.model_counts_spectrum(name,
loge_bounds[0],
loge_bounds[1], summed=True)
o['npred'][i] += np.sum(cs)
cs_wt = self.model_counts_spectrum(name,
loge_bounds[0],
loge_bounds[1], summed=True, weighted=True)
o['npred_wt'][i] += np.sum(cs_wt)
self.like[idx] = value
if reoptimize and hasattr(self.like.components[0].logLike,
'setUpdateFixedWeights'):
for c in self.components:
c.like.logLike.setUpdateFixedWeights(True)
# Restore model parameters to original values
if savestate:
saved_state.restore()
self.like[idx].setBounds(*bounds)
if logemin is not None or logemax is not None:
self.set_energy_range(*loge_bounds)
return o | Profile the likelihood for the given source and parameter.
Parameters
----------
name : str
Source name.
parName : str
Parameter name.
reoptimize : bool
Re-fit nuisance parameters at each step in the scan. Note
that enabling this option will only re-fit parameters that
were free when the method was executed.
Returns
-------
lnlprofile : dict
        Dictionary containing results of likelihood scan. | Below is the instruction that describes the task:
### Input:
Profile the likelihood for the given source and parameter.
Parameters
----------
name : str
Source name.
parName : str
Parameter name.
reoptimize : bool
Re-fit nuisance parameters at each step in the scan. Note
that enabling this option will only re-fit parameters that
were free when the method was executed.
Returns
-------
lnlprofile : dict
Dictionary containing results of likelihood scan.
### Response:
def profile(self, name, parName, logemin=None, logemax=None,
reoptimize=False,
xvals=None, npts=None, savestate=True, **kwargs):
"""Profile the likelihood for the given source and parameter.
Parameters
----------
name : str
Source name.
parName : str
Parameter name.
reoptimize : bool
Re-fit nuisance parameters at each step in the scan. Note
that enabling this option will only re-fit parameters that
were free when the method was executed.
Returns
-------
lnlprofile : dict
Dictionary containing results of likelihood scan.
"""
# Find the source
name = self.roi.get_source_by_name(name).name
par = self.like.normPar(name)
parName = self.like.normPar(name).getName()
idx = self.like.par_index(name, parName)
bounds = self.like.model[idx].getBounds()
value = self.like.model[idx].getValue()
loge_bounds = self.loge_bounds
optimizer = kwargs.get('optimizer', self.config['optimizer'])
if savestate:
saved_state = self._latch_state()
# If parameter is fixed temporarily free it
par.setFree(True)
if optimizer['optimizer'] == 'NEWTON':
self._create_fitcache()
if logemin is not None or logemax is not None:
loge_bounds = self.set_energy_range(logemin, logemax)
else:
loge_bounds = self.loge_bounds
loglike0 = -self.like()
if xvals is None:
err = par.error()
val = par.getValue()
if err <= 0 or val <= 3 * err:
xvals = 10 ** np.linspace(-2.0, 2.0, 51)
if val < xvals[0]:
xvals = np.insert(xvals, val, 0)
else:
xvals = np.linspace(0, 1, 25)
xvals = np.concatenate((-1.0 * xvals[1:][::-1], xvals))
xvals = val * 10 ** xvals
if np.isnan(xvals).any():
raise RuntimeError(
"Parameter scan points for %s::%s include infinite value." % (name, parName))
# Update parameter bounds to encompass scan range
try:
self.like[idx].setBounds(min(min(xvals), value, bounds[0]),
max(max(xvals), value, bounds[1]))
except RuntimeError:
self.logger.warning(
"Caught failure on setBounds for %s::%s." % (name, parName))
o = {'xvals': xvals,
'npred': np.zeros(len(xvals)),
'npred_wt': np.zeros(len(xvals)),
'dnde': np.zeros(len(xvals)),
'flux': np.zeros(len(xvals)),
'eflux': np.zeros(len(xvals)),
'dloglike': np.zeros(len(xvals)),
'loglike': np.zeros(len(xvals))
}
if reoptimize and hasattr(self.like.components[0].logLike,
'setUpdateFixedWeights'):
for c in self.components:
c.like.logLike.setUpdateFixedWeights(False)
for i, x in enumerate(xvals):
try:
self.like[idx] = x
except RuntimeError:
self.logger.warning(
"Caught failure on set for %s::%s: %.2f" % (name, parName, x))
if self.like.nFreeParams() > 1 and reoptimize:
# Only reoptimize if not all frozen
self.like.freeze(idx)
fit_output = self._fit(errors=False, **optimizer)
loglike1 = fit_output['loglike']
self.like.thaw(idx)
else:
loglike1 = -self.like()
flux = self.like[name].flux(10 ** loge_bounds[0],
10 ** loge_bounds[1])
eflux = self.like[name].energyFlux(10 ** loge_bounds[0],
10 ** loge_bounds[1])
prefactor = self.like[idx]
o['dloglike'][i] = loglike1 - loglike0
o['loglike'][i] = loglike1
o['dnde'][i] = prefactor.getTrueValue()
o['flux'][i] = flux
o['eflux'][i] = eflux
cs = self.model_counts_spectrum(name,
loge_bounds[0],
loge_bounds[1], summed=True)
o['npred'][i] += np.sum(cs)
cs_wt = self.model_counts_spectrum(name,
loge_bounds[0],
loge_bounds[1], summed=True, weighted=True)
o['npred_wt'][i] += np.sum(cs_wt)
self.like[idx] = value
if reoptimize and hasattr(self.like.components[0].logLike,
'setUpdateFixedWeights'):
for c in self.components:
c.like.logLike.setUpdateFixedWeights(True)
# Restore model parameters to original values
if savestate:
saved_state.restore()
self.like[idx].setBounds(*bounds)
if logemin is not None or logemax is not None:
self.set_energy_range(*loge_bounds)
return o |
def get_results_msg(self, results, study):
"""Return summary for GOEA results."""
# To convert msg list to string: "\n".join(msg)
msg = []
if results:
fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}"
stu_items, num_gos_stu = self.get_item_cnt(results, "study_items")
pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items")
stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study)))
pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n)
msg.append("{POP} population items".format(POP=pop_txt))
msg.append("{STU} study items".format(STU=stu_txt))
        return msg | Return summary for GOEA results. | Below is the instruction that describes the task:
### Input:
Return summary for GOEA results.
### Response:
def get_results_msg(self, results, study):
"""Return summary for GOEA results."""
# To convert msg list to string: "\n".join(msg)
msg = []
if results:
fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}"
stu_items, num_gos_stu = self.get_item_cnt(results, "study_items")
pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items")
stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study)))
pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n)
msg.append("{POP} population items".format(POP=pop_txt))
msg.append("{STU} study items".format(STU=stu_txt))
return msg |
def reindex_similar(self, other, n_sphere=4):
"""Reindex ``other`` to be similarly indexed as ``self``.
Returns a reindexed copy of ``other`` that minimizes the
        distance for each atom to itself in the same chemical environment
from ``self`` to ``other``.
Read more about the definition of the chemical environment in
:func:`Cartesian.partition_chem_env`
.. note:: It is necessary to align ``self`` and other before
applying this method.
This can be done via :meth:`~Cartesian.align`.
.. note:: It is probably necessary to improve the result using
:meth:`~Cartesian.change_numbering()`.
Args:
other (Cartesian):
n_sphere (int): Wrapper around the argument for
:meth:`~Cartesian.partition_chem_env`.
Returns:
Cartesian: Reindexed version of other
"""
def make_subset_similar(m1, subset1, m2, subset2, index_dct):
"""Changes index_dct INPLACE"""
coords = ['x', 'y', 'z']
index1 = list(subset1)
for m1_i in index1:
dist_m2_to_m1_i = m2.get_distance_to(m1.loc[m1_i, coords],
subset2, sort=True)
m2_i = dist_m2_to_m1_i.index[0]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
counter = itertools.count()
found = False
while not found:
if m2_i in index_dct.keys():
old_m1_pos = m1.loc[index_dct[m2_i], coords]
if dist_new < np.linalg.norm(m2_pos_i - old_m1_pos):
index1.append(index_dct[m2_i])
index_dct[m2_i] = m1_i
found = True
else:
m2_i = dist_m2_to_m1_i.index[next(counter)]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
else:
index_dct[m2_i] = m1_i
found = True
return index_dct
molecule1 = self.copy()
molecule2 = other.copy()
partition1 = molecule1.partition_chem_env(n_sphere)
partition2 = molecule2.partition_chem_env(n_sphere)
index_dct = {}
for key in partition1:
message = ('You have chemically different molecules, regarding '
'the topology of their connectivity.')
assert len(partition1[key]) == len(partition2[key]), message
index_dct = make_subset_similar(molecule1, partition1[key],
molecule2, partition2[key],
index_dct)
molecule2.index = [index_dct[i] for i in molecule2.index]
return molecule2.loc[molecule1.index] | Reindex ``other`` to be similarly indexed as ``self``.
Returns a reindexed copy of ``other`` that minimizes the
        distance for each atom to itself in the same chemical environment
from ``self`` to ``other``.
Read more about the definition of the chemical environment in
:func:`Cartesian.partition_chem_env`
.. note:: It is necessary to align ``self`` and other before
applying this method.
This can be done via :meth:`~Cartesian.align`.
.. note:: It is probably necessary to improve the result using
:meth:`~Cartesian.change_numbering()`.
Args:
other (Cartesian):
n_sphere (int): Wrapper around the argument for
:meth:`~Cartesian.partition_chem_env`.
Returns:
        Cartesian: Reindexed version of other | Below is the instruction that describes the task:
### Input:
Reindex ``other`` to be similarly indexed as ``self``.
Returns a reindexed copy of ``other`` that minimizes the
distance for each atom to itself in the same chemical environment
from ``self`` to ``other``.
Read more about the definition of the chemical environment in
:func:`Cartesian.partition_chem_env`
.. note:: It is necessary to align ``self`` and other before
applying this method.
This can be done via :meth:`~Cartesian.align`.
.. note:: It is probably necessary to improve the result using
:meth:`~Cartesian.change_numbering()`.
Args:
other (Cartesian):
n_sphere (int): Wrapper around the argument for
:meth:`~Cartesian.partition_chem_env`.
Returns:
Cartesian: Reindexed version of other
### Response:
def reindex_similar(self, other, n_sphere=4):
"""Reindex ``other`` to be similarly indexed as ``self``.
Returns a reindexed copy of ``other`` that minimizes the
        distance for each atom to itself in the same chemical environment
from ``self`` to ``other``.
Read more about the definition of the chemical environment in
:func:`Cartesian.partition_chem_env`
.. note:: It is necessary to align ``self`` and other before
applying this method.
This can be done via :meth:`~Cartesian.align`.
.. note:: It is probably necessary to improve the result using
:meth:`~Cartesian.change_numbering()`.
Args:
other (Cartesian):
n_sphere (int): Wrapper around the argument for
:meth:`~Cartesian.partition_chem_env`.
Returns:
Cartesian: Reindexed version of other
"""
def make_subset_similar(m1, subset1, m2, subset2, index_dct):
"""Changes index_dct INPLACE"""
coords = ['x', 'y', 'z']
index1 = list(subset1)
for m1_i in index1:
dist_m2_to_m1_i = m2.get_distance_to(m1.loc[m1_i, coords],
subset2, sort=True)
m2_i = dist_m2_to_m1_i.index[0]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
counter = itertools.count()
found = False
while not found:
if m2_i in index_dct.keys():
old_m1_pos = m1.loc[index_dct[m2_i], coords]
if dist_new < np.linalg.norm(m2_pos_i - old_m1_pos):
index1.append(index_dct[m2_i])
index_dct[m2_i] = m1_i
found = True
else:
m2_i = dist_m2_to_m1_i.index[next(counter)]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
else:
index_dct[m2_i] = m1_i
found = True
return index_dct
molecule1 = self.copy()
molecule2 = other.copy()
partition1 = molecule1.partition_chem_env(n_sphere)
partition2 = molecule2.partition_chem_env(n_sphere)
index_dct = {}
for key in partition1:
message = ('You have chemically different molecules, regarding '
'the topology of their connectivity.')
assert len(partition1[key]) == len(partition2[key]), message
index_dct = make_subset_similar(molecule1, partition1[key],
molecule2, partition2[key],
index_dct)
molecule2.index = [index_dct[i] for i in molecule2.index]
return molecule2.loc[molecule1.index] |
def copy_file_to_master(
file_path,
remote_path='.',
username=None,
key_path=None
):
""" Copy a file to the Mesos master
"""
    return copy_file(shakedown.master_ip(), file_path, remote_path, username, key_path) | Copy a file to the Mesos master | Below is the instruction that describes the task:
### Input:
Copy a file to the Mesos master
### Response:
def copy_file_to_master(
file_path,
remote_path='.',
username=None,
key_path=None
):
""" Copy a file to the Mesos master
"""
return copy_file(shakedown.master_ip(), file_path, remote_path, username, key_path) |
def compile(self, name, contents, path=None):
"""Compile the given Thrift document into a Python module.
The generated module contains,
.. py:attribute:: __services__
A collection of generated classes for all services defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``services`` to ``__services__``.
.. py:attribute:: __types__
A collection of generated types for all types defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``types`` to ``__types__``.
.. py:attribute:: __includes__
A collection of modules included by this module.
.. versionadded:: 1.0
.. py:attribute:: __constants__
A mapping of constant name to value for all constants defined in
the thrift file.
.. versionchanged:: 1.0
Renamed from ``constants`` to ``__constants__``.
.. py:attribute:: __thrift_source__
Contents of the .thrift file from which this module was compiled.
.. versionadded:: 1.1
.. py:function:: dumps(obj)
Serializes the given object using the protocol the compiler was
instantiated with.
.. py:function:: loads(cls, payload)
Deserializes an object of type ``cls`` from ``payload`` using the
protocol the compiler was instantiated with.
.. py:function:: dumps.message(obj, seqid=0)
Serializes the given request or response into a
:py:class:`~thriftrw.wire.Message` using the protocol that the
compiler was instantiated with.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
.. py:function:: loads.message(service, payload)
Deserializes a :py:class:`~thriftrw.wire.Message` from
``payload`` using the protocol the compiler was instantiated with.
A request or response of a method defined in the given service is
parsed in the message body.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
And one class each for every struct, union, exception, enum, and
service defined in the IDL.
Service classes have references to
:py:class:`thriftrw.spec.ServiceFunction` objects for each method
defined in the service.
:param str name:
Name of the Thrift document. This will be the name of the
generated module.
:param str contents:
Thrift document to compile
:param str path:
Path to the Thrift file being compiled. If not specified, imports
from within the Thrift file will be disallowed.
:returns:
ModuleSpec of the generated module.
"""
assert name
if path:
path = os.path.abspath(path)
if path in self._module_specs:
return self._module_specs[path]
module_spec = ModuleSpec(name, self.protocol, path, contents)
if path:
self._module_specs[path] = module_spec
program = self.parser.parse(contents)
header_processor = HeaderProcessor(self, module_spec, self.include_as)
for header in program.headers:
header.apply(header_processor)
generator = Generator(module_spec.scope, strict=self.strict)
for definition in program.definitions:
generator.process(definition)
return module_spec | Compile the given Thrift document into a Python module.
The generated module contains,
.. py:attribute:: __services__
A collection of generated classes for all services defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``services`` to ``__services__``.
.. py:attribute:: __types__
A collection of generated types for all types defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``types`` to ``__types__``.
.. py:attribute:: __includes__
A collection of modules included by this module.
.. versionadded:: 1.0
.. py:attribute:: __constants__
A mapping of constant name to value for all constants defined in
the thrift file.
.. versionchanged:: 1.0
Renamed from ``constants`` to ``__constants__``.
.. py:attribute:: __thrift_source__
Contents of the .thrift file from which this module was compiled.
.. versionadded:: 1.1
.. py:function:: dumps(obj)
Serializes the given object using the protocol the compiler was
instantiated with.
.. py:function:: loads(cls, payload)
Deserializes an object of type ``cls`` from ``payload`` using the
protocol the compiler was instantiated with.
.. py:function:: dumps.message(obj, seqid=0)
Serializes the given request or response into a
:py:class:`~thriftrw.wire.Message` using the protocol that the
compiler was instantiated with.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
.. py:function:: loads.message(service, payload)
Deserializes a :py:class:`~thriftrw.wire.Message` from
``payload`` using the protocol the compiler was instantiated with.
A request or response of a method defined in the given service is
parsed in the message body.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
And one class each for every struct, union, exception, enum, and
service defined in the IDL.
Service classes have references to
:py:class:`thriftrw.spec.ServiceFunction` objects for each method
defined in the service.
:param str name:
Name of the Thrift document. This will be the name of the
generated module.
:param str contents:
Thrift document to compile
:param str path:
Path to the Thrift file being compiled. If not specified, imports
from within the Thrift file will be disallowed.
:returns:
        ModuleSpec of the generated module. | Below is the instruction that describes the task:
### Input:
Compile the given Thrift document into a Python module.
The generated module contains,
.. py:attribute:: __services__
A collection of generated classes for all services defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``services`` to ``__services__``.
.. py:attribute:: __types__
A collection of generated types for all types defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``types`` to ``__types__``.
.. py:attribute:: __includes__
A collection of modules included by this module.
.. versionadded:: 1.0
.. py:attribute:: __constants__
A mapping of constant name to value for all constants defined in
the thrift file.
.. versionchanged:: 1.0
Renamed from ``constants`` to ``__constants__``.
.. py:attribute:: __thrift_source__
Contents of the .thrift file from which this module was compiled.
.. versionadded:: 1.1
.. py:function:: dumps(obj)
Serializes the given object using the protocol the compiler was
instantiated with.
.. py:function:: loads(cls, payload)
Deserializes an object of type ``cls`` from ``payload`` using the
protocol the compiler was instantiated with.
.. py:function:: dumps.message(obj, seqid=0)
Serializes the given request or response into a
:py:class:`~thriftrw.wire.Message` using the protocol that the
compiler was instantiated with.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
.. py:function:: loads.message(service, payload)
Deserializes a :py:class:`~thriftrw.wire.Message` from
``payload`` using the protocol the compiler was instantiated with.
A request or response of a method defined in the given service is
parsed in the message body.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
And one class each for every struct, union, exception, enum, and
service defined in the IDL.
Service classes have references to
:py:class:`thriftrw.spec.ServiceFunction` objects for each method
defined in the service.
:param str name:
Name of the Thrift document. This will be the name of the
generated module.
:param str contents:
Thrift document to compile
:param str path:
Path to the Thrift file being compiled. If not specified, imports
from within the Thrift file will be disallowed.
:returns:
ModuleSpec of the generated module.
### Response:
def compile(self, name, contents, path=None):
"""Compile the given Thrift document into a Python module.
The generated module contains,
.. py:attribute:: __services__
A collection of generated classes for all services defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``services`` to ``__services__``.
.. py:attribute:: __types__
A collection of generated types for all types defined in the
thrift file.
.. versionchanged:: 1.0
Renamed from ``types`` to ``__types__``.
.. py:attribute:: __includes__
A collection of modules included by this module.
.. versionadded:: 1.0
.. py:attribute:: __constants__
A mapping of constant name to value for all constants defined in
the thrift file.
.. versionchanged:: 1.0
Renamed from ``constants`` to ``__constants__``.
.. py:attribute:: __thrift_source__
Contents of the .thrift file from which this module was compiled.
.. versionadded:: 1.1
.. py:function:: dumps(obj)
Serializes the given object using the protocol the compiler was
instantiated with.
.. py:function:: loads(cls, payload)
Deserializes an object of type ``cls`` from ``payload`` using the
protocol the compiler was instantiated with.
.. py:function:: dumps.message(obj, seqid=0)
Serializes the given request or response into a
:py:class:`~thriftrw.wire.Message` using the protocol that the
compiler was instantiated with.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
.. py:function:: loads.message(service, payload)
Deserializes a :py:class:`~thriftrw.wire.Message` from
``payload`` using the protocol the compiler was instantiated with.
A request or response of a method defined in the given service is
parsed in the message body.
See :ref:`calling-apache-thrift`.
.. versionadded:: 1.0
And one class each for every struct, union, exception, enum, and
service defined in the IDL.
Service classes have references to
:py:class:`thriftrw.spec.ServiceFunction` objects for each method
defined in the service.
:param str name:
Name of the Thrift document. This will be the name of the
generated module.
:param str contents:
Thrift document to compile
:param str path:
Path to the Thrift file being compiled. If not specified, imports
from within the Thrift file will be disallowed.
:returns:
ModuleSpec of the generated module.
"""
assert name
if path:
path = os.path.abspath(path)
if path in self._module_specs:
return self._module_specs[path]
module_spec = ModuleSpec(name, self.protocol, path, contents)
if path:
self._module_specs[path] = module_spec
program = self.parser.parse(contents)
header_processor = HeaderProcessor(self, module_spec, self.include_as)
for header in program.headers:
header.apply(header_processor)
generator = Generator(module_spec.scope, strict=self.strict)
for definition in program.definitions:
generator.process(definition)
return module_spec |
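A hedged usage sketch of a module produced by this compiler; the todo.thrift file, the Task struct, and its fields are hypothetical, and thriftrw.load is assumed to be the public entry point that invokes compile() internally.
import thriftrw

# Hypothetical IDL: struct Task { 1: required i32 id; 2: optional string title }
todo = thriftrw.load('todo.thrift')            # generated module named after the file
task = todo.Task(id=1, title='write docs')     # struct class generated from the IDL
payload = todo.dumps(task)                     # serialize with the compiler's protocol
restored = todo.loads(todo.Task, payload)      # deserialize back into a Task
print(restored.title)                          # 'write docs'
print(todo.__thrift_source__.splitlines()[0])  # raw .thrift contents (available since 1.1)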
def Jar(env, target = None, source = [], *args, **kw):
"""
A pseudo-Builder wrapper around the separate Jar sources{File,Dir}
Builders.
"""
# jar target should not be a list so assume they passed
# no target and want implicit target to be made and the arg
    # was actually the list of sources
if SCons.Util.is_List(target) and source == []:
SCons.Warnings.Warning("Making implicit target jar file, " +
"and treating the list as sources")
source = target
target = None
    # multiple targets passed so build each target the same from the
# same source
#TODO Maybe this should only be done once, and the result copied
# for each target since it should result in the same?
if SCons.Util.is_List(target) and SCons.Util.is_List(source):
jars = []
for single_target in target:
jars += env.Jar( target = single_target, source = source, *args, **kw)
return jars
# they passed no target so make a target implicitly
if target == None:
try:
# make target from the first source file
target = os.path.splitext(str(source[0]))[0] + env.subst('$JARSUFFIX')
except:
# something strange is happening but attempt anyways
SCons.Warnings.Warning("Could not make implicit target from sources, using directory")
target = os.path.basename(str(env.Dir('.'))) + env.subst('$JARSUFFIX')
# make lists out of our target and sources
if not SCons.Util.is_List(target):
target = [target]
if not SCons.Util.is_List(source):
source = [source]
# setup for checking through all the sources and handle accordingly
java_class_suffix = env.subst('$JAVACLASSSUFFIX')
java_suffix = env.subst('$JAVASUFFIX')
target_classes = []
# function for determining what to do with a file and not a directory
# if its already a class file then it can be used as a
# source for jar, otherwise turn it into a class file then
# return the source
def file_to_class(s):
if(str(_my_normcase(s)).endswith(java_suffix)):
return env.JavaClassFile(source = s, *args, **kw)
else:
return [env.fs.File(s)]
# In the case that we are passed just string to a node which is directory
# but does not exist, we need to check all the current targets to see if
# that directory is going to exist so we can add it as a source to Jar builder
def get_all_targets(env, node='.'):
def get_all_targets_iter(env, node):
if node.has_builder():
yield node
for kid in node.all_children():
for kid in get_all_targets(env, kid):
yield kid
node = env.arg2nodes(node, env.fs.Entry)[0]
return list(get_all_targets_iter(env, node))
# loop through the sources and handle each accordingly
# the goal here is to get all the source files into a class
# file or a directory that contains class files
for s in source:
s = env.subst(s)
if isinstance(s, SCons.Node.FS.Base):
if isinstance(s, SCons.Node.FS.File):
# found a file so make sure its a class file
target_classes.extend(file_to_class(s))
else:
# found a dir so make sure its a dir of class files
target_classes.extend(env.JavaClassDir(source = env.fs.Dir(s), *args, **kw))
else:
if os.path.isfile(s):
# found a file that exists on the FS, make sure its a class file
target_classes.extend(file_to_class(s))
elif os.path.isdir(s):
# found a dir on the FS, add it as a dir of class files
target_classes.append(env.fs.Dir(s))
elif s[-len(java_suffix):] == java_suffix or s[-len(java_class_suffix):] == java_class_suffix:
                # found a file that may not exist and is only a string
# so add it after converting it to a class file
target_classes.extend(file_to_class(s))
else:
# found a swig file so add it after converting it to class files
if(os.path.splitext(str(s))[1] == ".i"):
target_classes.extend(env.JavaClassFile(source = s, *args, **kw))
else:
# found a directory that does not yet exist, but can exist as a node
# check the target nodes to make sure it will be built, then add
# it as a source
for node in get_all_targets(env):
if(s in str(node) and os.path.splitext(str(node))[1] == ""):
target_classes.append(node)
# at this point all our sources have been converted to classes or directories of class
# so pass it to the Jar builder
return env.JarFile(target = target, source = target_classes, *args, **kw) | A pseudo-Builder wrapper around the separate Jar sources{File,Dir}
Builders. | Below is the the instruction that describes the task:
### Input:
A pseudo-Builder wrapper around the separate Jar sources{File,Dir}
Builders.
### Response:
def Jar(env, target = None, source = [], *args, **kw):
"""
A pseudo-Builder wrapper around the separate Jar sources{File,Dir}
Builders.
"""
# jar target should not be a list so assume they passed
# no target and want implicit target to be made and the arg
    # was actually the list of sources
if SCons.Util.is_List(target) and source == []:
SCons.Warnings.Warning("Making implicit target jar file, " +
"and treating the list as sources")
source = target
target = None
    # multiple targets passed so build each target the same from the
# same source
#TODO Maybe this should only be done once, and the result copied
# for each target since it should result in the same?
if SCons.Util.is_List(target) and SCons.Util.is_List(source):
jars = []
for single_target in target:
jars += env.Jar( target = single_target, source = source, *args, **kw)
return jars
# they passed no target so make a target implicitly
if target == None:
try:
# make target from the first source file
target = os.path.splitext(str(source[0]))[0] + env.subst('$JARSUFFIX')
except:
# something strange is happening but attempt anyways
SCons.Warnings.Warning("Could not make implicit target from sources, using directory")
target = os.path.basename(str(env.Dir('.'))) + env.subst('$JARSUFFIX')
# make lists out of our target and sources
if not SCons.Util.is_List(target):
target = [target]
if not SCons.Util.is_List(source):
source = [source]
# setup for checking through all the sources and handle accordingly
java_class_suffix = env.subst('$JAVACLASSSUFFIX')
java_suffix = env.subst('$JAVASUFFIX')
target_classes = []
# function for determining what to do with a file and not a directory
# if its already a class file then it can be used as a
# source for jar, otherwise turn it into a class file then
# return the source
def file_to_class(s):
if(str(_my_normcase(s)).endswith(java_suffix)):
return env.JavaClassFile(source = s, *args, **kw)
else:
return [env.fs.File(s)]
# In the case that we are passed just string to a node which is directory
# but does not exist, we need to check all the current targets to see if
# that directory is going to exist so we can add it as a source to Jar builder
def get_all_targets(env, node='.'):
def get_all_targets_iter(env, node):
if node.has_builder():
yield node
for kid in node.all_children():
for kid in get_all_targets(env, kid):
yield kid
node = env.arg2nodes(node, env.fs.Entry)[0]
return list(get_all_targets_iter(env, node))
# loop through the sources and handle each accordingly
# the goal here is to get all the source files into a class
# file or a directory that contains class files
for s in source:
s = env.subst(s)
if isinstance(s, SCons.Node.FS.Base):
if isinstance(s, SCons.Node.FS.File):
# found a file so make sure its a class file
target_classes.extend(file_to_class(s))
else:
# found a dir so make sure its a dir of class files
target_classes.extend(env.JavaClassDir(source = env.fs.Dir(s), *args, **kw))
else:
if os.path.isfile(s):
# found a file that exists on the FS, make sure its a class file
target_classes.extend(file_to_class(s))
elif os.path.isdir(s):
# found a dir on the FS, add it as a dir of class files
target_classes.append(env.fs.Dir(s))
elif s[-len(java_suffix):] == java_suffix or s[-len(java_class_suffix):] == java_class_suffix:
                # found a file that may not exist and is only a string
# so add it after converting it to a class file
target_classes.extend(file_to_class(s))
else:
# found a swig file so add it after converting it to class files
if(os.path.splitext(str(s))[1] == ".i"):
target_classes.extend(env.JavaClassFile(source = s, *args, **kw))
else:
# found a directory that does not yet exist, but can exist as a node
# check the target nodes to make sure it will be built, then add
# it as a source
for node in get_all_targets(env):
if(s in str(node) and os.path.splitext(str(node))[1] == ""):
target_classes.append(node)
# at this point all our sources have been converted to classes or directories of class
# so pass it to the Jar builder
return env.JarFile(target = target, source = target_classes, *args, **kw) |
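A hedged SConstruct sketch of the pseudo-builder above; the file names are hypothetical, and Environment is the construction-environment factory that SCons injects into SConstruct files.
# SConstruct (a sketch under the assumptions above, not a definitive build script)
env = Environment(tools=['javac', 'jar'])

# Explicit target with mixed sources: .java files go through JavaClassFile and
# directories through JavaClassDir before JarFile assembles the archive.
env.Jar(target='app.jar', source=['src/Main.java', 'build/classes'])

# No target given: the first source with its extension replaced by $JARSUFFIX
# becomes the implicit target ('src/Hello.jar').
env.Jar(source=['src/Hello.java'])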
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch) | Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit | Below is the the instruction that describes the task:
### Input:
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
### Response:
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch) |
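A hedged usage sketch; the repository path and hash are hypothetical, and GitRepository is assumed to be the PyDriller class that owns this method.
from pydriller import GitRepository

gr = GitRepository('/path/to/local/clone')   # hypothetical checkout
commit = gr.get_commit('a1b2c3d4')           # hash is illustrative
print(commit.msg)                            # parsed commit metadata is exposed on the Commit object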
def dump(node):
""" Dump initialized object structure to yaml
"""
from qubell.api.private.platform import Auth, QubellPlatform
from qubell.api.private.organization import Organization
from qubell.api.private.application import Application
from qubell.api.private.instance import Instance
from qubell.api.private.revision import Revision
from qubell.api.private.environment import Environment
from qubell.api.private.zone import Zone
from qubell.api.private.manifest import Manifest
# Exclude keys from dump
# Format: { 'ClassName': ['fields', 'to', 'exclude']}
exclusion_list = {
Auth: ['cookies'],
QubellPlatform:['auth', ],
Organization: ['auth', 'organizationId', 'zone'],
Application: ['auth', 'applicationId', 'organization'],
Instance: ['auth', 'instanceId', 'application'],
Manifest: ['name', 'content'],
Revision: ['auth', 'revisionId'],
Environment: ['auth', 'environmentId', 'organization'],
Zone: ['auth', 'zoneId', 'organization'],
}
def obj_presenter(dumper, obj):
for x in exclusion_list.keys():
if isinstance(obj, x): # Find class
fields = obj.__dict__.copy()
for excl_item in exclusion_list[x]:
try:
fields.pop(excl_item)
except:
log.warn('No item %s in object %s' % (excl_item, x))
return dumper.represent_mapping('tag:yaml.org,2002:map', fields)
return dumper.represent_mapping('tag:yaml.org,2002:map', obj.__dict__)
noalias_dumper = yaml.dumper.Dumper
noalias_dumper.ignore_aliases = lambda self, data: True
yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
yaml.add_multi_representer(object, obj_presenter)
serialized = yaml.dump(node, default_flow_style=False, Dumper=noalias_dumper)
return serialized | Dump initialized object structure to yaml | Below is the the instruction that describes the task:
### Input:
Dump initialized object structure to yaml
### Response:
def dump(node):
""" Dump initialized object structure to yaml
"""
from qubell.api.private.platform import Auth, QubellPlatform
from qubell.api.private.organization import Organization
from qubell.api.private.application import Application
from qubell.api.private.instance import Instance
from qubell.api.private.revision import Revision
from qubell.api.private.environment import Environment
from qubell.api.private.zone import Zone
from qubell.api.private.manifest import Manifest
# Exclude keys from dump
# Format: { 'ClassName': ['fields', 'to', 'exclude']}
exclusion_list = {
Auth: ['cookies'],
QubellPlatform:['auth', ],
Organization: ['auth', 'organizationId', 'zone'],
Application: ['auth', 'applicationId', 'organization'],
Instance: ['auth', 'instanceId', 'application'],
Manifest: ['name', 'content'],
Revision: ['auth', 'revisionId'],
Environment: ['auth', 'environmentId', 'organization'],
Zone: ['auth', 'zoneId', 'organization'],
}
def obj_presenter(dumper, obj):
for x in exclusion_list.keys():
if isinstance(obj, x): # Find class
fields = obj.__dict__.copy()
for excl_item in exclusion_list[x]:
try:
fields.pop(excl_item)
except:
log.warn('No item %s in object %s' % (excl_item, x))
return dumper.represent_mapping('tag:yaml.org,2002:map', fields)
return dumper.represent_mapping('tag:yaml.org,2002:map', obj.__dict__)
noalias_dumper = yaml.dumper.Dumper
noalias_dumper.ignore_aliases = lambda self, data: True
yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
yaml.add_multi_representer(object, obj_presenter)
serialized = yaml.dump(node, default_flow_style=False, Dumper=noalias_dumper)
return serialized |
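A self-contained sketch of the field-exclusion idea used above, with a hypothetical class that is not part of qubell; a multi-representer copies __dict__ and drops the listed attributes before the mapping is emitted.
import yaml

class Account(object):
    def __init__(self):
        self.name = 'demo'
        self.token = 'secret'   # must never reach the dump

EXCLUDE = {Account: ['token']}

def presenter(dumper, obj):
    # Copy the instance dict and strip any excluded fields for this class.
    fields = obj.__dict__.copy()
    for cls, names in EXCLUDE.items():
        if isinstance(obj, cls):
            for name in names:
                fields.pop(name, None)
    return dumper.represent_mapping('tag:yaml.org,2002:map', fields)

yaml.add_multi_representer(object, presenter)
print(yaml.dump(Account(), default_flow_style=False))   # prints only 'name: demo'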
def read(self, structure):
""" Read and advance. """
start = self.offset
self.skip(structure.size)
return structure.read(self.buf, start) | Read and advance. | Below is the the instruction that describes the task:
### Input:
Read and advance.
### Response:
def read(self, structure):
""" Read and advance. """
start = self.offset
self.skip(structure.size)
return structure.read(self.buf, start) |
def _add_extra_handlers(self, handlers):
"""
Adds the extra handler (defined by the user)
:param handlers: a list of :py:class:`tornado.web.RequestHandler` instances.
:return:
"""
extra_handlers = [(h[0], h[1], {"microservice": self}) for h in self.extra_handlers]
handlers.extend(extra_handlers) | Adds the extra handler (defined by the user)
:param handlers: a list of :py:class:`tornado.web.RequestHandler` instances.
:return: | Below is the the instruction that describes the task:
### Input:
Adds the extra handler (defined by the user)
:param handlers: a list of :py:class:`tornado.web.RequestHandler` instances.
:return:
### Response:
def _add_extra_handlers(self, handlers):
"""
Adds the extra handler (defined by the user)
:param handlers: a list of :py:class:`tornado.web.RequestHandler` instances.
:return:
"""
extra_handlers = [(h[0], h[1], {"microservice": self}) for h in self.extra_handlers]
handlers.extend(extra_handlers) |
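A hedged sketch of the extra_handlers shape this method consumes: (url pattern, handler class) pairs, with the owning microservice injected as an initialize() argument; the handler below is hypothetical.
import tornado.web

class HealthHandler(tornado.web.RequestHandler):
    def initialize(self, microservice):
        self.microservice = microservice   # injected via the dict added above

    def get(self):
        self.write('ok')

extra_handlers = [(r'/health', HealthHandler)]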
def unmasked(self, depth=0.01):
"""Return the unmasked overfitting metric for a given transit depth."""
return 1 - (np.hstack(self._O2) +
np.hstack(self._O3) / depth) / np.hstack(self._O1) | Return the unmasked overfitting metric for a given transit depth. | Below is the the instruction that describes the task:
### Input:
Return the unmasked overfitting metric for a given transit depth.
### Response:
def unmasked(self, depth=0.01):
"""Return the unmasked overfitting metric for a given transit depth."""
return 1 - (np.hstack(self._O2) +
np.hstack(self._O3) / depth) / np.hstack(self._O1) |
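A hedged numeric illustration of the metric above; O1, O2 and O3 stand in for the stacked internal arrays and the values are made up.
import numpy as np

O1, O2, O3 = np.array([2.0]), np.array([1.0]), np.array([0.005])
depth = 0.01
unmasked = 1 - (O2 + O3 / depth) / O1
print(unmasked)   # [0.25] = 1 - (1.0 + 0.005 / 0.01) / 2.0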
def read_settings(filename=None, defs=None):
"""
:param filename: Force load a file
:param defs: arguments you want to accept
:param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
:return:
"""
# READ SETTINGS
defs = listwrap(defs)
defs.append({
"name": ["--config", "--settings", "--settings-file", "--settings_file"],
"help": "path to JSON file with settings",
"type": str,
"dest": "filename",
"default": None,
"required": False
})
args = argparse(defs)
args.filename = coalesce(filename, args.filename, "./config.json")
settings_file = File(args.filename)
if not settings_file.exists:
Log.error("Can not read configuration file {{filename}}", {
"filename": settings_file.abspath
})
settings = mo_json_config.get_file(settings_file)
settings.args = args
return settings | :param filename: Force load a file
:param defs: arguments you want to accept
:param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
:return: | Below is the the instruction that describes the task:
### Input:
:param filename: Force load a file
:param defs: arguments you want to accept
:param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
:return:
### Response:
def read_settings(filename=None, defs=None):
"""
:param filename: Force load a file
:param defs: arguments you want to accept
:param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
:return:
"""
# READ SETTINGS
defs = listwrap(defs)
defs.append({
"name": ["--config", "--settings", "--settings-file", "--settings_file"],
"help": "path to JSON file with settings",
"type": str,
"dest": "filename",
"default": None,
"required": False
})
args = argparse(defs)
args.filename = coalesce(filename, args.filename, "./config.json")
settings_file = File(args.filename)
if not settings_file.exists:
Log.error("Can not read configuration file {{filename}}", {
"filename": settings_file.abspath
})
settings = mo_json_config.get_file(settings_file)
settings.args = args
return settings |
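A hedged usage sketch; the extra definition mirrors the shape of the built-in --config entry above, and the flag name and config file are hypothetical.
# Invoked as: python app.py --config ./config.json --env staging
extra_defs = [{
    "name": ["--env"],
    "help": "deployment environment name",
    "type": str,
    "dest": "env",
    "default": "dev",
    "required": False,
}]
settings = read_settings(defs=extra_defs)   # falls back to ./config.json when --config is absent
print(settings.args.filename, settings.args.env)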
def write_acls(self, acls):
"""Writes the given autocorrelation lengths.
The ACL of each parameter is saved to
``[sampler_group]/acls/{param}']``. The maximum over all the
parameters is saved to the file's 'acl' attribute.
Parameters
----------
acls : dict
A dictionary of ACLs keyed by the parameter.
Returns
-------
ACL
The maximum of the acls that was written to the file.
"""
group = self.sampler_group + '/acls/{}'
# write the individual acls
for param in acls:
try:
# we need to use the write_direct function because it's
# apparently the only way to update scalars in h5py
self[group.format(param)].write_direct(
numpy.array(acls[param]))
except KeyError:
# dataset doesn't exist yet
self[group.format(param)] = acls[param]
# write the maximum over all params
acl = numpy.array(acls.values()).max()
self[self.sampler_group].attrs['acl'] = acl
# set the default thin interval to be the acl (if it is finite)
if numpy.isfinite(acl):
self.thin_interval = int(numpy.ceil(acl)) | Writes the given autocorrelation lengths.
The ACL of each parameter is saved to
``[sampler_group]/acls/{param}']``. The maximum over all the
parameters is saved to the file's 'acl' attribute.
Parameters
----------
acls : dict
A dictionary of ACLs keyed by the parameter.
Returns
-------
ACL
The maximum of the acls that was written to the file. | Below is the the instruction that describes the task:
### Input:
Writes the given autocorrelation lengths.
The ACL of each parameter is saved to
``[sampler_group]/acls/{param}']``. The maximum over all the
parameters is saved to the file's 'acl' attribute.
Parameters
----------
acls : dict
A dictionary of ACLs keyed by the parameter.
Returns
-------
ACL
The maximum of the acls that was written to the file.
### Response:
def write_acls(self, acls):
"""Writes the given autocorrelation lengths.
The ACL of each parameter is saved to
``[sampler_group]/acls/{param}']``. The maximum over all the
parameters is saved to the file's 'acl' attribute.
Parameters
----------
acls : dict
A dictionary of ACLs keyed by the parameter.
Returns
-------
ACL
The maximum of the acls that was written to the file.
"""
group = self.sampler_group + '/acls/{}'
# write the individual acls
for param in acls:
try:
# we need to use the write_direct function because it's
# apparently the only way to update scalars in h5py
self[group.format(param)].write_direct(
numpy.array(acls[param]))
except KeyError:
# dataset doesn't exist yet
self[group.format(param)] = acls[param]
# write the maximum over all params
acl = numpy.array(acls.values()).max()
self[self.sampler_group].attrs['acl'] = acl
# set the default thin interval to be the acl (if it is finite)
if numpy.isfinite(acl):
self.thin_interval = int(numpy.ceil(acl)) |
def update_cluster_topology(self, assignment):
"""Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid
"""
try:
for partition_name, replica_ids in six.iteritems(assignment):
try:
new_replicas = [self.brokers[b_id] for b_id in replica_ids]
except KeyError:
self.log.error(
"Invalid replicas %s for topic-partition %s-%s.",
', '.join([str(id) for id in replica_ids]),
partition_name[0],
partition_name[1],
)
raise InvalidBrokerIdError(
"Invalid replicas {0}.".format(
', '.join([str(id) for id in replica_ids])
),
)
try:
partition = self.partitions[partition_name]
old_replicas = [broker for broker in partition.replicas]
# No change needed. Save ourself some CPU time.
# Replica order matters as the first one is the leader.
if new_replicas == old_replicas:
continue
# Remove old partitions from broker
# This also updates partition replicas
for broker in old_replicas:
broker.remove_partition(partition)
# Add new partition to brokers
for broker in new_replicas:
broker.add_partition(partition)
except KeyError:
self.log.error(
"Invalid topic-partition %s-%s.",
partition_name[0],
partition_name[1],
)
raise InvalidPartitionError(
"Invalid topic-partition {0}-{1}."
.format(partition_name[0], partition_name[1]),
)
except KeyError:
self.log.error("Could not parse given assignment {0}".format(assignment))
raise | Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid | Below is the the instruction that describes the task:
### Input:
Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid
### Response:
def update_cluster_topology(self, assignment):
"""Modify the cluster-topology with given assignment.
Change the replica set of partitions as in given assignment.
:param assignment: dict representing actions to be used to update the current
cluster-topology
:raises: InvalidBrokerIdError when broker-id is invalid
:raises: InvalidPartitionError when partition-name is invalid
"""
try:
for partition_name, replica_ids in six.iteritems(assignment):
try:
new_replicas = [self.brokers[b_id] for b_id in replica_ids]
except KeyError:
self.log.error(
"Invalid replicas %s for topic-partition %s-%s.",
', '.join([str(id) for id in replica_ids]),
partition_name[0],
partition_name[1],
)
raise InvalidBrokerIdError(
"Invalid replicas {0}.".format(
', '.join([str(id) for id in replica_ids])
),
)
try:
partition = self.partitions[partition_name]
old_replicas = [broker for broker in partition.replicas]
# No change needed. Save ourself some CPU time.
# Replica order matters as the first one is the leader.
if new_replicas == old_replicas:
continue
# Remove old partitions from broker
# This also updates partition replicas
for broker in old_replicas:
broker.remove_partition(partition)
# Add new partition to brokers
for broker in new_replicas:
broker.add_partition(partition)
except KeyError:
self.log.error(
"Invalid topic-partition %s-%s.",
partition_name[0],
partition_name[1],
)
raise InvalidPartitionError(
"Invalid topic-partition {0}-{1}."
.format(partition_name[0], partition_name[1]),
)
except KeyError:
self.log.error("Could not parse given assignment {0}".format(assignment))
raise |
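A hedged sketch of the assignment shape this method expects: keys are (topic, partition id) tuples and values are ordered broker-id lists whose first entry is the leader, as noted in the comment above; the topology instance, topic, and broker ids are hypothetical.
assignment = {
    ('payments', 0): [1001, 1002, 1003],
    ('payments', 1): [1002, 1003, 1001],
}
cluster_topology.update_cluster_topology(assignment)   # cluster_topology: an already-built ClusterTopology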