code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) | text (stringlengths 164 to 112k) |
---|---|---|
def get_structure_from_mp(formula):
"""
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
"""
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure | Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula. | Below is the instruction that describes the task:
### Input:
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
### Response:
def get_structure_from_mp(formula):
"""
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
"""
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure |
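A minimal usage sketch for the helper above, assuming pymatgen's MPRester is importable and a valid PMG_MAPI_KEY is configured; the formula and printed attributes are only illustrative.

# Hypothetical call: fetch the lowest-energy Fe2O3 entry from Materials Project.
structure = get_structure_from_mp("Fe2O3")
print(structure.composition.reduced_formula)
print(structure.lattice.abc)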
def make_ica_funs(observed_dimension, latent_dimension):
"""These functions implement independent component analysis.
The model is:
latents are drawn i.i.d. for each data point from a product of student-ts.
weights are the same across all datapoints.
each data = latents * weights + noise."""
def sample(weights, n_samples, noise_std, rs):
latents = rs.randn(latent_dimension, n_samples)
latents = np.array(sorted(latents.T, key=lambda a_entry: a_entry[0])).T
noise = rs.randn(n_samples, observed_dimension) * noise_std
observed = predict(weights, latents) + noise
return latents, observed
def predict(weights, latents):
return np.dot(weights, latents).T
def logprob(weights, latents, noise_std, observed):
preds = predict(weights, latents)
log_lik = np.sum(t.logpdf(preds, 2.4, observed, noise_std))
return log_lik
num_weights = observed_dimension * latent_dimension
def unpack_weights(weights):
return np.reshape(weights, (observed_dimension, latent_dimension))
return num_weights, sample, logprob, unpack_weights | These functions implement independent component analysis.
The model is:
latents are drawn i.i.d. for each data point from a product of student-ts.
weights are the same across all datapoints.
each data = latents * weights + noise. | Below is the instruction that describes the task:
### Input:
These functions implement independent component analysis.
The model is:
latents are drawn i.i.d. for each data point from a product of student-ts.
weights are the same across all datapoints.
each data = latents * weights + noise.
### Response:
def make_ica_funs(observed_dimension, latent_dimension):
"""These functions implement independent component analysis.
The model is:
latents are drawn i.i.d. for each data point from a product of student-ts.
weights are the same across all datapoints.
each data = latents * weights + noise."""
def sample(weights, n_samples, noise_std, rs):
latents = rs.randn(latent_dimension, n_samples)
latents = np.array(sorted(latents.T, key=lambda a_entry: a_entry[0])).T
noise = rs.randn(n_samples, observed_dimension) * noise_std
observed = predict(weights, latents) + noise
return latents, observed
def predict(weights, latents):
return np.dot(weights, latents).T
def logprob(weights, latents, noise_std, observed):
preds = predict(weights, latents)
log_lik = np.sum(t.logpdf(preds, 2.4, observed, noise_std))
return log_lik
num_weights = observed_dimension * latent_dimension
def unpack_weights(weights):
return np.reshape(weights, (observed_dimension, latent_dimension))
return num_weights, sample, logprob, unpack_weights |
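A hedged sketch of wiring the returned closures together, assuming numpy and scipy.stats.t are imported under the names used in the row above; the dimensions, noise level and seed are arbitrary.

import numpy as np
from scipy.stats import t

rs = np.random.RandomState(0)
num_weights, sample, logprob, unpack_weights = make_ica_funs(observed_dimension=5, latent_dimension=2)
weights = unpack_weights(rs.randn(num_weights) * 0.1)   # flat vector -> (observed, latent) matrix
latents, observed = sample(weights, n_samples=10, noise_std=0.2, rs=rs)
print(logprob(weights, latents, noise_std=0.2, observed=observed))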
def patterned(self):
"""Selects the pattern fill type.
Note that calling this method does not by itself set a foreground or
background color of the pattern. Rather it enables subsequent
assignments to properties like fore_color to set the pattern and
colors.
"""
pattFill = self._xPr.get_or_change_to_pattFill()
self._fill = _PattFill(pattFill) | Selects the pattern fill type.
Note that calling this method does not by itself set a foreground or
background color of the pattern. Rather it enables subsequent
assignments to properties like fore_color to set the pattern and
colors. | Below is the instruction that describes the task:
### Input:
Selects the pattern fill type.
Note that calling this method does not by itself set a foreground or
background color of the pattern. Rather it enables subsequent
assignments to properties like fore_color to set the pattern and
colors.
### Response:
def patterned(self):
"""Selects the pattern fill type.
Note that calling this method does not by itself set a foreground or
background color of the pattern. Rather it enables subsequent
assignments to properties like fore_color to set the pattern and
colors.
"""
pattFill = self._xPr.get_or_change_to_pattFill()
self._fill = _PattFill(pattFill) |
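A short usage sketch, assuming this method belongs to a python-pptx style FillFormat; the slide layout index, shape type and colors are illustrative.

from pptx import Presentation
from pptx.util import Inches
from pptx.enum.shapes import MSO_SHAPE
from pptx.dml.color import RGBColor

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])      # blank layout
shape = slide.shapes.add_shape(MSO_SHAPE.RECTANGLE, Inches(1), Inches(1), Inches(2), Inches(1))
shape.fill.patterned()                                   # switch the fill type first
shape.fill.fore_color.rgb = RGBColor(0xC0, 0x00, 0x00)   # then assign pattern colors
shape.fill.back_color.rgb = RGBColor(0xFF, 0xFF, 0xFF)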
def run(self):
"""Run charge balance command"""
# Load compound information
def compound_name(id):
if id not in self._model.compounds:
return id
return self._model.compounds[id].properties.get('name', id)
# Create a set of excluded reactions
exclude = set(self._args.exclude)
count = 0
unbalanced = 0
unchecked = 0
for reaction, charge in charge_balance(self._model):
count += 1
if reaction.id in exclude or reaction.equation is None:
continue
if math.isnan(charge):
logger.debug('Not checking reaction {};'
' missing charge'.format(reaction.id))
unchecked += 1
elif abs(charge) > self._args.epsilon:
unbalanced += 1
rxt = reaction.equation.translated_compounds(compound_name)
print('{}\t{}\t{}'.format(reaction.id, charge, rxt))
logger.info('Unbalanced reactions: {}/{}'.format(unbalanced, count))
logger.info('Unchecked reactions due to missing charge: {}/{}'.format(
unchecked, count))
logger.info('Reactions excluded from check: {}/{}'.format(
len(exclude), count)) | Run charge balance command | Below is the instruction that describes the task:
### Input:
Run charge balance command
### Response:
def run(self):
"""Run charge balance command"""
# Load compound information
def compound_name(id):
if id not in self._model.compounds:
return id
return self._model.compounds[id].properties.get('name', id)
# Create a set of excluded reactions
exclude = set(self._args.exclude)
count = 0
unbalanced = 0
unchecked = 0
for reaction, charge in charge_balance(self._model):
count += 1
if reaction.id in exclude or reaction.equation is None:
continue
if math.isnan(charge):
logger.debug('Not checking reaction {};'
' missing charge'.format(reaction.id))
unchecked += 1
elif abs(charge) > self._args.epsilon:
unbalanced += 1
rxt = reaction.equation.translated_compounds(compound_name)
print('{}\t{}\t{}'.format(reaction.id, charge, rxt))
logger.info('Unbalanced reactions: {}/{}'.format(unbalanced, count))
logger.info('Unchecked reactions due to missing charge: {}/{}'.format(
unchecked, count))
logger.info('Reactions excluded from check: {}/{}'.format(
len(exclude), count)) |
def variantsGenerator(self, request):
"""
Returns a generator over the (variant, nextPageToken) pairs defined
by the specified request.
"""
compoundId = datamodel.VariantSetCompoundId \
.parse(request.variant_set_id)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
variantSet = dataset.getVariantSet(compoundId.variant_set_id)
intervalIterator = paging.VariantsIntervalIterator(
request, variantSet)
return intervalIterator | Returns a generator over the (variant, nextPageToken) pairs defined
by the specified request. | Below is the instruction that describes the task:
### Input:
Returns a generator over the (variant, nextPageToken) pairs defined
by the specified request.
### Response:
def variantsGenerator(self, request):
"""
Returns a generator over the (variant, nextPageToken) pairs defined
by the specified request.
"""
compoundId = datamodel.VariantSetCompoundId \
.parse(request.variant_set_id)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
variantSet = dataset.getVariantSet(compoundId.variant_set_id)
intervalIterator = paging.VariantsIntervalIterator(
request, variantSet)
return intervalIterator |
def flaskrun(app, default_host="127.0.0.1", default_port="8000"):
"""
Takes a flask.Flask instance and runs it. Parses
command-line flags to configure the app.
"""
# Set up the command-line options
parser = optparse.OptionParser()
parser.add_option(
"-H",
"--host",
help="Hostname of the Flask app " + "[default %s]" % default_host,
default=default_host,
)
parser.add_option(
"-P",
"--port",
help="Port for the Flask app " + "[default %s]" % default_port,
default=default_port,
)
# Two options useful for debugging purposes, but
# a bit dangerous so not exposed in the help message.
parser.add_option(
"-d", "--debug", action="store_true", dest="debug", help=optparse.SUPPRESS_HELP
)
parser.add_option(
"-p",
"--profile",
action="store_true",
dest="profile",
help=optparse.SUPPRESS_HELP,
)
options, _ = parser.parse_args()
# If the user selects the profiling option, then we need
# to do a little extra setup
if options.profile:
from werkzeug.contrib.profiler import ProfilerMiddleware
app.config["PROFILE"] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
options.debug = True
app.run(debug=options.debug, host=options.host, port=int(options.port)) | Takes a flask.Flask instance and runs it. Parses
command-line flags to configure the app. | Below is the instruction that describes the task:
### Input:
Takes a flask.Flask instance and runs it. Parses
command-line flags to configure the app.
### Response:
def flaskrun(app, default_host="127.0.0.1", default_port="8000"):
"""
Takes a flask.Flask instance and runs it. Parses
command-line flags to configure the app.
"""
# Set up the command-line options
parser = optparse.OptionParser()
parser.add_option(
"-H",
"--host",
help="Hostname of the Flask app " + "[default %s]" % default_host,
default=default_host,
)
parser.add_option(
"-P",
"--port",
help="Port for the Flask app " + "[default %s]" % default_port,
default=default_port,
)
# Two options useful for debugging purposes, but
# a bit dangerous so not exposed in the help message.
parser.add_option(
"-d", "--debug", action="store_true", dest="debug", help=optparse.SUPPRESS_HELP
)
parser.add_option(
"-p",
"--profile",
action="store_true",
dest="profile",
help=optparse.SUPPRESS_HELP,
)
options, _ = parser.parse_args()
# If the user selects the profiling option, then we need
# to do a little extra setup
if options.profile:
from werkzeug.contrib.profiler import ProfilerMiddleware
app.config["PROFILE"] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
options.debug = True
app.run(debug=options.debug, host=options.host, port=int(options.port)) |
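An illustrative way to drive the runner above from a script, assuming Flask is installed; the route and script name are made up for the example.

from flask import Flask

app = Flask(__name__)

@app.route("/ping")
def ping():
    return "pong"

if __name__ == "__main__":
    # e.g. `python server.py -H 0.0.0.0 -P 9000 -d` to change host/port and enable debug mode
    flaskrun(app)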
def set_connected(self, connected):
"""
:param bool connected:
"""
with self.__connect_wait_condition:
self.connected = connected
if connected:
self.__connect_wait_condition.notify() | :param bool connected: | Below is the instruction that describes the task:
### Input:
:param bool connected:
### Response:
def set_connected(self, connected):
"""
:param bool connected:
"""
with self.__connect_wait_condition:
self.connected = connected
if connected:
self.__connect_wait_condition.notify() |
def ls(obj=None):
"""List available layers, or infos on a given layer"""
if obj is None:
import builtins
all = builtins.__dict__.copy()
all.update(globals())
objlst = sorted(conf.layers, key=lambda x:x.__name__)
for o in objlst:
print("%-10s : %s" %(o.__name__,o.name))
else:
if isinstance(obj, type) and issubclass(obj, Packet):
for f in obj.fields_desc:
print("%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default)))
elif isinstance(obj, Packet):
for f in obj.fields_desc:
print("%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj,f.name)), repr(f.default)))
if not isinstance(obj.payload, NoPayload):
print("--")
ls(obj.payload)
else:
print("Not a packet class. Type 'ls()' to list packet classes.") | List available layers, or infos on a given layer | Below is the the instruction that describes the task:
### Input:
List available layers, or infos on a given layer
### Response:
def ls(obj=None):
"""List available layers, or infos on a given layer"""
if obj is None:
import builtins
all = builtins.__dict__.copy()
all.update(globals())
objlst = sorted(conf.layers, key=lambda x:x.__name__)
for o in objlst:
print("%-10s : %s" %(o.__name__,o.name))
else:
if isinstance(obj, type) and issubclass(obj, Packet):
for f in obj.fields_desc:
print("%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default)))
elif isinstance(obj, Packet):
for f in obj.fields_desc:
print("%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj,f.name)), repr(f.default)))
if not isinstance(obj.payload, NoPayload):
print("--")
ls(obj.payload)
else:
print("Not a packet class. Type 'ls()' to list packet classes.") |
def _build_models_query(self, query):
"""
Builds a query from `query` that filters to documents only from registered models.
"""
registered_models_ct = self.build_models_list()
if registered_models_ct:
restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
for model_ct in registered_models_ct]
limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
return query | Builds a query from `query` that filters to documents only from registered models. | Below is the instruction that describes the task:
### Input:
Builds a query from `query` that filters to documents only from registered models.
### Response:
def _build_models_query(self, query):
"""
Builds a query from `query` that filters to documents only from registered models.
"""
registered_models_ct = self.build_models_list()
if registered_models_ct:
restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
for model_ct in registered_models_ct]
limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
return query |
def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data) | Set the matrix values from a two dimensional list. | Below is the instruction that describes the task:
### Input:
Set the matrix values from a two dimensional list.
### Response:
def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data) |
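A worked illustration of the storage layout implied by the code above; the 2x3 values are arbitrary. With rowBased=True the row-major input is transposed into a column-major self.matrix, while rowBased=False expects that internal layout directly and deep-copies it.

data = [[1, 2, 3],   # row 0
        [4, 5, 6]]   # row 1

# With self._rows == 2, self._columns == 3 and rowBased=True the loops above build
#   self.matrix == [[1, 4], [2, 5], [3, 6]]   (one inner list per column)
# With rowBased=False the caller must already pass [[1, 4], [2, 5], [3, 6]].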
def save_assessment_part(self, assessment_part_form, *args, **kwargs):
"""Pass through to provider AssessmentPartAdminSession.update_assessment_part"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if assessment_part_form.is_for_update():
return self.update_assessment_part(assessment_part_form, *args, **kwargs)
else:
return self.create_assessment_part(assessment_part_form, *args, **kwargs) | Pass through to provider AssessmentPartAdminSession.update_assessment_part | Below is the instruction that describes the task:
### Input:
Pass through to provider AssessmentPartAdminSession.update_assessment_part
### Response:
def save_assessment_part(self, assessment_part_form, *args, **kwargs):
"""Pass through to provider AssessmentPartAdminSession.update_assessment_part"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if assessment_part_form.is_for_update():
return self.update_assessment_part(assessment_part_form, *args, **kwargs)
else:
return self.create_assessment_part(assessment_part_form, *args, **kwargs) |
def run(self):
"""Connect to SABnzbd and get the data."""
try:
answer = urlopen(self.url + "&mode=queue").read().decode()
except (HTTPError, URLError) as error:
self.output = {
"full_text": str(error.reason),
"color": "#FF0000"
}
return
answer = json.loads(answer)
# if answer["status"] exists and is False, an error occured
if not answer.get("status", True):
self.output = {
"full_text": answer["error"],
"color": "#FF0000"
}
return
queue = answer["queue"]
self.status = queue["status"]
if self.is_paused():
color = self.color_paused
elif self.is_downloading():
color = self.color_downloading
else:
color = self.color
if self.is_downloading():
full_text = self.format.format(**queue)
else:
full_text = self.format_paused.format(**queue)
self.output = {
"full_text": full_text,
"color": color
} | Connect to SABnzbd and get the data. | Below is the instruction that describes the task:
### Input:
Connect to SABnzbd and get the data.
### Response:
def run(self):
"""Connect to SABnzbd and get the data."""
try:
answer = urlopen(self.url + "&mode=queue").read().decode()
except (HTTPError, URLError) as error:
self.output = {
"full_text": str(error.reason),
"color": "#FF0000"
}
return
answer = json.loads(answer)
# if answer["status"] exists and is False, an error occured
if not answer.get("status", True):
self.output = {
"full_text": answer["error"],
"color": "#FF0000"
}
return
queue = answer["queue"]
self.status = queue["status"]
if self.is_paused():
color = self.color_paused
elif self.is_downloading():
color = self.color_downloading
else:
color = self.color
if self.is_downloading():
full_text = self.format.format(**queue)
else:
full_text = self.format_paused.format(**queue)
self.output = {
"full_text": full_text,
"color": color
} |
def _op_method(self, data):
"""Operator
This method returns the input data operated on by all of the operators
Parameters
----------
data : np.ndarray
Input data array
Returns
-------
np.ndarray linear operation results
"""
res = np.empty(len(self.operators), dtype=np.ndarray)
for i in range(len(self.operators)):
res[i] = self.operators[i].op(data)
return res | Operator
This method returns the input data operated on by all of the operators
Parameters
----------
data : np.ndarray
Input data array
Returns
-------
np.ndarray linear operation results | Below is the instruction that describes the task:
### Input:
Operator
This method returns the input data operated on by all of the operators
Parameters
----------
data : np.ndarray
Input data array
Returns
-------
np.ndarray linear operation results
### Response:
def _op_method(self, data):
"""Operator
This method returns the input data operated on by all of the operators
Parameters
----------
data : np.ndarray
Input data array
Returns
-------
np.ndarray linear operation results
"""
res = np.empty(len(self.operators), dtype=np.ndarray)
for i in range(len(self.operators)):
res[i] = self.operators[i].op(data)
return res |
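A small self-contained sketch of the stacking behavior, using a toy operator class invented for illustration (the real operators in this codebase would provide the same op interface).

import numpy as np

class Scale(object):
    """Toy operator: multiply the input by a constant factor."""
    def __init__(self, factor):
        self.factor = factor
    def op(self, data):
        return data * self.factor

operators = [Scale(2.0), Scale(-1.0)]
data = np.arange(4.0)
res = np.empty(len(operators), dtype=np.ndarray)
for i in range(len(operators)):
    res[i] = operators[i].op(data)
# res[0] is array([0., 2., 4., 6.]); res[1] is array([-0., -1., -2., -3.])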
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
"""
if type(other) is SArray:
if self.__has_size__() and other.__has_size__() and len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__)) | Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero. | Below is the instruction that describes the task:
### Input:
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
### Response:
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
"""
if type(other) is SArray:
if self.__has_size__() and other.__has_size__() and len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__)) |
def submit_response(self, assessment_section_id, item_id, answer_form):
"""Submits an answer to an item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
arg: answer_form (osid.assessment.AnswerForm): the response
raise: IllegalState - ``has_assessment_section_begun()`` is
``false or is_assessment_section_over()`` is ``true``
raise: InvalidArgument - one or more of the elements in the
form is invalid
raise: NotFound - ``assessment_section_id`` or ``item_id`` is
not found, or ``item_id`` not part of
``assessment_section_id``
raise: NullArgument - ``assessment_section_id, item_id,`` or
``answer_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``answer_form`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
if not isinstance(answer_form, ABCAnswerForm):
raise errors.InvalidArgument('argument type is not an AnswerForm')
# OK, so the following should actually NEVER be true. Remove it?
if answer_form.is_for_update():
raise errors.InvalidArgument('the AnswerForm is for update only, not submit')
#
try:
if self._forms[answer_form.get_id().get_identifier()] == SUBMITTED:
raise errors.IllegalState('answer_form already used in a submit transaction')
except KeyError:
raise errors.Unsupported('answer_form did not originate from this assessment session')
if not answer_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
answer_form._my_map['_id'] = ObjectId()
self.get_assessment_section(assessment_section_id).submit_response(item_id, answer_form)
self._forms[answer_form.get_id().get_identifier()] = SUBMITTED | Submits an answer to an item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
arg: answer_form (osid.assessment.AnswerForm): the response
raise: IllegalState - ``has_assessment_section_begun()`` is
``false or is_assessment_section_over()`` is ``true``
raise: InvalidArgument - one or more of the elements in the
form is invalid
raise: NotFound - ``assessment_section_id`` or ``item_id`` is
not found, or ``item_id`` not part of
``assessment_section_id``
raise: NullArgument - ``assessment_section_id, item_id,`` or
``answer_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``answer_form`` is not of this service
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Submits an answer to an item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
arg: answer_form (osid.assessment.AnswerForm): the response
raise: IllegalState - ``has_assessment_section_begun()`` is
``false or is_assessment_section_over()`` is ``true``
raise: InvalidArgument - one or more of the elements in the
form is invalid
raise: NotFound - ``assessment_section_id`` or ``item_id`` is
not found, or ``item_id`` not part of
``assessment_section_id``
raise: NullArgument - ``assessment_section_id, item_id,`` or
``answer_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``answer_form`` is not of this service
*compliance: mandatory -- This method must be implemented.*
### Response:
def submit_response(self, assessment_section_id, item_id, answer_form):
"""Submits an answer to an item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
arg: answer_form (osid.assessment.AnswerForm): the response
raise: IllegalState - ``has_assessment_section_begun()`` is
``false or is_assessment_section_over()`` is ``true``
raise: InvalidArgument - one or more of the elements in the
form is invalid
raise: NotFound - ``assessment_section_id`` or ``item_id`` is
not found, or ``item_id`` not part of
``assessment_section_id``
raise: NullArgument - ``assessment_section_id, item_id,`` or
``answer_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``answer_form`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
if not isinstance(answer_form, ABCAnswerForm):
raise errors.InvalidArgument('argument type is not an AnswerForm')
# OK, so the following should actually NEVER be true. Remove it?
if answer_form.is_for_update():
raise errors.InvalidArgument('the AnswerForm is for update only, not submit')
#
try:
if self._forms[answer_form.get_id().get_identifier()] == SUBMITTED:
raise errors.IllegalState('answer_form already used in a submit transaction')
except KeyError:
raise errors.Unsupported('answer_form did not originate from this assessment session')
if not answer_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
answer_form._my_map['_id'] = ObjectId()
self.get_assessment_section(assessment_section_id).submit_response(item_id, answer_form)
self._forms[answer_form.get_id().get_identifier()] = SUBMITTED |
def _none_rejecter(validation_callable # type: Callable
):
# type: (...) -> Callable
"""
Wraps the given validation callable to reject None values. When a None value is received by the wrapper,
it is not passed to the validation_callable and instead this function will raise a WrappingFailure. When any other value is
received the validation_callable is called as usual.
:param validation_callable:
:return:
"""
# option (a) use the `decorate()` helper method to preserve name and signature of the inner object
# ==> NO, we want to support also non-function callable objects
# option (b) simply create a wrapper manually
def reject_none(x):
if x is not None:
return validation_callable(x)
else:
raise ValueIsNone(wrong_value=x)
# set a name so that the error messages are more user-friendly ==> NO ! here we want to see the checker
reject_none.__name__ = 'reject_none({})'.format(get_callable_name(validation_callable))
return reject_none | Wraps the given validation callable to reject None values. When a None value is received by the wrapper,
it is not passed to the validation_callable and instead this function will raise a WrappingFailure. When any other value is
received the validation_callable is called as usual.
:param validation_callable:
:return: | Below is the instruction that describes the task:
### Input:
Wraps the given validation callable to reject None values. When a None value is received by the wrapper,
it is not passed to the validation_callable and instead this function will raise a WrappingFailure. When any other value is
received the validation_callable is called as usual.
:param validation_callable:
:return:
### Response:
def _none_rejecter(validation_callable # type: Callable
):
# type: (...) -> Callable
"""
Wraps the given validation callable to reject None values. When a None value is received by the wrapper,
it is not passed to the validation_callable and instead this function will raise a WrappingFailure. When any other value is
received the validation_callable is called as usual.
:param validation_callable:
:return:
"""
# option (a) use the `decorate()` helper method to preserve name and signature of the inner object
# ==> NO, we want to support also non-function callable objects
# option (b) simply create a wrapper manually
def reject_none(x):
if x is not None:
return validation_callable(x)
else:
raise ValueIsNone(wrong_value=x)
# set a name so that the error messages are more user-friendly ==> NO ! here we want to see the checker
reject_none.__name__ = 'reject_none({})'.format(get_callable_name(validation_callable))
return reject_none |
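A hedged sketch of the wrapper's behavior with a plain lambda as the inner validator; ValueIsNone and get_callable_name are assumed to come from the surrounding validation library.

is_positive = lambda x: x > 0
reject = _none_rejecter(is_positive)
print(reject(5))      # True, delegated to the inner callable
try:
    reject(None)      # never reaches is_positive
except ValueIsNone:
    print("None was rejected before validation")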
def traverse_bfs(self, include_self=True):
'''Perform a Breadth-First Search (BFS) starting at this ``Node`` object'. Yields (``Node``, distance) tuples
Args:
``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
'''
if not isinstance(include_self, bool):
raise TypeError("include_self must be a bool")
q = deque(); dist = dict(); dist[self] = 0; q.append((self,0))
while len(q) != 0:
curr = q.popleft(); yield curr
for c in curr[0].children:
if c not in dist:
if c.edge_length is None:
el = 0
else:
el = c.edge_length
dist[c] = dist[curr[0]] + el; q.append((c,dist[c]))
if curr[0].parent is not None and curr[0].parent not in dist:
if curr[0].edge_length is None:
el = 0
else:
el = curr[0].edge_length
dist[curr[0].parent] = dist[curr[0]] + el; q.append((curr[0].parent,dist[curr[0].parent])) | Perform a Breadth-First Search (BFS) starting at this ``Node`` object'. Yields (``Node``, distance) tuples
Args:
``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False`` | Below is the instruction that describes the task:
### Input:
Perform a Breadth-First Search (BFS) starting at this ``Node`` object'. Yields (``Node``, distance) tuples
Args:
``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
### Response:
def traverse_bfs(self, include_self=True):
'''Perform a Breadth-First Search (BFS) starting at this ``Node`` object'. Yields (``Node``, distance) tuples
Args:
``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
'''
if not isinstance(include_self, bool):
raise TypeError("include_self must be a bool")
q = deque(); dist = dict(); dist[self] = 0; q.append((self,0))
while len(q) != 0:
curr = q.popleft(); yield curr
for c in curr[0].children:
if c not in dist:
if c.edge_length is None:
el = 0
else:
el = c.edge_length
dist[c] = dist[curr[0]] + el; q.append((c,dist[c]))
if curr[0].parent is not None and curr[0].parent not in dist:
if curr[0].edge_length is None:
el = 0
else:
el = curr[0].edge_length
dist[curr[0].parent] = dist[curr[0]] + el; q.append((curr[0].parent,dist[curr[0].parent])) |
def status(name, sig=None):
'''
Return the status for a service via daemontools, return pid if running
CLI Example:
.. code-block:: bash
salt '*' daemontools.status <service name>
'''
cmd = 'svstat {0}'.format(_service_path(name))
out = __salt__['cmd.run_stdout'](cmd, python_shell=False)
try:
pid = re.search(r'\(pid (\d+)\)', out).group(1)
except AttributeError:
pid = ''
return pid | Return the status for a service via daemontools, return pid if running
CLI Example:
.. code-block:: bash
salt '*' daemontools.status <service name> | Below is the instruction that describes the task:
### Input:
Return the status for a service via daemontools, return pid if running
CLI Example:
.. code-block:: bash
salt '*' daemontools.status <service name>
### Response:
def status(name, sig=None):
'''
Return the status for a service via daemontools, return pid if running
CLI Example:
.. code-block:: bash
salt '*' daemontools.status <service name>
'''
cmd = 'svstat {0}'.format(_service_path(name))
out = __salt__['cmd.run_stdout'](cmd, python_shell=False)
try:
pid = re.search(r'\(pid (\d+)\)', out).group(1)
except AttributeError:
pid = ''
return pid |
def p_startswith(self, st, ignorecase=False):
"Return True if the input starts with `st` at current position"
length = len(st)
matcher = result = self.input[self.pos:self.pos + length]
if ignorecase:
matcher = result.lower()
st = st.lower()
if matcher == st:
self.pos += length
return result
return False | Return True if the input starts with `st` at current position | Below is the instruction that describes the task:
### Input:
Return True if the input starts with `st` at current position
### Response:
def p_startswith(self, st, ignorecase=False):
"Return True if the input starts with `st` at current position"
length = len(st)
matcher = result = self.input[self.pos:self.pos + length]
if ignorecase:
matcher = result.lower()
st = st.lower()
if matcher == st:
self.pos += length
return result
return False |
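A minimal illustration of the matcher, attached to a throwaway parser-like object that carries only the input and pos attributes the method relies on (the class name is invented).

class ToyParser(object):
    def __init__(self, text):
        self.input = text
        self.pos = 0

ToyParser.p_startswith = p_startswith    # reuse the method defined above

p = ToyParser("Hello world")
print(p.p_startswith("hello", ignorecase=True))   # "Hello"; p.pos advances to 5
print(p.p_startswith(" world"))                   # " world"; p.pos advances to 11
print(p.p_startswith("!"))                        # False; p.pos unchanged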
def _non_framed_body_length(header, plaintext_length):
"""Calculates the length of a non-framed message body, given a complete header.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int
"""
body_length = header.algorithm.iv_len # IV
body_length += 8 # Encrypted Content Length
body_length += plaintext_length # Encrypted Content
body_length += header.algorithm.auth_len # Authentication Tag
return body_length | Calculates the length of a non-framed message body, given a complete header.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int | Below is the instruction that describes the task:
### Input:
Calculates the length of a non-framed message body, given a complete header.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int
### Response:
def _non_framed_body_length(header, plaintext_length):
"""Calculates the length of a non-framed message body, given a complete header.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int
"""
body_length = header.algorithm.iv_len # IV
body_length += 8 # Encrypted Content Length
body_length += plaintext_length # Encrypted Content
body_length += header.algorithm.auth_len # Authentication Tag
return body_length |
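A quick worked example of the arithmetic, using a stand-in header whose iv_len and auth_len (12 and 16 bytes) are assumed values, not taken from the source.

class FakeAlgorithm(object):
    iv_len = 12     # assumed IV length in bytes
    auth_len = 16   # assumed auth tag length in bytes

class FakeHeader(object):
    algorithm = FakeAlgorithm()

# 12 (IV) + 8 (encrypted content length field) + 1024 (content) + 16 (auth tag) == 1060
print(_non_framed_body_length(FakeHeader(), 1024))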
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
"""Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
"""
if cloud:
if not model_version or not model_name:
raise ValueError('model_version or model_name is not set')
if training_dir:
raise ValueError('training_dir not needed when cloud is True')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError('training_dir is not set')
if model_version or model_name:
raise ValueError('model_name and model_version not needed when cloud is '
'False.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return local_predict(training_dir, data) | Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame. | Below is the instruction that describes the task:
### Input:
Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
### Response:
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
"""Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
"""
if cloud:
if not model_version or not model_name:
raise ValueError('model_version or model_name is not set')
if training_dir:
raise ValueError('training_dir not needed when cloud is True')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError('training_dir is not set')
if model_version or model_name:
raise ValueError('model_name and model_version not needed when cloud is '
'False.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return local_predict(training_dir, data) |
def load_plugin(plugin_name):
"""
Given a plugin name, load plugin cls from plugin directory.
Will throw an exception if no plugin can be found.
"""
plugin_cls = plugin_map.get(plugin_name, None)
if not plugin_cls:
try:
plugin_module_name, plugin_cls_name = plugin_name.split(":")
plugin_module = import_module(plugin_module_name)
plugin_cls = getattr(plugin_module, plugin_cls_name)
except ValueError:
raise click.ClickException(
'"{}" is not a valid plugin path'.format(plugin_name)
)
except ImportError:
raise click.ClickException(
'"{}" does not name a Python module'.format(
plugin_module_name
)
)
except AttributeError:
raise click.ClickException(
'Module "{}" does not contain the class "{}"'.format(
plugin_module_name, plugin_cls_name
)
)
return plugin_cls | Given a plugin name, load plugin cls from plugin directory.
Will throw an exception if no plugin can be found. | Below is the instruction that describes the task:
### Input:
Given a plugin name, load plugin cls from plugin directory.
Will throw an exception if no plugin can be found.
### Response:
def load_plugin(plugin_name):
"""
Given a plugin name, load plugin cls from plugin directory.
Will throw an exception if no plugin can be found.
"""
plugin_cls = plugin_map.get(plugin_name, None)
if not plugin_cls:
try:
plugin_module_name, plugin_cls_name = plugin_name.split(":")
plugin_module = import_module(plugin_module_name)
plugin_cls = getattr(plugin_module, plugin_cls_name)
except ValueError:
raise click.ClickException(
'"{}" is not a valid plugin path'.format(plugin_name)
)
except ImportError:
raise click.ClickException(
'"{}" does not name a Python module'.format(
plugin_module_name
)
)
except AttributeError:
raise click.ClickException(
'Module "{}" does not contain the class "{}"'.format(
plugin_module_name, plugin_cls_name
)
)
return plugin_cls |
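An illustrative call using the "module:Class" path form handled above; the module and class names are hypothetical, and plugin_map is assumed to be the registry dict defined alongside this function.

# Imports my_plugins.storage and pulls the S3Backend attribute off it
# (both names are made up for the example).
plugin_cls = load_plugin("my_plugins.storage:S3Backend")
backend = plugin_cls()
# A bare name such as "s3" is only looked up in plugin_map, and a malformed
# path like "not-a-path" raises click.ClickException.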
def get_start_array(self, *start_words, **kwargs):
"""
Generates the beginning of a sentence.
:start_words: Try to start the sentence with these words.
"""
if not self.start_arrays:
raise MarkovTextExcept("Не с чего начинать генерацию.")
if not start_words:
return choice(self.start_arrays)
_variants = []
_weights = []
for tokens in self.start_arrays:
weight = 0b1
for word in start_words:
word = word.strip().lower()
for token in self.ONLY_WORDS.finditer(word):
if token.group() in tokens:
weight <<= 1
if weight > 0b1:
_variants.append(tokens)
_weights.append(weight)
if not _variants:
return choice(self.start_arrays)
return choices(_variants, weights=_weights, k=1)[0] | Generates the beginning of a sentence.
:start_words: Try to start the sentence with these words. | Below is the instruction that describes the task:
### Input:
Generates the beginning of a sentence.
:start_words: Try to start the sentence with these words.
### Response:
def get_start_array(self, *start_words, **kwargs):
"""
Generates the beginning of a sentence.
:start_words: Try to start the sentence with these words.
"""
if not self.start_arrays:
raise MarkovTextExcept("Не с чего начинать генерацию.")
if not start_words:
return choice(self.start_arrays)
_variants = []
_weights = []
for tokens in self.start_arrays:
weight = 0b1
for word in start_words:
word = word.strip().lower()
for token in self.ONLY_WORDS.finditer(word):
if token.group() in tokens:
weight <<= 1
if weight > 0b1:
_variants.append(tokens)
_weights.append(weight)
if not _variants:
return choice(self.start_arrays)
return choices(_variants, weights=_weights, k=1)[0] |
def sample(self, iter, length=None, verbose=0):
"""
Draws iter samples from the posterior.
"""
self._cur_trace_index = 0
self.max_trace_length = iter
self._iter = iter
self.verbose = verbose or 0
self.seed()
# Assign Trace instances to tallyable objects.
self.db.connect_model(self)
# Initialize database -> initialize traces.
if length is None:
length = iter
self.db._initialize(self._funs_to_tally, length)
# Put traces on objects
for v in self._variables_to_tally:
v.trace = self.db._traces[v.__name__]
# Loop
self._current_iter = 0
self._loop()
self._finalize() | Draws iter samples from the posterior. | Below is the instruction that describes the task:
### Input:
Draws iter samples from the posterior.
### Response:
def sample(self, iter, length=None, verbose=0):
"""
Draws iter samples from the posterior.
"""
self._cur_trace_index = 0
self.max_trace_length = iter
self._iter = iter
self.verbose = verbose or 0
self.seed()
# Assign Trace instances to tallyable objects.
self.db.connect_model(self)
# Initialize database -> initialize traces.
if length is None:
length = iter
self.db._initialize(self._funs_to_tally, length)
# Put traces on objects
for v in self._variables_to_tally:
v.trace = self.db._traces[v.__name__]
# Loop
self._current_iter = 0
self._loop()
self._finalize() |
def _filenames_from_arg(filename):
"""Utility function to deal with polymorphic filenames argument."""
if isinstance(filename, string_types):
filenames = [filename]
elif isinstance(filename, (list, tuple)):
filenames = filename
else:
raise Exception('filename argument must be string, list or tuple')
for fn in filenames:
if not os.path.exists(fn):
raise ValueError('file not found: %s' % fn)
if not os.path.isfile(fn):
raise ValueError('not a file: %s' % fn)
return filenames | Utility function to deal with polymorphic filenames argument. | Below is the instruction that describes the task:
### Input:
Utility function to deal with polymorphic filenames argument.
### Response:
def _filenames_from_arg(filename):
"""Utility function to deal with polymorphic filenames argument."""
if isinstance(filename, string_types):
filenames = [filename]
elif isinstance(filename, (list, tuple)):
filenames = filename
else:
raise Exception('filename argument must be string, list or tuple')
for fn in filenames:
if not os.path.exists(fn):
raise ValueError('file not found: %s' % fn)
if not os.path.isfile(fn):
raise ValueError('not a file: %s' % fn)
return filenames |
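A short usage sketch; the file paths are placeholders and must actually exist for the checks above to pass.

# A single string and a list normalize to the same list-of-filenames shape.
print(_filenames_from_arg("data/sample1.vcf"))                        # ['data/sample1.vcf']
print(_filenames_from_arg(["data/sample1.vcf", "data/sample2.vcf"]))
# A missing path raises ValueError('file not found: ...'), a directory raises
# ValueError('not a file: ...'), and any other argument type raises Exception.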
def insertPrimaryDataset(self):
"""
API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
"""
try :
body = request.body.read()
indata = cjson.decode(body)
indata = validateJSONInputNoCopy("primds", indata)
indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
self.dbsPrimaryDataset.insertPrimaryDataset(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert PrimaryDataset input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required) | Below is the instruction that describes the task:
### Input:
API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
### Response:
def insertPrimaryDataset(self):
"""
API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
"""
try :
body = request.body.read()
indata = cjson.decode(body)
indata = validateJSONInputNoCopy("primds", indata)
indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
self.dbsPrimaryDataset.insertPrimaryDataset(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert PrimaryDataset input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) |
def list_view_on_selected(self, widget, selected_item_key):
""" The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly
"""
self.lbl.set_text('List selection: ' + self.listView.children[selected_item_key].get_text()) | The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly | Below is the instruction that describes the task:
### Input:
The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly
### Response:
def list_view_on_selected(self, widget, selected_item_key):
""" The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly
"""
self.lbl.set_text('List selection: ' + self.listView.children[selected_item_key].get_text()) |
def ready(self):
'''Whether or not enough time has passed since the last failure'''
if self._last_failed:
delta = time.time() - self._last_failed
return delta >= self.backoff()
return True | Whether or not enough time has passed since the last failure | Below is the instruction that describes the task:
### Input:
Whether or not enough time has passed since the last failure
### Response:
def ready(self):
'''Whether or not enough time has passed since the last failure'''
if self._last_failed:
delta = time.time() - self._last_failed
return delta >= self.backoff()
return True |
def create_subscription(self, client_id, client_secret, callback_url,
object_type=model.Subscription.OBJECT_TYPE_ACTIVITY,
aspect_type=model.Subscription.ASPECT_TYPE_CREATE,
verify_token=model.Subscription.VERIFY_TOKEN_DEFAULT):
"""
Creates a webhook event subscription.
http://strava.github.io/api/partner/v3/events/#create-a-subscription
:param client_id: application's ID, obtained during registration
:type client_id: int
:param client_secret: application's secret, obtained during registration
:type client_secret: str
:param callback_url: callback URL where Strava will first send a GET request to validate, then subsequently send POST requests with updates
:type callback_url: str
:param object_type: object_type (currently only `activity` is supported)
:type object_type: str
:param aspect_type: object_type (currently only `create` is supported)
:type aspect_type: str
:param verify_token: a token you can use to verify Strava's GET callback request
:type verify_token: str
:return: An instance of :class:`stravalib.model.Subscription`.
:rtype: :class:`stravalib.model.Subscription`
Notes:
`object_type` and `aspect_type` are given defaults because there is currently only one valid value for each.
`verify_token` is set to a default in the event that the author doesn't want to specify one.
The application must have permission to make use of the webhook API. Access can be requested by contacting developers -at- strava.com.
"""
params = dict(client_id=client_id, client_secret=client_secret,
object_type=object_type, aspect_type=aspect_type,
callback_url=callback_url, verify_token=verify_token)
raw = self.protocol.post('/push_subscriptions', use_webhook_server=True,
**params)
return model.Subscription.deserialize(raw, bind_client=self) | Creates a webhook event subscription.
http://strava.github.io/api/partner/v3/events/#create-a-subscription
:param client_id: application's ID, obtained during registration
:type client_id: int
:param client_secret: application's secret, obtained during registration
:type client_secret: str
:param callback_url: callback URL where Strava will first send a GET request to validate, then subsequently send POST requests with updates
:type callback_url: str
:param object_type: object_type (currently only `activity` is supported)
:type object_type: str
:param aspect_type: object_type (currently only `create` is supported)
:type aspect_type: str
:param verify_token: a token you can use to verify Strava's GET callback request
:type verify_token: str
:return: An instance of :class:`stravalib.model.Subscription`.
:rtype: :class:`stravalib.model.Subscription`
Notes:
`object_type` and `aspect_type` are given defaults because there is currently only one valid value for each.
`verify_token` is set to a default in the event that the author doesn't want to specify one.
The application must have permission to make use of the webhook API. Access can be requested by contacting developers -at- strava.com. | Below is the instruction that describes the task:
### Input:
Creates a webhook event subscription.
http://strava.github.io/api/partner/v3/events/#create-a-subscription
:param client_id: application's ID, obtained during registration
:type client_id: int
:param client_secret: application's secret, obtained during registration
:type client_secret: str
:param callback_url: callback URL where Strava will first send a GET request to validate, then subsequently send POST requests with updates
:type callback_url: str
:param object_type: object_type (currently only `activity` is supported)
:type object_type: str
:param aspect_type: object_type (currently only `create` is supported)
:type aspect_type: str
:param verify_token: a token you can use to verify Strava's GET callback request
:type verify_token: str
:return: An instance of :class:`stravalib.model.Subscription`.
:rtype: :class:`stravalib.model.Subscription`
Notes:
`object_type` and `aspect_type` are given defaults because there is currently only one valid value for each.
`verify_token` is set to a default in the event that the author doesn't want to specify one.
The application must have permission to make use of the webhook API. Access can be requested by contacting developers -at- strava.com.
### Response:
def create_subscription(self, client_id, client_secret, callback_url,
object_type=model.Subscription.OBJECT_TYPE_ACTIVITY,
aspect_type=model.Subscription.ASPECT_TYPE_CREATE,
verify_token=model.Subscription.VERIFY_TOKEN_DEFAULT):
"""
Creates a webhook event subscription.
http://strava.github.io/api/partner/v3/events/#create-a-subscription
:param client_id: application's ID, obtained during registration
:type client_id: int
:param client_secret: application's secret, obtained during registration
:type client_secret: str
:param callback_url: callback URL where Strava will first send a GET request to validate, then subsequently send POST requests with updates
:type callback_url: str
:param object_type: object_type (currently only `activity` is supported)
:type object_type: str
:param aspect_type: object_type (currently only `create` is supported)
:type aspect_type: str
:param verify_token: a token you can use to verify Strava's GET callback request
:type verify_token: str
:return: An instance of :class:`stravalib.model.Subscription`.
:rtype: :class:`stravalib.model.Subscription`
Notes:
`object_type` and `aspect_type` are given defaults because there is currently only one valid value for each.
`verify_token` is set to a default in the event that the author doesn't want to specify one.
The application must have permission to make use of the webhook API. Access can be requested by contacting developers -at- strava.com.
"""
params = dict(client_id=client_id, client_secret=client_secret,
object_type=object_type, aspect_type=aspect_type,
callback_url=callback_url, verify_token=verify_token)
raw = self.protocol.post('/push_subscriptions', use_webhook_server=True,
**params)
return model.Subscription.deserialize(raw, bind_client=self) |
def get_transactions(self, include_investment=False):
"""Returns the transaction data as a Pandas DataFrame."""
assert_pd()
s = StringIO(self.get_transactions_csv(
include_investment=include_investment))
s.seek(0)
df = pd.read_csv(s, parse_dates=['Date'])
df.columns = [c.lower().replace(' ', '_') for c in df.columns]
df.category = (df.category.str.lower()
.replace('uncategorized', pd.np.nan))
return df | Returns the transaction data as a Pandas DataFrame. | Below is the the instruction that describes the task:
### Input:
Returns the transaction data as a Pandas DataFrame.
### Response:
def get_transactions(self, include_investment=False):
"""Returns the transaction data as a Pandas DataFrame."""
assert_pd()
s = StringIO(self.get_transactions_csv(
include_investment=include_investment))
s.seek(0)
df = pd.read_csv(s, parse_dates=['Date'])
df.columns = [c.lower().replace(' ', '_') for c in df.columns]
df.category = (df.category.str.lower()
.replace('uncategorized', pd.np.nan))
return df |
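A minimal usage sketch (hypothetical): `mint` is assumed to be an already-authenticated client instance exposing the method above, and the lower-cased `date`/`amount` column names are assumptions about the underlying CSV headers.
# Hypothetical usage sketch; `mint` and the column names are assumptions.
df = mint.get_transactions(include_investment=True)
monthly_spend = df.groupby(df.date.dt.to_period('M')).amount.sum()
print(monthly_spend.head())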
def clean_extra(self):
"""Clean extra files/directories specified by get_extra_paths()"""
extra_paths = self.get_extra_paths()
for path in extra_paths:
if not os.path.exists(path):
continue
if os.path.isdir(path):
self._clean_directory(path)
else:
self._clean_file(path) | Clean extra files/directories specified by get_extra_paths() | Below is the the instruction that describes the task:
### Input:
Clean extra files/directories specified by get_extra_paths()
### Response:
def clean_extra(self):
"""Clean extra files/directories specified by get_extra_paths()"""
extra_paths = self.get_extra_paths()
for path in extra_paths:
if not os.path.exists(path):
continue
if os.path.isdir(path):
self._clean_directory(path)
else:
self._clean_file(path) |
def coverage_pileup(self, space, start, end):
"""Retrieve pileup coverage across a specified region.
"""
return ((col.pos, self._normalize(col.n, self._total))
for col in self._bam.pileup(space, start, end)) | Retrieve pileup coverage across a specified region. | Below is the the instruction that describes the task:
### Input:
Retrieve pileup coverage across a specified region.
### Response:
def coverage_pileup(self, space, start, end):
"""Retrieve pileup coverage across a specified region.
"""
return ((col.pos, self._normalize(col.n, self._total))
for col in self._bam.pileup(space, start, end)) |
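A short usage sketch: `cov` is assumed to be an instance of the wrapping class above, constructed from an indexed BAM file.
# Hypothetical usage; `cov` is an instance of the class defining coverage_pileup().
for pos, depth in cov.coverage_pileup("chr1", 10000, 10100):
    print(pos, depth)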
def is_dir(dirname):
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname | Checks if a path is an actual directory that exists | Below is the the instruction that describes the task:
### Input:
Checks if a path is an actual directory that exists
### Response:
def is_dir(dirname):
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname |
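Because `is_dir` raises `argparse.ArgumentTypeError`, it plugs directly into argparse as a `type=` callable; a small usage sketch:
# Usage sketch: invalid paths are rejected at argument-parsing time.
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--workdir", type=is_dir, help="an existing directory")
args = parser.parse_args(["--workdir", os.getcwd()])
print(args.workdir)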
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
) | Get the sum of all transaction legs in to_account during given billing cycle | Below is the the instruction that describes the task:
### Input:
Get the sum of all transaction legs in to_account during given billing cycle
### Response:
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
) |
def to_internal_value(self, data):
"""Convert to internal value."""
user = getattr(self.context.get('request'), 'user')
queryset = self.get_queryset()
permission = get_full_perm('view', queryset.model)
try:
return get_objects_for_user(
user,
permission,
queryset.filter(**{self.slug_field: data}),
).latest()
except ObjectDoesNotExist:
self.fail(
'does_not_exist',
slug_name=self.slug_field,
value=smart_text(data),
model_name=queryset.model._meta.model_name, # pylint: disable=protected-access
)
except (TypeError, ValueError):
self.fail('invalid') | Convert to internal value. | Below is the the instruction that describes the task:
### Input:
Convert to internal value.
### Response:
def to_internal_value(self, data):
"""Convert to internal value."""
user = getattr(self.context.get('request'), 'user')
queryset = self.get_queryset()
permission = get_full_perm('view', queryset.model)
try:
return get_objects_for_user(
user,
permission,
queryset.filter(**{self.slug_field: data}),
).latest()
except ObjectDoesNotExist:
self.fail(
'does_not_exist',
slug_name=self.slug_field,
value=smart_text(data),
model_name=queryset.model._meta.model_name, # pylint: disable=protected-access
)
except (TypeError, ValueError):
self.fail('invalid') |
def exec_command(command, cwd=None):
r'''
Helper to exec locally (subprocess) or remotely (paramiko)
'''
rc = None
stdout = stderr = None
if ssh_conn is None:
ld_library_path = {'LD_LIBRARY_PATH': '.:%s' % os.environ.get('LD_LIBRARY_PATH', '')}
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=ld_library_path, cwd=cwd)
stdout, stderr = p.communicate()
rc = p.returncode
else:
# environment= requires paramiko >= 2.1 (fails with 2.0.2)
final_command = command if cwd is None else 'cd %s && %s %s' % (cwd, 'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH', command)
ssh_stdin, ssh_stdout, ssh_stderr = ssh_conn.exec_command(final_command)
stdout = ''.join(ssh_stdout.readlines())
stderr = ''.join(ssh_stderr.readlines())
rc = ssh_stdout.channel.recv_exit_status()
return rc, stdout, stderr | r'''
Helper to exec locally (subprocess) or remotely (paramiko) | Below is the the instruction that describes the task:
### Input:
r'''
Helper to exec locally (subprocess) or remotely (paramiko)
### Response:
def exec_command(command, cwd=None):
r'''
Helper to exec locally (subprocess) or remotely (paramiko)
'''
rc = None
stdout = stderr = None
if ssh_conn is None:
ld_library_path = {'LD_LIBRARY_PATH': '.:%s' % os.environ.get('LD_LIBRARY_PATH', '')}
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=ld_library_path, cwd=cwd)
stdout, stderr = p.communicate()
rc = p.returncode
else:
# environment= requires paramiko >= 2.1 (fails with 2.0.2)
final_command = command if cwd is None else 'cd %s && %s %s' % (cwd, 'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH', command)
ssh_stdin, ssh_stdout, ssh_stderr = ssh_conn.exec_command(final_command)
stdout = ''.join(ssh_stdout.readlines())
stderr = ''.join(ssh_stderr.readlines())
rc = ssh_stdout.channel.recv_exit_status()
return rc, stdout, stderr |
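A usage sketch for the local branch (the module-level `ssh_conn` is assumed to be None, so the subprocess path is taken):
# Usage sketch: run a command locally via the subprocess branch.
rc, out, err = exec_command("ls -l", cwd="/tmp")
if rc != 0:
    print("command failed:", err)
else:
    print(out)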
def publish_to_target(self, target_arn, message):
"""
Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:param message: str
"""
conn = self.get_conn()
messages = {
'default': message
}
return conn.publish(
TargetArn=target_arn,
Message=json.dumps(messages),
MessageStructure='json'
) | Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:param message: str | Below is the the instruction that describes the task:
### Input:
Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:param message: str
### Response:
def publish_to_target(self, target_arn, message):
"""
Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:param message: str
"""
conn = self.get_conn()
messages = {
'default': message
}
return conn.publish(
TargetArn=target_arn,
Message=json.dumps(messages),
MessageStructure='json'
) |
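Hypothetical usage: `hook` is assumed to be an instance of the SNS hook class above, and the target ARN is illustrative only.
# Hypothetical usage; the ARN below is illustrative.
response = hook.publish_to_target(
    target_arn="arn:aws:sns:us-east-1:123456789012:pipeline-events",
    message="job finished",
)
print(response.get("MessageId"))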
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
value = float(value)
fh = None
try:
fh = open(path,'r+b')
return file_update(fh, value, timestamp)
finally:
if fh:
fh.close() | update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float | Below is the the instruction that describes the task:
### Input:
update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
### Response:
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
value = float(value)
fh = None
try:
fh = open(path,'r+b')
return file_update(fh, value, timestamp)
finally:
if fh:
fh.close() |
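A usage sketch against an existing whisper archive (the .wsp path is illustrative):
# Usage sketch: write one datapoint; the file must already exist.
import time
update("/var/lib/whisper/app/requests.wsp", 42.0, timestamp=int(time.time()))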
def detectAndroid(self):
"""Return detection of an Android device
Detects *any* Android OS-based device: phone, tablet, and multi-media player.
Also detects Google TV.
"""
if UAgentInfo.deviceAndroid in self.__userAgent \
or self.detectGoogleTV():
return True
return False | Return detection of an Android device
Detects *any* Android OS-based device: phone, tablet, and multi-media player.
Also detects Google TV. | Below is the the instruction that describes the task:
### Input:
Return detection of an Android device
Detects *any* Android OS-based device: phone, tablet, and multi-media player.
Also detects Google TV.
### Response:
def detectAndroid(self):
"""Return detection of an Android device
Detects *any* Android OS-based device: phone, tablet, and multi-media player.
Also detects Google TV.
"""
if UAgentInfo.deviceAndroid in self.__userAgent \
or self.detectGoogleTV():
return True
return False |
def add_field(self, field, **kwargs):
"""Add each field as a private field."""
getattr(self, self._private_fields_name).append(field)
self._expire_cache(reverse=True)
self._expire_cache(reverse=False) | Add each field as a private field. | Below is the the instruction that describes the task:
### Input:
Add each field as a private field.
### Response:
def add_field(self, field, **kwargs):
"""Add each field as a private field."""
getattr(self, self._private_fields_name).append(field)
self._expire_cache(reverse=True)
self._expire_cache(reverse=False) |
def launch(self, args=None):
"""
This method triggers the parsing of arguments.
"""
self.options = self.parse_args(args)
if self.options.saveinputmeta:
# save original input options
self.save_input_meta()
if self.options.inputmeta:
# read new options from JSON file
self.options = self.get_options_from_file(self.options.inputmeta)
self.run(self.options)
# if required save meta data for the output after running the plugin app
if self.options.saveoutputmeta:
self.save_output_meta() | This method triggers the parsing of arguments. | Below is the the instruction that describes the task:
### Input:
This method triggers the parsing of arguments.
### Response:
def launch(self, args=None):
"""
This method triggers the parsing of arguments.
"""
self.options = self.parse_args(args)
if self.options.saveinputmeta:
# save original input options
self.save_input_meta()
if self.options.inputmeta:
# read new options from JSON file
self.options = self.get_options_from_file(self.options.inputmeta)
self.run(self.options)
# if required save meta data for the output after running the plugin app
if self.options.saveoutputmeta:
self.save_output_meta() |
def _create_ids(self, home_teams, away_teams):
"""
Creates IDs for both players/teams
"""
categories = pd.Categorical(np.append(home_teams,away_teams))
home_id, away_id = categories.codes[0:int(len(categories)/2)], categories.codes[int(len(categories)/2):len(categories)+1]
return home_id, away_id | Creates IDs for both players/teams | Below is the the instruction that describes the task:
### Input:
Creates IDs for both players/teams
### Response:
def _create_ids(self, home_teams, away_teams):
"""
Creates IDs for both players/teams
"""
categories = pd.Categorical(np.append(home_teams,away_teams))
home_id, away_id = categories.codes[0:int(len(categories)/2)], categories.codes[int(len(categories)/2):len(categories)+1]
return home_id, away_id |
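A small worked example of the same Categorical-codes trick: a name appearing on both sides maps to one shared integer id.
# Worked example (standalone) of the id scheme used above.
import numpy as np
import pandas as pd
home = np.array(["Arsenal", "Chelsea"])
away = np.array(["Chelsea", "Leeds"])
cats = pd.Categorical(np.append(home, away))
half = int(len(cats) / 2)
home_id, away_id = cats.codes[0:half], cats.codes[half:]
print(home_id, away_id)  # [0 1] [1 2]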
def get_all_floating_ips(self):
"""
This function returns a list of FloatingIP objects.
"""
data = self.get_data("floating_ips")
floating_ips = list()
for jsoned in data['floating_ips']:
floating_ip = FloatingIP(**jsoned)
floating_ip.token = self.token
floating_ips.append(floating_ip)
return floating_ips | This function returns a list of FloatingIP objects. | Below is the the instruction that describes the task:
### Input:
This function returns a list of FloatingIP objects.
### Response:
def get_all_floating_ips(self):
"""
This function returns a list of FloatingIP objects.
"""
data = self.get_data("floating_ips")
floating_ips = list()
for jsoned in data['floating_ips']:
floating_ip = FloatingIP(**jsoned)
floating_ip.token = self.token
floating_ips.append(floating_ip)
return floating_ips |
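Hypothetical usage with a Manager-style object that defines the method above and holds an API token:
# Hypothetical usage; `manager` is an authenticated instance of the class above.
for floating_ip in manager.get_all_floating_ips():
    print(floating_ip.ip)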
def create_session(self):
"""Create a session.
First we look in self.key_file for a path to a json file with the
credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.
Next we look at self.profile for a profile name and try
to use the Session call to automatically pick up the keys for the profile from
the user default keys file ~/.aws/config.
Finally, boto3 will look for the keys in environment variables:
AWS_ACCESS_KEY_ID: The access key for your AWS account.
AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
AWS_SESSION_TOKEN: The session key for your AWS account.
This is only needed when you are using temporary credentials.
The AWS_SECURITY_TOKEN environment variable can also be used,
but is only supported for backwards compatibility purposes.
AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
"""
session = None
if self.key_file is not None:
credfile = os.path.expandvars(os.path.expanduser(self.key_file))
try:
with open(credfile, 'r') as f:
creds = json.load(f)
except json.JSONDecodeError as e:
logger.error(
"EC2Provider '{}': json decode error in credential file {}".format(self.label, credfile)
)
raise e
except Exception as e:
logger.debug(
"EC2Provider '{0}' caught exception while reading credential file: {1}".format(
self.label, credfile
)
)
raise e
logger.debug("EC2Provider '{}': Using credential file to create session".format(self.label))
session = boto3.session.Session(region_name=self.region, **creds)
elif self.profile is not None:
logger.debug("EC2Provider '{}': Using profile name to create session".format(self.label))
session = boto3.session.Session(
profile_name=self.profile, region_name=self.region
)
else:
logger.debug("EC2Provider '{}': Using environment variables to create session".format(self.label))
session = boto3.session.Session(region_name=self.region)
return session | Create a session.
First we look in self.key_file for a path to a json file with the
credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.
Next we look at self.profile for a profile name and try
to use the Session call to automatically pick up the keys for the profile from
the user default keys file ~/.aws/config.
Finally, boto3 will look for the keys in environment variables:
AWS_ACCESS_KEY_ID: The access key for your AWS account.
AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
AWS_SESSION_TOKEN: The session key for your AWS account.
This is only needed when you are using temporary credentials.
The AWS_SECURITY_TOKEN environment variable can also be used,
but is only supported for backwards compatibility purposes.
AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python. | Below is the the instruction that describes the task:
### Input:
Create a session.
First we look in self.key_file for a path to a json file with the
credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.
Next we look at self.profile for a profile name and try
to use the Session call to automatically pick up the keys for the profile from
the user default keys file ~/.aws/config.
Finally, boto3 will look for the keys in environment variables:
AWS_ACCESS_KEY_ID: The access key for your AWS account.
AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
AWS_SESSION_TOKEN: The session key for your AWS account.
This is only needed when you are using temporary credentials.
The AWS_SECURITY_TOKEN environment variable can also be used,
but is only supported for backwards compatibility purposes.
AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
### Response:
def create_session(self):
"""Create a session.
First we look in self.key_file for a path to a json file with the
credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.
Next we look at self.profile for a profile name and try
to use the Session call to automatically pick up the keys for the profile from
the user default keys file ~/.aws/config.
Finally, boto3 will look for the keys in environment variables:
AWS_ACCESS_KEY_ID: The access key for your AWS account.
AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
AWS_SESSION_TOKEN: The session key for your AWS account.
This is only needed when you are using temporary credentials.
The AWS_SECURITY_TOKEN environment variable can also be used,
but is only supported for backwards compatibility purposes.
AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
"""
session = None
if self.key_file is not None:
credfile = os.path.expandvars(os.path.expanduser(self.key_file))
try:
with open(credfile, 'r') as f:
creds = json.load(f)
except json.JSONDecodeError as e:
logger.error(
"EC2Provider '{}': json decode error in credential file {}".format(self.label, credfile)
)
raise e
except Exception as e:
logger.debug(
"EC2Provider '{0}' caught exception while reading credential file: {1}".format(
self.label, credfile
)
)
raise e
logger.debug("EC2Provider '{}': Using credential file to create session".format(self.label))
session = boto3.session.Session(region_name=self.region, **creds)
elif self.profile is not None:
logger.debug("EC2Provider '{}': Using profile name to create session".format(self.label))
session = boto3.session.Session(
profile_name=self.profile, region_name=self.region
)
else:
logger.debug("EC2Provider '{}': Using environment variables to create session".format(self.label))
session = boto3.session.Session(region_name=self.region)
return session |
def get_port_profile_status_output_port_profile_mac_association_applied_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
mac_association = ET.SubElement(port_profile, "mac-association")
mac_key = ET.SubElement(mac_association, "mac")
mac_key.text = kwargs.pop('mac')
applied_interface = ET.SubElement(mac_association, "applied-interface")
interface_type = ET.SubElement(applied_interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_port_profile_status_output_port_profile_mac_association_applied_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
mac_association = ET.SubElement(port_profile, "mac-association")
mac_key = ET.SubElement(mac_association, "mac")
mac_key.text = kwargs.pop('mac')
applied_interface = ET.SubElement(mac_association, "applied-interface")
interface_type = ET.SubElement(applied_interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def connect(self, task_spec):
"""
Connect the *following* task to this one. In other words, the
given task is added as an output task.
task -- the task to connect to.
"""
self.thread_starter.outputs.append(task_spec)
task_spec._connect_notify(self.thread_starter) | Connect the *following* task to this one. In other words, the
given task is added as an output task.
task -- the task to connect to. | Below is the the instruction that describes the task:
### Input:
Connect the *following* task to this one. In other words, the
given task is added as an output task.
task -- the task to connect to.
### Response:
def connect(self, task_spec):
"""
Connect the *following* task to this one. In other words, the
given task is added as an output task.
task -- the task to connect to.
"""
self.thread_starter.outputs.append(task_spec)
task_spec._connect_notify(self.thread_starter) |
def time_estimate(self, duration, **kwargs):
"""Set an estimated time of work for the object.
Args:
duration (str): Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = '%s/%s/time_estimate' % (self.manager.path, self.get_id())
data = {'duration': duration}
return self.manager.gitlab.http_post(path, post_data=data, **kwargs) | Set an estimated time of work for the object.
Args:
duration (str): Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done | Below is the the instruction that describes the task:
### Input:
Set an estimated time of work for the object.
Args:
duration (str): Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
### Response:
def time_estimate(self, duration, **kwargs):
"""Set an estimated time of work for the object.
Args:
duration (str): Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = '%s/%s/time_estimate' % (self.manager.path, self.get_id())
data = {'duration': duration}
return self.manager.gitlab.http_post(path, post_data=data, **kwargs) |
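Hypothetical usage with an object (for example a project issue) whose class mixes in this method:
# Hypothetical usage; `project` is an assumed, already-fetched project object.
issue = project.issues.get(42)
issue.time_estimate("3h30")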
def create_model(self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT):
"""Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object.
See :func:`~sagemaker.chainer.model.ChainerModel` for full details.
"""
role = role or self.role
return ChainerModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(),
enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, name=self._current_job_name,
container_log_level=self.container_log_level, code_location=self.code_location,
py_version=self.py_version, framework_version=self.framework_version,
model_server_workers=model_server_workers, image=self.image_name,
sagemaker_session=self.sagemaker_session,
vpc_config=self.get_vpc_config(vpc_config_override), dependencies=self.dependencies) | Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object.
See :func:`~sagemaker.chainer.model.ChainerModel` for full details. | Below is the the instruction that describes the task:
### Input:
Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object.
See :func:`~sagemaker.chainer.model.ChainerModel` for full details.
### Response:
def create_model(self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT):
"""Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object.
See :func:`~sagemaker.chainer.model.ChainerModel` for full details.
"""
role = role or self.role
return ChainerModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(),
enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, name=self._current_job_name,
container_log_level=self.container_log_level, code_location=self.code_location,
py_version=self.py_version, framework_version=self.framework_version,
model_server_workers=model_server_workers, image=self.image_name,
sagemaker_session=self.sagemaker_session,
vpc_config=self.get_vpc_config(vpc_config_override), dependencies=self.dependencies) |
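A hypothetical follow-up: build the model from an already-fitted estimator and deploy it (instance type and worker count are illustrative).
# Hypothetical usage; `chainer_estimator` is an already-fitted estimator.
model = chainer_estimator.create_model(model_server_workers=2)
predictor = model.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")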
async def raw(self, command, *args, _conn=None, **kwargs):
"""
Send the raw command to the underlying client. Note that by using this CMD you
will lose compatibility with other backends.
Due to limitations with aiomcache client, args have to be provided as bytes.
For rest of backends, str.
:param command: str with the command.
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: whatever the underlying client returns
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
ret = await self._raw(
command, *args, encoding=self.serializer.encoding, _conn=_conn, **kwargs
)
logger.debug("%s (%.4f)s", command, time.monotonic() - start)
return ret | Send the raw command to the underlying client. Note that by using this CMD you
will lose compatibility with other backends.
Due to limitations with aiomcache client, args have to be provided as bytes.
For rest of backends, str.
:param command: str with the command.
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: whatever the underlying client returns
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout | Below is the the instruction that describes the task:
### Input:
Send the raw command to the underlying client. Note that by using this CMD you
will lose compatibility with other backends.
Due to limitations with aiomcache client, args have to be provided as bytes.
For rest of backends, str.
:param command: str with the command.
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: whatever the underlying client returns
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
### Response:
async def raw(self, command, *args, _conn=None, **kwargs):
"""
Send the raw command to the underlying client. Note that by using this CMD you
will lose compatibility with other backends.
Due to limitations with aiomcache client, args have to be provided as bytes.
For rest of backends, str.
:param command: str with the command.
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: whatever the underlying client returns
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
ret = await self._raw(
command, *args, encoding=self.serializer.encoding, _conn=_conn, **kwargs
)
logger.debug("%s (%.4f)s", command, time.monotonic() - start)
return ret |
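A minimal sketch inside a coroutine; `cache` is assumed to be a backend instance exposing raw(), and the command names depend on the underlying client.
# Usage sketch (hypothetical); command names are backend-specific.
async def warm_up(cache):
    await cache.raw("set", "greeting", "hello")
    return await cache.raw("get", "greeting")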
def get_conn(self, aws_access_key=None, aws_secret_key=None):
'''
Hook point for overriding how the CounterPool gets its connection to
AWS.
'''
return boto.connect_dynamodb(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
) | Hook point for overriding how the CounterPool gets its connection to
AWS. | Below is the the instruction that describes the task:
### Input:
Hook point for overriding how the CounterPool gets its connection to
AWS.
### Response:
def get_conn(self, aws_access_key=None, aws_secret_key=None):
'''
Hook point for overriding how the CounterPool gets its connection to
AWS.
'''
return boto.connect_dynamodb(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
) |
def build_skeleton(nodes, independencies):
"""Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
    they are obtained as a set of d-separations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
Reference
---------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
    >>> # build skeleton from d-separations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
"""
nodes = list(nodes)
if isinstance(independencies, Independencies):
def is_independent(X, Y, Zs):
return IndependenceAssertion(X, Y, Zs) in independencies
elif callable(independencies):
is_independent = independencies
else:
raise ValueError("'independencies' must be either Independencies-instance " +
"or a ternary function that decides independencies.")
graph = UndirectedGraph(combinations(nodes, 2))
lim_neighbors = 0
separating_sets = dict()
while not all([len(list(graph.neighbors(node))) < lim_neighbors for node in nodes]):
for node in nodes:
for neighbor in list(graph.neighbors(node)):
# search if there is a set of neighbors (of size lim_neighbors)
# that makes X and Y independent:
for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]), lim_neighbors):
if is_independent(node, neighbor, separating_set):
separating_sets[frozenset((node, neighbor))] = separating_set
graph.remove_edge(node, neighbor)
break
lim_neighbors += 1
return graph, separating_sets | Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
they are obtained as a set of d-separations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
Reference
---------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
>>> # build skeleton from d-separations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')] | Below is the the instruction that describes the task:
### Input:
Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
they are obtained as a set of d-seperations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
Reference
---------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
>>> # build skeleton from d-separations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
### Response:
def build_skeleton(nodes, independencies):
"""Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
    they are obtained as a set of d-separations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
Reference
---------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
    >>> # build skeleton from d-separations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
"""
nodes = list(nodes)
if isinstance(independencies, Independencies):
def is_independent(X, Y, Zs):
return IndependenceAssertion(X, Y, Zs) in independencies
elif callable(independencies):
is_independent = independencies
else:
raise ValueError("'independencies' must be either Independencies-instance " +
"or a ternary function that decides independencies.")
graph = UndirectedGraph(combinations(nodes, 2))
lim_neighbors = 0
separating_sets = dict()
while not all([len(list(graph.neighbors(node))) < lim_neighbors for node in nodes]):
for node in nodes:
for neighbor in list(graph.neighbors(node)):
# search if there is a set of neighbors (of size lim_neighbors)
# that makes X and Y independent:
for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]), lim_neighbors):
if is_independent(node, neighbor, separating_set):
separating_sets[frozenset((node, neighbor))] = separating_set
graph.remove_edge(node, neighbor)
break
lim_neighbors += 1
return graph, separating_sets |
def _import_module(self, module_path):
"""Dynamically import a module returning a handle to it.
:param str module_path: The module path
:rtype: module
"""
LOGGER.debug('Importing %s', module_path)
try:
return __import__(module_path)
except ImportError as error:
LOGGER.critical('Could not import %s: %s', module_path, error)
return None | Dynamically import a module returning a handle to it.
:param str module_path: The module path
:rtype: module | Below is the the instruction that describes the task:
### Input:
Dynamically import a module returning a handle to it.
:param str module_path: The module path
:rtype: module
### Response:
def _import_module(self, module_path):
"""Dynamically import a module returning a handle to it.
:param str module_path: The module path
:rtype: module
"""
LOGGER.debug('Importing %s', module_path)
try:
return __import__(module_path)
except ImportError as error:
LOGGER.critical('Could not import %s: %s', module_path, error)
return None |
def pl_resolution(KB, alpha):
"Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]"
clauses = KB.clauses + conjuncts(to_cnf(~alpha))
new = set()
while True:
n = len(clauses)
pairs = [(clauses[i], clauses[j])
for i in range(n) for j in range(i+1, n)]
for (ci, cj) in pairs:
resolvents = pl_resolve(ci, cj)
if FALSE in resolvents: return True
new = new.union(set(resolvents))
if new.issubset(set(clauses)): return False
for c in new:
if c not in clauses: clauses.append(c) | Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12] | Below is the the instruction that describes the task:
### Input:
Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]
### Response:
def pl_resolution(KB, alpha):
"Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]"
clauses = KB.clauses + conjuncts(to_cnf(~alpha))
new = set()
while True:
n = len(clauses)
pairs = [(clauses[i], clauses[j])
for i in range(n) for j in range(i+1, n)]
for (ci, cj) in pairs:
resolvents = pl_resolve(ci, cj)
if FALSE in resolvents: return True
new = new.union(set(resolvents))
if new.issubset(set(clauses)): return False
for c in new:
if c not in clauses: clauses.append(c) |
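An illustrative call, assuming the companion helpers from the same propositional-logic module (an expr() parser and a PropKB knowledge base) are available:
# Illustrative call; expr() and PropKB are assumed to come from the same module.
kb = PropKB()
kb.tell(expr("P ==> Q"))
kb.tell(expr("P"))
print(pl_resolution(kb, expr("Q")))  # expected: True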
def _get_model_fitting(self, con_est_id):
"""
        Retrieve model fitting that corresponds to contrast with identifier
'con_id'
from the list of model fitting objects stored in self.model_fittings
"""
for (mpe_id, pe_ids), contrasts in self.contrasts.items():
for contrast in contrasts:
if contrast.estimation.id == con_est_id:
model_fitting_id = mpe_id
pe_map_ids = pe_ids
break
for model_fitting in self.model_fittings:
if model_fitting.activity.id == model_fitting_id:
return (model_fitting, pe_map_ids)
raise Exception("Model fitting of contrast : " + str(con_est_id) +
" not found.") | Retreive model fitting that corresponds to contrast with identifier
'con_id'
from the list of model fitting objects stored in self.model_fittings | Below is the the instruction that describes the task:
### Input:
Retrieve model fitting that corresponds to contrast with identifier
'con_id'
from the list of model fitting objects stored in self.model_fittings
### Response:
def _get_model_fitting(self, con_est_id):
"""
        Retrieve model fitting that corresponds to contrast with identifier
'con_id'
from the list of model fitting objects stored in self.model_fittings
"""
for (mpe_id, pe_ids), contrasts in self.contrasts.items():
for contrast in contrasts:
if contrast.estimation.id == con_est_id:
model_fitting_id = mpe_id
pe_map_ids = pe_ids
break
for model_fitting in self.model_fittings:
if model_fitting.activity.id == model_fitting_id:
return (model_fitting, pe_map_ids)
raise Exception("Model fitting of contrast : " + str(con_est_id) +
" not found.") |
def new(self, sources_by_grp):
"""
Generate a new CompositeSourceModel from the given dictionary.
:param sources_by_group: a dictionary grp_id -> sources
:returns: a new CompositeSourceModel instance
"""
source_models = []
for sm in self.source_models:
src_groups = []
for src_group in sm.src_groups:
sg = copy.copy(src_group)
sg.sources = sorted(sources_by_grp.get(sg.id, []),
key=operator.attrgetter('id'))
src_groups.append(sg)
newsm = logictree.LtSourceModel(
sm.names, sm.weight, sm.path, src_groups,
sm.num_gsim_paths, sm.ordinal, sm.samples)
source_models.append(newsm)
new = self.__class__(self.gsim_lt, self.source_model_lt, source_models,
self.optimize_same_id)
new.info.update_eff_ruptures(new.get_num_ruptures())
new.info.tot_weight = new.get_weight()
return new | Generate a new CompositeSourceModel from the given dictionary.
:param sources_by_group: a dictionary grp_id -> sources
:returns: a new CompositeSourceModel instance | Below is the the instruction that describes the task:
### Input:
Generate a new CompositeSourceModel from the given dictionary.
:param sources_by_group: a dictionary grp_id -> sources
:returns: a new CompositeSourceModel instance
### Response:
def new(self, sources_by_grp):
"""
Generate a new CompositeSourceModel from the given dictionary.
:param sources_by_group: a dictionary grp_id -> sources
:returns: a new CompositeSourceModel instance
"""
source_models = []
for sm in self.source_models:
src_groups = []
for src_group in sm.src_groups:
sg = copy.copy(src_group)
sg.sources = sorted(sources_by_grp.get(sg.id, []),
key=operator.attrgetter('id'))
src_groups.append(sg)
newsm = logictree.LtSourceModel(
sm.names, sm.weight, sm.path, src_groups,
sm.num_gsim_paths, sm.ordinal, sm.samples)
source_models.append(newsm)
new = self.__class__(self.gsim_lt, self.source_model_lt, source_models,
self.optimize_same_id)
new.info.update_eff_ruptures(new.get_num_ruptures())
new.info.tot_weight = new.get_weight()
return new |
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser(
description='Tool to run attacks and defenses.')
parser.add_argument('--attacks_dir', required=True,
help='Location of all attacks.')
parser.add_argument('--targeted_attacks_dir', required=True,
help='Location of all targeted attacks.')
parser.add_argument('--defenses_dir', required=True,
help='Location of all defenses.')
parser.add_argument('--dataset_dir', required=True,
help='Location of the dataset.')
parser.add_argument('--dataset_metadata', required=True,
help='Location of the dataset metadata.')
parser.add_argument('--intermediate_results_dir', required=True,
help='Directory to store intermediate results.')
parser.add_argument('--output_dir', required=True,
help=('Output directory.'))
parser.add_argument('--epsilon', required=False, type=int, default=16,
help='Maximum allowed size of adversarial perturbation')
parser.add_argument('--gpu', dest='use_gpu', action='store_true')
parser.add_argument('--nogpu', dest='use_gpu', action='store_false')
parser.set_defaults(use_gpu=False)
parser.add_argument('--save_all_classification',
dest='save_all_classification', action='store_true')
parser.add_argument('--nosave_all_classification',
dest='save_all_classification', action='store_false')
parser.set_defaults(save_all_classification=False)
return parser.parse_args() | Parses command line arguments. | Below is the the instruction that describes the task:
### Input:
Parses command line arguments.
### Response:
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser(
description='Tool to run attacks and defenses.')
parser.add_argument('--attacks_dir', required=True,
help='Location of all attacks.')
parser.add_argument('--targeted_attacks_dir', required=True,
help='Location of all targeted attacks.')
parser.add_argument('--defenses_dir', required=True,
help='Location of all defenses.')
parser.add_argument('--dataset_dir', required=True,
help='Location of the dataset.')
parser.add_argument('--dataset_metadata', required=True,
help='Location of the dataset metadata.')
parser.add_argument('--intermediate_results_dir', required=True,
help='Directory to store intermediate results.')
parser.add_argument('--output_dir', required=True,
help=('Output directory.'))
parser.add_argument('--epsilon', required=False, type=int, default=16,
help='Maximum allowed size of adversarial perturbation')
parser.add_argument('--gpu', dest='use_gpu', action='store_true')
parser.add_argument('--nogpu', dest='use_gpu', action='store_false')
parser.set_defaults(use_gpu=False)
parser.add_argument('--save_all_classification',
dest='save_all_classification', action='store_true')
parser.add_argument('--nosave_all_classification',
dest='save_all_classification', action='store_false')
parser.set_defaults(save_all_classification=False)
return parser.parse_args() |
def iter_setup_packages(srcdir, packages):
""" A generator that finds and imports all of the ``setup_package.py``
modules in the source packages.
Returns
-------
modgen : generator
A generator that yields (modname, mod), where `mod` is the module and
`modname` is the module name for the ``setup_package.py`` modules.
"""
for packagename in packages:
package_parts = packagename.split('.')
package_path = os.path.join(srcdir, *package_parts)
setup_package = os.path.relpath(
os.path.join(package_path, 'setup_package.py'))
if os.path.isfile(setup_package):
module = import_file(setup_package,
name=packagename + '.setup_package')
yield module | A generator that finds and imports all of the ``setup_package.py``
modules in the source packages.
Returns
-------
modgen : generator
A generator that yields (modname, mod), where `mod` is the module and
`modname` is the module name for the ``setup_package.py`` modules. | Below is the the instruction that describes the task:
### Input:
A generator that finds and imports all of the ``setup_package.py``
modules in the source packages.
Returns
-------
modgen : generator
A generator that yields (modname, mod), where `mod` is the module and
`modname` is the module name for the ``setup_package.py`` modules.
### Response:
def iter_setup_packages(srcdir, packages):
""" A generator that finds and imports all of the ``setup_package.py``
modules in the source packages.
Returns
-------
modgen : generator
A generator that yields (modname, mod), where `mod` is the module and
`modname` is the module name for the ``setup_package.py`` modules.
"""
for packagename in packages:
package_parts = packagename.split('.')
package_path = os.path.join(srcdir, *package_parts)
setup_package = os.path.relpath(
os.path.join(package_path, 'setup_package.py'))
if os.path.isfile(setup_package):
module = import_file(setup_package,
name=packagename + '.setup_package')
yield module |
def gpg_download_key( key_id, key_server, config_dir=None ):
"""
Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key
"""
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
recvdat = gpg.recv_keys( key_server, key_id )
fingerprint = None
try:
assert recvdat.count == 1
assert len(recvdat.fingerprints) == 1
fingerprint = recvdat.fingerprints[0]
except AssertionError, e:
log.exception(e)
log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
shutil.rmtree( tmpdir )
return None
keydat = gpg.export_keys( [fingerprint] )
shutil.rmtree( tmpdir )
return str(keydat) | Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key | Below is the the instruction that describes the task:
### Input:
Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key
### Response:
def gpg_download_key( key_id, key_server, config_dir=None ):
"""
Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key
"""
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
recvdat = gpg.recv_keys( key_server, key_id )
fingerprint = None
try:
assert recvdat.count == 1
assert len(recvdat.fingerprints) == 1
fingerprint = recvdat.fingerprints[0]
except AssertionError, e:
log.exception(e)
log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
shutil.rmtree( tmpdir )
return None
keydat = gpg.export_keys( [fingerprint] )
shutil.rmtree( tmpdir )
return str(keydat) |
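A hypothetical usage sketch; the key id and keyserver below are illustrative only.
# Hypothetical usage; key id and keyserver are placeholders.
key_asc = gpg_download_key("0xA1B2C3D4E5F60718", "keyserver.ubuntu.com")
if key_asc is not None:
    print(key_asc.splitlines()[0])  # ASCII-armored header line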
def add_image(self, figure, dpi=72):
'''
Adds an image to the last chapter/section.
The image will be stored in the `{self.title}_files` directory.
:param matplotlib.figure figure:
A matplotlib figure to be saved into the report
'''
        name = os.path.join(self._dir, 'fig%s.png' % self.fig_counter)
self.fig_counter += 1
figure.savefig(name, dpi=dpi)
plt.close(figure)
self.body += '<img src="%s" />\n' % name | Adds an image to the last chapter/section.
The image will be stored in the `{self.title}_files` directory.
:param matplotlib.figure figure:
A matplotlib figure to be saved into the report | Below is the the instruction that describes the task:
### Input:
Adds an image to the last chapter/section.
The image will be stored in the `{self.title}_files` directory.
:param matplotlib.figure figure:
A matplotlib figure to be saved into the report
### Response:
def add_image(self, figure, dpi=72):
'''
Adds an image to the last chapter/section.
The image will be stored in the `{self.title}_files` directory.
:param matplotlib.figure figure:
A matplotlib figure to be saved into the report
'''
name = os.path.join(self._dir, 'fig%s.png' % self.fig_counter)
self.fig_counter += 1
figure.savefig(name, dpi=dpi)
plt.close(figure)
self.body += '<img src="%s" />\n' % name |
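A small usage sketch, assuming `report` is an already-constructed instance of the report class that owns add_image (its constructor is not shown in this excerpt):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
report.add_image(fig, dpi=150)   # saves figN.png into the report directory and appends an <img> tag to report.body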
def dump(self, out=sys.stdout, row_fn=repr, limit=-1, indent=0):
"""Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level
"""
NL = '\n'
if indent:
out.write(" "*indent + self.pivot_key_str())
else:
out.write("Pivot: %s" % ','.join(self._pivot_attrs))
out.write(NL)
if self.has_subtables():
do_all(sub.dump(out, row_fn, limit, indent+1) for sub in self.subtables if sub)
else:
if limit >= 0:
showslice = slice(0, limit)
else:
showslice = slice(None, None)
do_all(out.write(" "*(indent+1) + row_fn(r) + NL) for r in self.obs[showslice])
out.flush() | Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level | Below is the the instruction that describes the task:
### Input:
Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level
### Response:
def dump(self, out=sys.stdout, row_fn=repr, limit=-1, indent=0):
"""Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level
"""
NL = '\n'
if indent:
out.write(" "*indent + self.pivot_key_str())
else:
out.write("Pivot: %s" % ','.join(self._pivot_attrs))
out.write(NL)
if self.has_subtables():
do_all(sub.dump(out, row_fn, limit, indent+1) for sub in self.subtables if sub)
else:
if limit >= 0:
showslice = slice(0, limit)
else:
showslice = slice(None, None)
do_all(out.write(" "*(indent+1) + row_fn(r) + NL) for r in self.obs[showslice])
out.flush() |
def remove(name=None, pkgs=None, recursive=True, **kwargs):
'''
name
The name of the package to be deleted.
recursive
Also remove dependent packages (not required elsewhere).
Default mode: enabled.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name> [recursive=False]
salt '*' pkg.remove <package1>,<package2>,<package3> [recursive=False]
salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False]
'''
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
old = list_pkgs()
# keep only installed packages
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = ['xbps-remove', '-y']
if recursive:
cmd.append('-R')
cmd.extend(targets)
__salt__['cmd.run'](cmd, output_loglevel='trace')
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new) | name
The name of the package to be deleted.
recursive
Also remove dependent packages (not required elsewhere).
Default mode: enabled.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name> [recursive=False]
salt '*' pkg.remove <package1>,<package2>,<package3> [recursive=False]
salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False] | Below is the the instruction that describes the task:
### Input:
name
The name of the package to be deleted.
recursive
Also remove dependent packages (not required elsewhere).
Default mode: enabled.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name> [recursive=False]
salt '*' pkg.remove <package1>,<package2>,<package3> [recursive=False]
salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False]
### Response:
def remove(name=None, pkgs=None, recursive=True, **kwargs):
'''
name
The name of the package to be deleted.
recursive
Also remove dependent packages (not required elsewhere).
Default mode: enabled.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name> [recursive=False]
salt '*' pkg.remove <package1>,<package2>,<package3> [recursive=False]
salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False]
'''
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
old = list_pkgs()
# keep only installed packages
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = ['xbps-remove', '-y']
if recursive:
cmd.append('-R')
cmd.extend(targets)
__salt__['cmd.run'](cmd, output_loglevel='trace')
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new) |
def split_fusion_transcript(annotation_path, transcripts):
"""
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple
"""
annotation = collections.defaultdict(dict)
forward = 'ACGTN'
reverse = 'TGCAN'
trans = string.maketrans(forward, reverse)
# Pull in assembled transcript annotation
five_pr_splits = collections.defaultdict(dict)
three_pr_splits = collections.defaultdict(dict)
regex = re.compile(r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)')
with open(annotation_path, 'r') as gff:
for line in gff:
print(line)
if line.startswith('#'):
_, eyd, fusion = line.strip().split()
fusion, start_stop = fusion.split(':')
left_break, right_break = start_stop.split('-')
annotation[fusion][eyd] = {}
annotation[fusion][eyd]['left_break'] = left_break
annotation[fusion][eyd]['right_break'] = right_break
else:
line = line.strip().split('\t')
fusion = line[0]
strand = line[6]
block_start = line[3]
block_stop = line[4]
attr = line[8]
m = regex.search(attr)
if m:
transcript_id = m.group('Name')
rb = any([block_start == annotation[fusion][transcript_id]['right_break'],
block_stop == annotation[fusion][transcript_id]['right_break']])
lb = any([block_start == annotation[fusion][transcript_id]['left_break'],
block_stop == annotation[fusion][transcript_id]['left_break']])
if strand == '-' and rb:
transcript_split = int(m.group('stop')) + 1 # Off by one
# Take the reverse complement to orient transcripts from 5' to 3'
five_seq = transcripts[transcript_id][transcript_split:]
five_pr_splits[fusion][transcript_id] = five_seq.translate(trans)[::-1]
three_seq = transcripts[transcript_id][:transcript_split]
three_pr_splits[fusion][transcript_id] = three_seq.translate(trans)[::-1]
elif strand == '+' and lb:
transcript_split = int(m.group('stop'))
s1 = transcripts[transcript_id][:transcript_split]
five_pr_splits[fusion][transcript_id] = s1
s2 = transcripts[transcript_id][transcript_split:]
three_pr_splits[fusion][transcript_id] = s2
return five_pr_splits, three_pr_splits | Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple
### Response:
def split_fusion_transcript(annotation_path, transcripts):
"""
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple
"""
annotation = collections.defaultdict(dict)
forward = 'ACGTN'
reverse = 'TGCAN'
trans = string.maketrans(forward, reverse)
# Pull in assembled transcript annotation
five_pr_splits = collections.defaultdict(dict)
three_pr_splits = collections.defaultdict(dict)
regex = re.compile(r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)')
with open(annotation_path, 'r') as gff:
for line in gff:
print(line)
if line.startswith('#'):
_, eyd, fusion = line.strip().split()
fusion, start_stop = fusion.split(':')
left_break, right_break = start_stop.split('-')
annotation[fusion][eyd] = {}
annotation[fusion][eyd]['left_break'] = left_break
annotation[fusion][eyd]['right_break'] = right_break
else:
line = line.strip().split('\t')
fusion = line[0]
strand = line[6]
block_start = line[3]
block_stop = line[4]
attr = line[8]
m = regex.search(attr)
if m:
transcript_id = m.group('Name')
rb = any([block_start == annotation[fusion][transcript_id]['right_break'],
block_stop == annotation[fusion][transcript_id]['right_break']])
lb = any([block_start == annotation[fusion][transcript_id]['left_break'],
block_stop == annotation[fusion][transcript_id]['left_break']])
if strand == '-' and rb:
transcript_split = int(m.group('stop')) + 1 # Off by one
# Take the reverse complement to orient transcripts from 5' to 3'
five_seq = transcripts[transcript_id][transcript_split:]
five_pr_splits[fusion][transcript_id] = five_seq.translate(trans)[::-1]
three_seq = transcripts[transcript_id][:transcript_split]
three_pr_splits[fusion][transcript_id] = three_seq.translate(trans)[::-1]
elif strand == '+' and lb:
transcript_split = int(m.group('stop'))
s1 = transcripts[transcript_id][:transcript_split]
five_pr_splits[fusion][transcript_id] = s1
s2 = transcripts[transcript_id][transcript_split:]
three_pr_splits[fusion][transcript_id] = s2
return five_pr_splits, three_pr_splits |
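The 5'/3' orientation handling above relies on translate plus slice-reversal to produce a reverse complement. A standalone Python 3 illustration of that idiom, independent of the fusion bookkeeping:

trans = str.maketrans('ACGTN', 'TGCAN')
seq = 'ATTGC'
rev_comp = seq.translate(trans)[::-1]
print(rev_comp)   # GCAAT  (complement 'TAACG', then reversed)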
def register(lifter, arch_name):
"""
Registers a Lifter or Postprocessor to be used by pyvex. Lifters are given priority based on the order
in which they are registered. Postprocessors will be run in registration order.
:param lifter: The Lifter or Postprocessor to register
:vartype lifter: :class:`Lifter` or :class:`Postprocessor`
"""
if issubclass(lifter, Lifter):
l.debug("Registering lifter %s for architecture %s.", lifter.__name__, arch_name)
lifters[arch_name].append(lifter)
if issubclass(lifter, Postprocessor):
l.debug("Registering postprocessor %s for architecture %s.", lifter.__name__, arch_name)
postprocessors[arch_name].append(lifter) | Registers a Lifter or Postprocessor to be used by pyvex. Lifters are given priority based on the order
in which they are registered. Postprocessors will be run in registration order.
:param lifter: The Lifter or Postprocessor to register
:vartype lifter: :class:`Lifter` or :class:`Postprocessor` | Below is the the instruction that describes the task:
### Input:
Registers a Lifter or Postprocessor to be used by pyvex. Lifters are given priority based on the order
in which they are registered. Postprocessors will be run in registration order.
:param lifter: The Lifter or Postprocessor to register
:vartype lifter: :class:`Lifter` or :class:`Postprocessor`
### Response:
def register(lifter, arch_name):
"""
Registers a Lifter or Postprocessor to be used by pyvex. Lifters are given priority based on the order
in which they are registered. Postprocessors will be run in registration order.
:param lifter: The Lifter or Postprocessor to register
:vartype lifter: :class:`Lifter` or :class:`Postprocessor`
"""
if issubclass(lifter, Lifter):
l.debug("Registering lifter %s for architecture %s.", lifter.__name__, arch_name)
lifters[arch_name].append(lifter)
if issubclass(lifter, Postprocessor):
l.debug("Registering postprocessor %s for architecture %s.", lifter.__name__, arch_name)
postprocessors[arch_name].append(lifter) |
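A sketch of how a custom lifter might be registered; the class body is a placeholder and omits the methods a real pyvex Lifter subclass has to implement:

class MyLifter(Lifter):
    # a real lifter would override the lifting entry point; omitted in this sketch
    pass

register(MyLifter, 'AMD64')   # later lift attempts for AMD64 will try MyLifter in registration order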
def logger_init():
"""Initialize logger instance."""
log = logging.getLogger("pyinotify")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
log.addHandler(console_handler)
log.setLevel(20)
return log | Initialize logger instance. | Below is the the instruction that describes the task:
### Input:
Initialize logger instance.
### Response:
def logger_init():
"""Initialize logger instance."""
log = logging.getLogger("pyinotify")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
log.addHandler(console_handler)
log.setLevel(20)
return log |
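Usage is direct; note that level 20 corresponds to logging.INFO, so DEBUG output is suppressed:

log = logger_init()
log.info("watch manager started")      # emitted (INFO == 20)
log.debug("descriptor table dump")     # suppressed (DEBUG == 10 < 20)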
def get_file_stream_api(self):
"""This creates a new file pusher thread. Call start to initiate the thread that talks to W&B"""
if not self._file_stream_api:
if self._current_run_id is None:
raise UsageError(
'Must have a current run to use file stream API.')
self._file_stream_api = FileStreamApi(self, self._current_run_id)
return self._file_stream_api | This creates a new file pusher thread. Call start to initiate the thread that talks to W&B | Below is the the instruction that describes the task:
### Input:
This creates a new file pusher thread. Call start to initiate the thread that talks to W&B
### Response:
def get_file_stream_api(self):
"""This creates a new file pusher thread. Call start to initiate the thread that talks to W&B"""
if not self._file_stream_api:
if self._current_run_id is None:
raise UsageError(
'Must have a current run to use file stream API.')
self._file_stream_api = FileStreamApi(self, self._current_run_id)
return self._file_stream_api |
def push_design_documents(self, design_path):
"""
Push the design documents stored in `design_path` to the server
"""
for db_name in os.listdir(design_path):
if db_name.startswith("__") or db_name.startswith("."):
continue
db_path = os.path.join(design_path, db_name)
doc = self._folder_to_dict(db_path)
doc_id = "_design/openag"
doc["_id"] = doc_id
db = self[db_name]
if doc_id in db:
old_doc = db[doc_id]
doc["_rev"] = old_doc["_rev"]
if doc == old_doc:
continue
db[doc_id] = doc | Push the design documents stored in `design_path` to the server | Below is the the instruction that describes the task:
### Input:
Push the design documents stored in `design_path` to the server
### Response:
def push_design_documents(self, design_path):
"""
Push the design documents stored in `design_path` to the server
"""
for db_name in os.listdir(design_path):
if db_name.startswith("__") or db_name.startswith("."):
continue
db_path = os.path.join(design_path, db_name)
doc = self._folder_to_dict(db_path)
doc_id = "_design/openag"
doc["_id"] = doc_id
db = self[db_name]
if doc_id in db:
old_doc = db[doc_id]
doc["_rev"] = old_doc["_rev"]
if doc == old_doc:
continue
db[doc_id] = doc |
def edges_to_dict_of_dataframes(grid, edges):
"""
Export edges to DataFrame
Parameters
----------
grid: ding0.Network
edges: list
Edges of Ding0.Network graph
Returns
-------
edges_dict: dict
"""
omega = 2 * pi * 50
srid = int(cfg_ding0.get('geo', 'srid'))
lines = {'line_id': [], 'bus0': [], 'bus1': [], 'x': [], 'r': [],
's_nom': [], 'length': [], 'cables': [], 'geom': [],
'grid_id': []}
# iterate over edges and add them one by one
for edge in edges:
line_name = '_'.join(['MV',
str(grid.id_db),
'lin',
str(edge['branch'].id_db)])
# TODO: find the real cause for being L, C, I_th_max type of Series
if (isinstance(edge['branch'].type['L'], Series) or
isinstance(edge['branch'].type['C'], Series)):
x = omega * edge['branch'].type['L'].values[0] * 1e-3
else:
x = omega * edge['branch'].type['L'] * 1e-3
if isinstance(edge['branch'].type['R'], Series):
r = edge['branch'].type['R'].values[0]
else:
r = edge['branch'].type['R']
if (isinstance(edge['branch'].type['I_max_th'], Series) or
isinstance(edge['branch'].type['U_n'], Series)):
s_nom = sqrt(3) * edge['branch'].type['I_max_th'].values[0] * \
edge['branch'].type['U_n'].values[0]
else:
s_nom = sqrt(3) * edge['branch'].type['I_max_th'] * \
edge['branch'].type['U_n']
# get lengths of line
l = edge['branch'].length / 1e3
lines['line_id'].append(line_name)
lines['bus0'].append(edge['adj_nodes'][0].pypsa_id)
lines['bus1'].append(edge['adj_nodes'][1].pypsa_id)
lines['x'].append(x * l)
lines['r'].append(r * l)
lines['s_nom'].append(s_nom)
lines['length'].append(l)
lines['cables'].append(3)
lines['geom'].append(from_shape(
LineString([edge['adj_nodes'][0].geo_data,
edge['adj_nodes'][1].geo_data]),
srid=srid))
lines['grid_id'].append(grid.id_db)
return {'Line': DataFrame(lines).set_index('line_id')} | Export edges to DataFrame
Parameters
----------
grid: ding0.Network
edges: list
Edges of Ding0.Network graph
Returns
-------
edges_dict: dict | Below is the the instruction that describes the task:
### Input:
Export edges to DataFrame
Parameters
----------
grid: ding0.Network
edges: list
Edges of Ding0.Network graph
Returns
-------
edges_dict: dict
### Response:
def edges_to_dict_of_dataframes(grid, edges):
"""
Export edges to DataFrame
Parameters
----------
grid: ding0.Network
edges: list
Edges of Ding0.Network graph
Returns
-------
edges_dict: dict
"""
omega = 2 * pi * 50
srid = int(cfg_ding0.get('geo', 'srid'))
lines = {'line_id': [], 'bus0': [], 'bus1': [], 'x': [], 'r': [],
's_nom': [], 'length': [], 'cables': [], 'geom': [],
'grid_id': []}
# iterate over edges and add them one by one
for edge in edges:
line_name = '_'.join(['MV',
str(grid.id_db),
'lin',
str(edge['branch'].id_db)])
# TODO: find the real cause for being L, C, I_th_max type of Series
if (isinstance(edge['branch'].type['L'], Series) or
isinstance(edge['branch'].type['C'], Series)):
x = omega * edge['branch'].type['L'].values[0] * 1e-3
else:
x = omega * edge['branch'].type['L'] * 1e-3
if isinstance(edge['branch'].type['R'], Series):
r = edge['branch'].type['R'].values[0]
else:
r = edge['branch'].type['R']
if (isinstance(edge['branch'].type['I_max_th'], Series) or
isinstance(edge['branch'].type['U_n'], Series)):
s_nom = sqrt(3) * edge['branch'].type['I_max_th'].values[0] * \
edge['branch'].type['U_n'].values[0]
else:
s_nom = sqrt(3) * edge['branch'].type['I_max_th'] * \
edge['branch'].type['U_n']
# get lengths of line
l = edge['branch'].length / 1e3
lines['line_id'].append(line_name)
lines['bus0'].append(edge['adj_nodes'][0].pypsa_id)
lines['bus1'].append(edge['adj_nodes'][1].pypsa_id)
lines['x'].append(x * l)
lines['r'].append(r * l)
lines['s_nom'].append(s_nom)
lines['length'].append(l)
lines['cables'].append(3)
lines['geom'].append(from_shape(
LineString([edge['adj_nodes'][0].geo_data,
edge['adj_nodes'][1].geo_data]),
srid=srid))
lines['grid_id'].append(grid.id_db)
return {'Line': DataFrame(lines).set_index('line_id')} |
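A worked example of the per-line arithmetic above, with made-up cable parameters (L in mH/km, R in ohm/km, I_max_th in A, U_n in kV, length in m); the values are illustrative only:

from math import pi, sqrt

omega = 2 * pi * 50          # rad/s at 50 Hz
L_mH_per_km = 0.3            # hypothetical cable inductance
R_ohm_per_km = 0.2           # hypothetical cable resistance
I_max_th = 210.0             # A
U_n = 20.0                   # kV
length_m = 1500.0

l = length_m / 1e3                       # 1.5 km
x = omega * L_mH_per_km * 1e-3 * l       # ~0.141 ohm for the whole line
r = R_ohm_per_km * l                     # 0.3 ohm
s_nom = sqrt(3) * I_max_th * U_n         # ~7275 kVA rating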
def _secondary_max(self):
"""Getter for the maximum series value"""
return (
self.secondary_range[1]
if (self.secondary_range
and self.secondary_range[1] is not None) else
(max(self._secondary_values) if self._secondary_values else None)
) | Getter for the maximum series value | Below is the the instruction that describes the task:
### Input:
Getter for the maximum series value
### Response:
def _secondary_max(self):
"""Getter for the maximum series value"""
return (
self.secondary_range[1]
if (self.secondary_range
and self.secondary_range[1] is not None) else
(max(self._secondary_values) if self._secondary_values else None)
) |
def _connect(self):
"""Connect to the serial port. This should be run in a new thread."""
while self.protocol:
_LOGGER.info('Trying to connect to %s', self.port)
try:
ser = serial.serial_for_url(
self.port, self.baud, timeout=self.timeout)
except serial.SerialException:
_LOGGER.error('Unable to connect to %s', self.port)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
else:
transport = serial.threaded.ReaderThread(
ser, lambda: self.protocol)
transport.daemon = False
poll_thread = threading.Thread(target=self._poll_queue)
self._stop_event.clear()
poll_thread.start()
transport.start()
transport.connect()
return | Connect to the serial port. This should be run in a new thread. | Below is the the instruction that describes the task:
### Input:
Connect to the serial port. This should be run in a new thread.
### Response:
def _connect(self):
"""Connect to the serial port. This should be run in a new thread."""
while self.protocol:
_LOGGER.info('Trying to connect to %s', self.port)
try:
ser = serial.serial_for_url(
self.port, self.baud, timeout=self.timeout)
except serial.SerialException:
_LOGGER.error('Unable to connect to %s', self.port)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
else:
transport = serial.threaded.ReaderThread(
ser, lambda: self.protocol)
transport.daemon = False
poll_thread = threading.Thread(target=self._poll_queue)
self._stop_event.clear()
poll_thread.start()
transport.start()
transport.connect()
return |
def add(self, *args, **kwargs):
"""Add Cookie objects by their names, or create new ones under
specified names.
Any unnamed arguments are interpreted as existing cookies, and
are added under the value in their .name attribute. With keyword
arguments, the key is interpreted as the cookie name and the
value as the UNENCODED value stored in the cookie.
"""
# Only the first one is accessible through the main interface,
# others accessible through get_all (all_cookies).
for cookie in args:
self.all_cookies.append(cookie)
if cookie.name in self:
continue
self[cookie.name] = cookie
for key, value in kwargs.items():
cookie = self.cookie_class(key, value)
self.all_cookies.append(cookie)
if key in self:
continue
self[key] = cookie | Add Cookie objects by their names, or create new ones under
specified names.
Any unnamed arguments are interpreted as existing cookies, and
are added under the value in their .name attribute. With keyword
arguments, the key is interpreted as the cookie name and the
value as the UNENCODED value stored in the cookie. | Below is the the instruction that describes the task:
### Input:
Add Cookie objects by their names, or create new ones under
specified names.
Any unnamed arguments are interpreted as existing cookies, and
are added under the value in their .name attribute. With keyword
arguments, the key is interpreted as the cookie name and the
value as the UNENCODED value stored in the cookie.
### Response:
def add(self, *args, **kwargs):
"""Add Cookie objects by their names, or create new ones under
specified names.
Any unnamed arguments are interpreted as existing cookies, and
are added under the value in their .name attribute. With keyword
arguments, the key is interpreted as the cookie name and the
value as the UNENCODED value stored in the cookie.
"""
# Only the first one is accessible through the main interface,
# others accessible through get_all (all_cookies).
for cookie in args:
self.all_cookies.append(cookie)
if cookie.name in self:
continue
self[cookie.name] = cookie
for key, value in kwargs.items():
cookie = self.cookie_class(key, value)
self.all_cookies.append(cookie)
if key in self:
continue
self[key] = cookie |
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__ | Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline. | Below is the the instruction that describes the task:
### Input:
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
### Response:
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__ |
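A quick check with scikit-learn objects, showing that pipelines resolve to the class name of their final step:

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

print(get_model_name(LogisticRegression()))                       # 'LogisticRegression'
print(get_model_name(Pipeline([('scale', StandardScaler()),
                               ('clf', LogisticRegression())])))  # 'LogisticRegression'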
def predict(self, control=None, control_matrix=None,
process_matrix=None, process_covariance=None):
"""
Predict the next *a priori* state mean and covariance given the last
posterior. As a special case the first call to this method will
initialise the posterior and prior estimates from the
*initial_state_estimate* and *initial_covariance* arguments passed when
this object was created. In this case the *process_matrix* and
*process_covariance* arguments are unused but are still recorded in the
:py:attr:`.process_matrices` and :py:attr:`.process_covariances`
attributes.
Args:
control (array or None): If specified, the control input for this
predict step.
control_matrix (array or None): If specified, the control matrix to
use for this time step.
process_matrix (array or None): If specified, the process matrix to
use for this time step.
process_covariance (array or None): If specified, the process
covariance to use for this time step.
"""
# Sanitise arguments
if process_matrix is None:
process_matrix = self._defaults['process_matrix']
if process_covariance is None:
process_covariance = self._defaults['process_covariance']
if control_matrix is None:
control_matrix = self._defaults['control_matrix']
if len(self.prior_state_estimates) == 0:
# Special case: first call
self.prior_state_estimates.append(self._initial_state_estimate)
else:
# Usual case
process_matrix = as_square_array(process_matrix)
process_covariance = as_square_array(process_covariance)
if process_matrix.shape[0] != process_covariance.shape[0]:
raise ValueError("Process matrix and noise have incompatible " \
"shapes: {} vs {}".format(
process_matrix.shape, process_covariance.shape))
if control_matrix is not None:
control_matrix = np.atleast_2d(control_matrix)
if control is not None:
control = np.atleast_1d(control)
# Update state mean and covariance
prev_posterior_mean = self.posterior_state_estimates[-1].mean
prev_posterior_cov = self.posterior_state_estimates[-1].cov
prior_mean = process_matrix.dot(prev_posterior_mean)
if control is not None:
prior_mean += control_matrix.dot(control)
prior_cov = process_matrix.dot(prev_posterior_cov).dot(
process_matrix.T) + process_covariance
self.prior_state_estimates.append(
MultivariateNormal(mean=prior_mean, cov=prior_cov))
# Record transition matrix
self.process_matrices.append(process_matrix)
self.process_covariances.append(process_covariance)
# Append empty list to measurements for this time step
self.measurements.append([])
self.measurement_matrices.append([])
# Seed posterior estimates with the prior one.
self.posterior_state_estimates.append(self.prior_state_estimates[-1]) | Predict the next *a priori* state mean and covariance given the last
posterior. As a special case the first call to this method will
initialise the posterior and prior estimates from the
*initial_state_estimate* and *initial_covariance* arguments passed when
this object was created. In this case the *process_matrix* and
*process_covariance* arguments are unused but are still recorded in the
:py:attr:`.process_matrices` and :py:attr:`.process_covariances`
attributes.
Args:
control (array or None): If specified, the control input for this
predict step.
control_matrix (array or None): If specified, the control matrix to
use for this time step.
process_matrix (array or None): If specified, the process matrix to
use for this time step.
process_covariance (array or None): If specified, the process
covariance to use for this time step. | Below is the the instruction that describes the task:
### Input:
Predict the next *a priori* state mean and covariance given the last
posterior. As a special case the first call to this method will
initialise the posterior and prior estimates from the
*initial_state_estimate* and *initial_covariance* arguments passed when
this object was created. In this case the *process_matrix* and
*process_covariance* arguments are unused but are still recorded in the
:py:attr:`.process_matrices` and :py:attr:`.process_covariances`
attributes.
Args:
control (array or None): If specified, the control input for this
predict step.
control_matrix (array or None): If specified, the control matrix to
use for this time step.
process_matrix (array or None): If specified, the process matrix to
use for this time step.
process_covariance (array or None): If specified, the process
covariance to use for this time step.
### Response:
def predict(self, control=None, control_matrix=None,
process_matrix=None, process_covariance=None):
"""
Predict the next *a priori* state mean and covariance given the last
posterior. As a special case the first call to this method will
initialise the posterior and prior estimates from the
*initial_state_estimate* and *initial_covariance* arguments passed when
this object was created. In this case the *process_matrix* and
*process_covariance* arguments are unused but are still recorded in the
:py:attr:`.process_matrices` and :py:attr:`.process_covariances`
attributes.
Args:
control (array or None): If specified, the control input for this
predict step.
control_matrix (array or None): If specified, the control matrix to
use for this time step.
process_matrix (array or None): If specified, the process matrix to
use for this time step.
process_covariance (array or None): If specified, the process
covariance to use for this time step.
"""
# Sanitise arguments
if process_matrix is None:
process_matrix = self._defaults['process_matrix']
if process_covariance is None:
process_covariance = self._defaults['process_covariance']
if control_matrix is None:
control_matrix = self._defaults['control_matrix']
if len(self.prior_state_estimates) == 0:
# Special case: first call
self.prior_state_estimates.append(self._initial_state_estimate)
else:
# Usual case
process_matrix = as_square_array(process_matrix)
process_covariance = as_square_array(process_covariance)
if process_matrix.shape[0] != process_covariance.shape[0]:
raise ValueError("Process matrix and noise have incompatible " \
"shapes: {} vs {}".format(
process_matrix.shape, process_covariance.shape))
if control_matrix is not None:
control_matrix = np.atleast_2d(control_matrix)
if control is not None:
control = np.atleast_1d(control)
# Update state mean and covariance
prev_posterior_mean = self.posterior_state_estimates[-1].mean
prev_posterior_cov = self.posterior_state_estimates[-1].cov
prior_mean = process_matrix.dot(prev_posterior_mean)
if control is not None:
prior_mean += control_matrix.dot(control)
prior_cov = process_matrix.dot(prev_posterior_cov).dot(
process_matrix.T) + process_covariance
self.prior_state_estimates.append(
MultivariateNormal(mean=prior_mean, cov=prior_cov))
# Record transition matrix
self.process_matrices.append(process_matrix)
self.process_covariances.append(process_covariance)
# Append empty list to measurements for this time step
self.measurements.append([])
self.measurement_matrices.append([])
# Seed posterior estimates with the prior one.
self.posterior_state_estimates.append(self.prior_state_estimates[-1]) |
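A sketch of driving predict() with a constant-velocity model; `kf` stands for an already-constructed filter instance (its constructor arguments are not shown in this excerpt) and the noise magnitude is arbitrary:

import numpy as np

dt = 1.0
F = np.array([[1.0, dt],
              [0.0, 1.0]])          # position/velocity transition matrix
Q = 0.01 * np.eye(2)                # hypothetical process noise covariance

kf.predict(process_matrix=F, process_covariance=Q)   # first call only seeds from the initial estimate
kf.predict(process_matrix=F, process_covariance=Q)   # subsequent calls propagate the last posterior
print(kf.prior_state_estimates[-1].mean)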
def hosted_number_orders(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
"""
if self._hosted_number_orders is None:
self._hosted_number_orders = HostedNumberOrderList(self)
return self._hosted_number_orders | :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList | Below is the the instruction that describes the task:
### Input:
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
### Response:
def hosted_number_orders(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
"""
if self._hosted_number_orders is None:
self._hosted_number_orders = HostedNumberOrderList(self)
return self._hosted_number_orders |
def condition_from_code(condcode):
"""Get the condition name from the condition code."""
if condcode in __BRCONDITIONS:
cond_data = __BRCONDITIONS[condcode]
return {CONDCODE: condcode,
CONDITION: cond_data[0],
DETAILED: cond_data[1],
EXACT: cond_data[2],
EXACTNL: cond_data[3],
}
return None | Get the condition name from the condition code. | Below is the the instruction that describes the task:
### Input:
Get the condition name from the condition code.
### Response:
def condition_from_code(condcode):
"""Get the condition name from the condition code."""
if condcode in __BRCONDITIONS:
cond_data = __BRCONDITIONS[condcode]
return {CONDCODE: condcode,
CONDITION: cond_data[0],
DETAILED: cond_data[1],
EXACT: cond_data[2],
EXACTNL: cond_data[3],
}
return None |
def is_polynomial(self):
"""
Tells whether it is a linear combination of natural powers of ``x``.
"""
return all(isinstance(k, INT_TYPES) and k >= 0 for k in self._data) | Tells whether it is a linear combination of natural powers of ``x``. | Below is the the instruction that describes the task:
### Input:
Tells whether it is a linear combination of natural powers of ``x``.
### Response:
def is_polynomial(self):
"""
Tells whether it is a linear combination of natural powers of ``x``.
"""
return all(isinstance(k, INT_TYPES) and k >= 0 for k in self._data) |
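Assuming `self._data` maps exponents of x to coefficients (an assumption based on the check above), the test behaves like this standalone snippet:

INT_TYPES = (int,)

def is_polynomial_like(data):
    return all(isinstance(k, INT_TYPES) and k >= 0 for k in data)

print(is_polynomial_like({0: 2, 3: 1}))    # True  -> x**3 + 2
print(is_polynomial_like({-1: 4, 0: 2}))   # False -> contains x**-1
print(is_polynomial_like({0.5: 1}))        # False -> fractional power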
def main():
data = {'demo':{'foo': '<foo>', 'bar': ['1', '2']}}
# Write XML to a file, given a file name
lazyxml.dump(data, 'xml/dump.xml')
# Write XML to a file, given a file handle
with open('xml/dump-fp.xml', 'w') as fp:
lazyxml.dump(data, fp)
# Write XML to a file, given a file-like object
from cStringIO import StringIO
buffer = StringIO()
lazyxml.dump(data, buffer)
print buffer.getvalue()
# <?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
buffer.close()
# Default output
print lazyxml.dumps(data)
# '<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'
# Without declaring the XML header
print lazyxml.dumps(data, header_declare=False)
# '<demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'
# Without CDATA sections
print lazyxml.dumps(data, cdata=False)
# '<?xml version="1.0" encoding="utf-8"?><demo><foo><foo></foo><bar>1</bar><bar>2</bar></demo>'
# Indented, pretty-printed XML
print lazyxml.dumps(data, indent=' ' * 4)
# <?xml version="1.0" encoding="utf-8"?>
# <demo>
# <foo><![CDATA[<foo>]]></foo>
# <bar><![CDATA[1]]></bar>
# <bar><![CDATA[2]]></bar>
# </demo>
# Sorted by tag name
print lazyxml.dumps(data, ksort=True)
# '<?xml version="1.0" encoding="utf-8"?><demo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar><foo><![CDATA[<foo>]]></foo></demo>'
# Sorted by tag name in reverse order
print lazyxml.dumps(data, ksort=True, reverse=True)
# '<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'
# XML data with attributes
kw = {
'hasattr': True,
'ksort': True,
'indent': ' ' * 4,
'attrkey': ATTRKEY,
'valuekey': VALUEKEY
}
print lazyxml.dumps(ATTRDATA, **kw)
"""
<root a1="1" a2="2">
<test1 a="1" b="2" c="3">
<normal index="5" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</normal>
<repeat1 index="1" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</repeat1>
<repeat1 index="1" required="false">
<bar><![CDATA[3]]></bar>
<bar><![CDATA[4]]></bar>
<foo><![CDATA[<foo-2>]]></foo>
</repeat1>
<repeat2 index="2" required="true"><![CDATA[1]]></repeat2>
<repeat2 index="2" required="true"><![CDATA[2]]></repeat2>
<repeat3 index="3" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
</repeat3>
<repeat3 index="4" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
<sub><![CDATA[3]]></sub>
</repeat3>
</test1>
<test2 a="1" b="2" c="3"><![CDATA[测试用]]></test2>
</root>
""" | <root a1="1" a2="2">
<test1 a="1" b="2" c="3">
<normal index="5" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</normal>
<repeat1 index="1" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</repeat1>
<repeat1 index="1" required="false">
<bar><![CDATA[3]]></bar>
<bar><![CDATA[4]]></bar>
<foo><![CDATA[<foo-2>]]></foo>
</repeat1>
<repeat2 index="2" required="true"><![CDATA[1]]></repeat2>
<repeat2 index="2" required="true"><![CDATA[2]]></repeat2>
<repeat3 index="3" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
</repeat3>
<repeat3 index="4" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
<sub><![CDATA[3]]></sub>
</repeat3>
</test1>
<test2 a="1" b="2" c="3"><![CDATA[测试用]]></test2>
</root> | Below is the the instruction that describes the task:
### Input:
<root a1="1" a2="2">
<test1 a="1" b="2" c="3">
<normal index="5" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</normal>
<repeat1 index="1" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</repeat1>
<repeat1 index="1" required="false">
<bar><![CDATA[3]]></bar>
<bar><![CDATA[4]]></bar>
<foo><![CDATA[<foo-2>]]></foo>
</repeat1>
<repeat2 index="2" required="true"><![CDATA[1]]></repeat2>
<repeat2 index="2" required="true"><![CDATA[2]]></repeat2>
<repeat3 index="3" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
</repeat3>
<repeat3 index="4" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
<sub><![CDATA[3]]></sub>
</repeat3>
</test1>
<test2 a="1" b="2" c="3"><![CDATA[测试用]]></test2>
</root>
### Response:
def main():
data = {'demo':{'foo': '<foo>', 'bar': ['1', '2']}}
# Write XML to a file, given a file name
lazyxml.dump(data, 'xml/dump.xml')
# Write XML to a file, given a file handle
with open('xml/dump-fp.xml', 'w') as fp:
lazyxml.dump(data, fp)
# Write XML to a file, given a file-like object
from cStringIO import StringIO
buffer = StringIO()
lazyxml.dump(data, buffer)
print buffer.getvalue()
# <?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
buffer.close()
# Default output
print lazyxml.dumps(data)
# '<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'
# Without declaring the XML header
print lazyxml.dumps(data, header_declare=False)
# '<demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'
# Without CDATA sections
print lazyxml.dumps(data, cdata=False)
# '<?xml version="1.0" encoding="utf-8"?><demo><foo><foo></foo><bar>1</bar><bar>2</bar></demo>'
# Indented, pretty-printed XML
print lazyxml.dumps(data, indent=' ' * 4)
# <?xml version="1.0" encoding="utf-8"?>
# <demo>
# <foo><![CDATA[<foo>]]></foo>
# <bar><![CDATA[1]]></bar>
# <bar><![CDATA[2]]></bar>
# </demo>
# Sorted by tag name
print lazyxml.dumps(data, ksort=True)
# '<?xml version="1.0" encoding="utf-8"?><demo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar><foo><![CDATA[<foo>]]></foo></demo>'
# Sorted by tag name in reverse order
print lazyxml.dumps(data, ksort=True, reverse=True)
# '<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'
# XML data with attributes
kw = {
'hasattr': True,
'ksort': True,
'indent': ' ' * 4,
'attrkey': ATTRKEY,
'valuekey': VALUEKEY
}
print lazyxml.dumps(ATTRDATA, **kw)
"""
<root a1="1" a2="2">
<test1 a="1" b="2" c="3">
<normal index="5" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</normal>
<repeat1 index="1" required="false">
<bar><![CDATA[1]]></bar>
<bar><![CDATA[2]]></bar>
<foo><![CDATA[<foo-1>]]></foo>
</repeat1>
<repeat1 index="1" required="false">
<bar><![CDATA[3]]></bar>
<bar><![CDATA[4]]></bar>
<foo><![CDATA[<foo-2>]]></foo>
</repeat1>
<repeat2 index="2" required="true"><![CDATA[1]]></repeat2>
<repeat2 index="2" required="true"><![CDATA[2]]></repeat2>
<repeat3 index="3" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
</repeat3>
<repeat3 index="4" required="true">
<sub><![CDATA[1]]></sub>
<sub><![CDATA[2]]></sub>
<sub><![CDATA[3]]></sub>
</repeat3>
</test1>
<test2 a="1" b="2" c="3"><![CDATA[测试用]]></test2>
</root>
""" |
def cells_rt_meta(workbook, sheet, row, col):
"""
Traverse all cells in a row. If you find new data in a cell, add it to the list.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list: Cell data for a specific row
"""
logger_excel.info("enter cells_rt_meta")
col_loop = 0
cell_data = []
temp_sheet = workbook.sheet_by_name(sheet)
while col_loop < temp_sheet.ncols:
col += 1
col_loop += 1
try:
if temp_sheet.cell_value(row, col) != xlrd.empty_cell and temp_sheet.cell_value(row, col) != '':
cell_data.append(temp_sheet.cell_value(row, col))
except IndexError as e:
logger_excel.warn("cells_rt_meta: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e))
logger_excel.info("exit cells_right_meta")
return cell_data | Traverse all cells in a row. If you find new data in a cell, add it to the list.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list: Cell data for a specific row | Below is the the instruction that describes the task:
### Input:
Traverse all cells in a row. If you find new data in a cell, add it to the list.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list: Cell data for a specific row
### Response:
def cells_rt_meta(workbook, sheet, row, col):
"""
Traverse all cells in a row. If you find new data in a cell, add it to the list.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list: Cell data for a specific row
"""
logger_excel.info("enter cells_rt_meta")
col_loop = 0
cell_data = []
temp_sheet = workbook.sheet_by_name(sheet)
while col_loop < temp_sheet.ncols:
col += 1
col_loop += 1
try:
if temp_sheet.cell_value(row, col) != xlrd.empty_cell and temp_sheet.cell_value(row, col) != '':
cell_data.append(temp_sheet.cell_value(row, col))
except IndexError as e:
logger_excel.warn("cells_rt_meta: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e))
logger_excel.info("exit cells_right_meta")
return cell_data |
def get_list(self, terms, limit=0, sort=False, ranks=None):
"""
Get the specified cards from the stack.
:arg term:
The search term. Can be a card full name, value, suit,
abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
The rank dict to reference for sorting. If ``None``, it will
default to ``DEFAULT_RANKS``.
:returns:
A list of the specified cards, if found.
"""
ranks = ranks or self.ranks
got_cards = []
try:
indices = self.find_list(terms, limit=limit)
got_cards = [self.cards[i] for i in indices if self.cards[i]
not in got_cards]
self.cards = [v for i, v in enumerate(self.cards) if
i not in indices]
except:
indices = []
for item in terms:
try:
card = self.cards[item]
if card not in got_cards:
got_cards.append(card)
indices.append(item)
except:
indices += self.find(item, limit=limit)
got_cards += [self.cards[i] for i in indices if
self.cards[i] not in got_cards]
self.cards = [v for i, v in enumerate(self.cards) if
i not in indices]
if sort:
got_cards = sort_cards(got_cards, ranks)
return got_cards | Get the specified cards from the stack.
:arg term:
The search term. Can be a card full name, value, suit,
abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
The rank dict to reference for sorting. If ``None``, it will
default to ``DEFAULT_RANKS``.
:returns:
A list of the specified cards, if found. | Below is the the instruction that describes the task:
### Input:
Get the specified cards from the stack.
:arg term:
The search term. Can be a card full name, value, suit,
abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
The rank dict to reference for sorting. If ``None``, it will
default to ``DEFAULT_RANKS``.
:returns:
A list of the specified cards, if found.
### Response:
def get_list(self, terms, limit=0, sort=False, ranks=None):
"""
Get the specified cards from the stack.
:arg term:
The search term. Can be a card full name, value, suit,
abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
The rank dict to reference for sorting. If ``None``, it will
default to ``DEFAULT_RANKS``.
:returns:
A list of the specified cards, if found.
"""
ranks = ranks or self.ranks
got_cards = []
try:
indices = self.find_list(terms, limit=limit)
got_cards = [self.cards[i] for i in indices if self.cards[i]
not in got_cards]
self.cards = [v for i, v in enumerate(self.cards) if
i not in indices]
except:
indices = []
for item in terms:
try:
card = self.cards[item]
if card not in got_cards:
got_cards.append(card)
indices.append(item)
except:
indices += self.find(item, limit=limit)
got_cards += [self.cards[i] for i in indices if
self.cards[i] not in got_cards]
self.cards = [v for i, v in enumerate(self.cards) if
i not in indices]
if sort:
got_cards = sort_cards(got_cards, ranks)
return got_cards |
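A usage sketch, assuming `stack` is a populated instance holding standard playing cards (the card names below are illustrative):

hand = stack.get_list(['Ace of Spades', 'Queen of Hearts'], sort=True)
print([str(card) for card in hand])   # matched cards are also removed from stack.cards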
def _get_pattern(self, pattern_id):
"""Get pattern item by id."""
for key in ('PATTERNS1', 'PATTERNS2', 'PATTERNS3'):
if key in self.tagged_blocks:
data = self.tagged_blocks.get_data(key)
for pattern in data:
if pattern.pattern_id == pattern_id:
return pattern
return None | Get pattern item by id. | Below is the the instruction that describes the task:
### Input:
Get pattern item by id.
### Response:
def _get_pattern(self, pattern_id):
"""Get pattern item by id."""
for key in ('PATTERNS1', 'PATTERNS2', 'PATTERNS3'):
if key in self.tagged_blocks:
data = self.tagged_blocks.get_data(key)
for pattern in data:
if pattern.pattern_id == pattern_id:
return pattern
return None |
def uuid(cls):
"""UUID column, or synonym to existing :attr:`id` column if that is a UUID"""
if hasattr(cls, '__uuid_primary_key__') and cls.__uuid_primary_key__:
return synonym('id')
else:
return immutable(Column(UUIDType(binary=False), default=uuid_.uuid4, unique=True, nullable=False)) | UUID column, or synonym to existing :attr:`id` column if that is a UUID | Below is the the instruction that describes the task:
### Input:
UUID column, or synonym to existing :attr:`id` column if that is a UUID
### Response:
def uuid(cls):
"""UUID column, or synonym to existing :attr:`id` column if that is a UUID"""
if hasattr(cls, '__uuid_primary_key__') and cls.__uuid_primary_key__:
return synonym('id')
else:
return immutable(Column(UUIDType(binary=False), default=uuid_.uuid4, unique=True, nullable=False)) |
def continuousGenerator(self, request):
"""
Returns a generator over the (continuous, nextPageToken) pairs
defined by the (JSON string) request.
"""
compoundId = None
if request.continuous_set_id != "":
compoundId = datamodel.ContinuousSetCompoundId.parse(
request.continuous_set_id)
if compoundId is None:
raise exceptions.ContinuousSetNotSpecifiedException()
dataset = self.getDataRepository().getDataset(
compoundId.dataset_id)
continuousSet = dataset.getContinuousSet(request.continuous_set_id)
iterator = paging.ContinuousIterator(request, continuousSet)
return iterator | Returns a generator over the (continuous, nextPageToken) pairs
defined by the (JSON string) request. | Below is the the instruction that describes the task:
### Input:
Returns a generator over the (continuous, nextPageToken) pairs
defined by the (JSON string) request.
### Response:
def continuousGenerator(self, request):
"""
Returns a generator over the (continuous, nextPageToken) pairs
defined by the (JSON string) request.
"""
compoundId = None
if request.continuous_set_id != "":
compoundId = datamodel.ContinuousSetCompoundId.parse(
request.continuous_set_id)
if compoundId is None:
raise exceptions.ContinuousSetNotSpecifiedException()
dataset = self.getDataRepository().getDataset(
compoundId.dataset_id)
continuousSet = dataset.getContinuousSet(request.continuous_set_id)
iterator = paging.ContinuousIterator(request, continuousSet)
return iterator |
def render_addPersonForm(self, ctx, data):
"""
Create and return a L{liveform.LiveForm} for creating a new L{Person}.
"""
addPersonForm = liveform.LiveForm(
self.addPerson, self._baseParameters, description='Add Person')
addPersonForm.compact()
addPersonForm.jsClass = u'Mantissa.People.AddPersonForm'
addPersonForm.setFragmentParent(self)
return addPersonForm | Create and return a L{liveform.LiveForm} for creating a new L{Person}. | Below is the the instruction that describes the task:
### Input:
Create and return a L{liveform.LiveForm} for creating a new L{Person}.
### Response:
def render_addPersonForm(self, ctx, data):
"""
Create and return a L{liveform.LiveForm} for creating a new L{Person}.
"""
addPersonForm = liveform.LiveForm(
self.addPerson, self._baseParameters, description='Add Person')
addPersonForm.compact()
addPersonForm.jsClass = u'Mantissa.People.AddPersonForm'
addPersonForm.setFragmentParent(self)
return addPersonForm |
def add_rpt(self, sequence, mod, pt):
"""Add a repeater to the previous sequence"""
modstr = self.value(mod)
if modstr == '!!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a lookahead rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
if modstr == '!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a negated rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
oldnode = sequence
sequence.parser_tree = pt.functor(oldnode.parser_tree)
return True | Add a repeater to the previous sequence | Below is the the instruction that describes the task:
### Input:
Add a repeater to the previous sequence
### Response:
def add_rpt(self, sequence, mod, pt):
"""Add a repeater to the previous sequence"""
modstr = self.value(mod)
if modstr == '!!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a lookahead rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
if modstr == '!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a negated rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
oldnode = sequence
sequence.parser_tree = pt.functor(oldnode.parser_tree)
return True |
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css) | calculates sum of squares for a class | Below is the the instruction that describes the task:
### Input:
calculates sum of squares for a class
### Response:
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css) |
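The quantity computed is the within-class sum of squared deviations; a standalone check with numpy:

import numpy as np

yc = np.array([2.0, 4.0, 6.0])
css = yc - yc.mean()          # deviations: [-2, 0, 2]
print(sum(css * css))         # 8.0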
def backwards(self, orm):
"Write your backwards methods here."
from django.contrib.auth.models import Group
projects = orm['samples.Project'].objects.all()
names = [PROJECT_GROUP_TEMPLATE.format(p.name) for p in projects]
# Remove groups named after these teams
Group.objects.filter(name__in=names).delete() | Write your backwards methods here. | Below is the the instruction that describes the task:
### Input:
Write your backwards methods here.
### Response:
def backwards(self, orm):
"Write your backwards methods here."
from django.contrib.auth.models import Group
projects = orm['samples.Project'].objects.all()
names = [PROJECT_GROUP_TEMPLATE.format(p.name) for p in projects]
# Remove groups named after these teams
Group.objects.filter(name__in=names).delete() |
def git_remote(git_repo):
"""Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote.
"""
github_token = os.getenv(GITHUB_TOKEN_KEY)
if github_token:
return 'https://{0}@github.com/{1}'.format(
github_token, git_repo)
return 'git@github.com:{0}'.format(git_repo) | Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote. | Below is the the instruction that describes the task:
### Input:
Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote.
### Response:
def git_remote(git_repo):
"""Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote.
"""
github_token = os.getenv(GITHUB_TOKEN_KEY)
if github_token:
return 'https://{0}@github.com/{1}'.format(
github_token, git_repo)
return 'git@github.com:{0}'.format(git_repo)
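A behaviour sketch; GITHUB_TOKEN_KEY is a module constant whose exact value is not shown here, so the environment-variable name below is an assumption:

import os

os.environ['GITHUB_TOKEN'] = 'xxxx'           # hypothetical value of GITHUB_TOKEN_KEY
print(git_remote('octocat/hello-world'))      # https://xxxx@github.com/octocat/hello-world

del os.environ['GITHUB_TOKEN']
print(git_remote('octocat/hello-world'))      # git@github.com:octocat/hello-world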
def toListInt(value):
"""
Convert a value to list of ints, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_integer(v), value)):
return [int(v) for v in value]
raise TypeError("Could not convert %s to list of ints" % value) | Convert a value to list of ints, if possible. | Below is the the instruction that describes the task:
### Input:
Convert a value to list of ints, if possible.
### Response:
def toListInt(value):
"""
Convert a value to list of ints, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_integer(v), value)):
return [int(v) for v in value]
raise TypeError("Could not convert %s to list of ints" % value) |
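Illustrative behaviour, assuming TypeConverters follows the usual pyspark.ml.param semantics for list and integer checks:

print(TypeConverters.toListInt([1, 2, 3]))     # [1, 2, 3]
TypeConverters.toListInt([1, 2.5])             # raises TypeError: 2.5 is not an integer value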
def _set_typeahead(cls, el, value):
"""
Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``.
"""
PlaceholderHandler.reset_placeholder_dropdown(el)
# if there is no elements, show alert icon in glyph
if not value and not el.value:
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert")
return
# if there is only one element, don't use typeahead, just put the
# information to the input, set different dropdown glyph and put source
# to the dropdown
if len(value) == 1:
source = value[0]["source"].strip()
dropdown_el = DropdownHandler.set_dropdown_glyph(
el.id,
"glyphicon-eye-open"
)
dropdown_content = "<span class='gray_text'> (%s)</span>"
# save the source to the dropdown menu
if source:
dropdown_el.html = dropdown_content % source[::-1]
el.value = value[0]["val"]
return
# get reference to parent element
parent_id = el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = el.parent.parent.id
if parent_id in cls._set_by_typeahead:
window.destroy_typeahead_tag("#" + parent_id)
# if there are multiple elements, put them to the typeahead and show
# dropdown glyph
window.make_typeahead_tag("#" + parent_id, value)
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down")
PlaceholderHandler.set_placeholder_dropdown(el)
cls._set_by_typeahead.add(parent_id) | Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``. | Below is the the instruction that describes the task:
### Input:
Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``.
### Response:
def _set_typeahead(cls, el, value):
"""
Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``.
"""
PlaceholderHandler.reset_placeholder_dropdown(el)
# if there is no elements, show alert icon in glyph
if not value and not el.value:
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert")
return
# if there is only one element, don't use typeahead, just put the
# information to the input, set different dropdown glyph and put source
# to the dropdown
if len(value) == 1:
source = value[0]["source"].strip()
dropdown_el = DropdownHandler.set_dropdown_glyph(
el.id,
"glyphicon-eye-open"
)
dropdown_content = "<span class='gray_text'> (%s)</span>"
# save the source to the dropdown menu
if source:
dropdown_el.html = dropdown_content % source[::-1]
el.value = value[0]["val"]
return
# get reference to parent element
parent_id = el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = el.parent.parent.id
if parent_id in cls._set_by_typeahead:
window.destroy_typeahead_tag("#" + parent_id)
# if there are multiple elements, put them to the typeahead and show
# dropdown glyph
window.make_typeahead_tag("#" + parent_id, value)
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down")
PlaceholderHandler.set_placeholder_dropdown(el)
cls._set_by_typeahead.add(parent_id) |
def velocity_dispersion(self, kwargs_lens, kwargs_lens_light, lens_light_model_bool_list=None, aniso_param=1,
r_eff=None, R_slit=0.81, dR_slit=0.1, psf_fwhm=0.7, num_evaluate=1000):
"""
computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm.
The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position.
Further information can be found in the AnalyticKinematics() class.
:param kwargs_lens: lens model parameters
:param kwargs_lens_light: deflector light parameters
:param aniso_param: scaled r_ani with respect to the half light radius
:param r_eff: half light radius, if not provided, will be computed from the lens light model
:param R_slit: width of the slit
:param dR_slit: length of the slit
:param psf_fwhm: full width at half maximum of the seeing condition
:param num_evaluate: number of spectral rendering of the light distribution that end up on the slit
:return: velocity dispersion in units [km/s]
"""
gamma = kwargs_lens[0]['gamma']
if 'center_x' in kwargs_lens_light[0]:
center_x, center_y = kwargs_lens_light[0]['center_x'], kwargs_lens_light[0]['center_y']
else:
center_x, center_y = 0, 0
if r_eff is None:
r_eff = self.lens_analysis.half_light_radius_lens(kwargs_lens_light, center_x=center_x, center_y=center_y, model_bool_list=lens_light_model_bool_list)
theta_E = kwargs_lens[0]['theta_E']
r_ani = aniso_param * r_eff
sigma2 = self.analytic_kinematics.vel_disp(gamma, theta_E, r_eff, r_ani, R_slit, dR_slit, FWHM=psf_fwhm, rendering_number=num_evaluate)
return sigma2 | computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm.
The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position.
Further information can be found in the AnalyticKinematics() class.
:param kwargs_lens: lens model parameters
:param kwargs_lens_light: deflector light parameters
:param aniso_param: scaled r_ani with respect to the half light radius
:param r_eff: half light radius, if not provided, will be computed from the lens light model
:param R_slit: width of the slit
:param dR_slit: length of the slit
:param psf_fwhm: full width at half maximum of the seeing condition
:param num_evaluate: number of spectral rendering of the light distribution that end up on the slit
:return: velocity dispersion in units [km/s] | Below is the the instruction that describes the task:
### Input:
computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm.
The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position.
Further information can be found in the AnalyticKinematics() class.
:param kwargs_lens: lens model parameters
:param kwargs_lens_light: deflector light parameters
:param aniso_param: scaled r_ani with respect to the half light radius
:param r_eff: half light radius, if not provided, will be computed from the lens light model
:param R_slit: width of the slit
:param dR_slit: length of the slit
:param psf_fwhm: full width at half maximum of the seeing condition
:param num_evaluate: number of spectral rendering of the light distribution that end up on the slit
:return: velocity dispersion in units [km/s]
### Response:
def velocity_dispersion(self, kwargs_lens, kwargs_lens_light, lens_light_model_bool_list=None, aniso_param=1,
r_eff=None, R_slit=0.81, dR_slit=0.1, psf_fwhm=0.7, num_evaluate=1000):
"""
computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm.
The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position.
Further information can be found in the AnalyticKinematics() class.
:param kwargs_lens: lens model parameters
:param kwargs_lens_light: deflector light parameters
:param aniso_param: scaled r_ani with respect to the half light radius
:param r_eff: half light radius, if not provided, will be computed from the lens light model
:param R_slit: width of the slit
:param dR_slit: length of the slit
:param psf_fwhm: full width at half maximum of the seeing condition
:param num_evaluate: number of spectral rendering of the light distribution that end up on the slit
:return: velocity dispersion in units [km/s]
"""
gamma = kwargs_lens[0]['gamma']
if 'center_x' in kwargs_lens_light[0]:
center_x, center_y = kwargs_lens_light[0]['center_x'], kwargs_lens_light[0]['center_y']
else:
center_x, center_y = 0, 0
if r_eff is None:
r_eff = self.lens_analysis.half_light_radius_lens(kwargs_lens_light, center_x=center_x, center_y=center_y, model_bool_list=lens_light_model_bool_list)
theta_E = kwargs_lens[0]['theta_E']
r_ani = aniso_param * r_eff
sigma2 = self.analytic_kinematics.vel_disp(gamma, theta_E, r_eff, r_ani, R_slit, dR_slit, FWHM=psf_fwhm, rendering_number=num_evaluate)
return sigma2 |
def reset(self, seed):
"""
Reset this generator's seed generator and any clones.
"""
logger.debug(f'Resetting {self} (seed={seed})')
self.seed_generator.reset(seed)
for c in self.clones:
c.reset(seed) | Reset this generator's seed generator and any clones. | Below is the the instruction that describes the task:
### Input:
Reset this generator's seed generator and any clones.
### Response:
def reset(self, seed):
"""
Reset this generator's seed generator and any clones.
"""
logger.debug(f'Resetting {self} (seed={seed})')
self.seed_generator.reset(seed)
for c in self.clones:
c.reset(seed) |
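A toy sketch of the same reset-propagation pattern with minimal stand-in classes (every name here is illustrative, not the original library's API):

import random

class SeedGenerator:
    # Tiny stand-in: wraps a random.Random that can be re-seeded.
    def __init__(self):
        self.rng = random.Random()
    def reset(self, seed):
        self.rng.seed(seed)

class Generator:
    def __init__(self):
        self.seed_generator = SeedGenerator()
        self.clones = []
    def clone(self):
        c = Generator()
        self.clones.append(c)
        return c
    def reset(self, seed):
        # Reset this generator's seed generator and any clones.
        self.seed_generator.reset(seed)
        for c in self.clones:
            c.reset(seed)

g = Generator()
c = g.clone()
g.reset(12345)
# Parent and clone now produce identical streams.
print(g.seed_generator.rng.random() == c.seed_generator.rng.random())   # True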
def _query_compressed(options, collection_name, num_to_skip,
num_to_return, query, field_selector,
opts, check_keys=False, ctx=None):
"""Internal compressed query message helper."""
op_query, max_bson_size = _query(
options,
collection_name,
num_to_skip,
num_to_return,
query,
field_selector,
opts,
check_keys)
rid, msg = _compress(2004, op_query, ctx)
return rid, msg, max_bson_size | Internal compressed query message helper. | Below is the the instruction that describes the task:
### Input:
Internal compressed query message helper.
### Response:
def _query_compressed(options, collection_name, num_to_skip,
num_to_return, query, field_selector,
opts, check_keys=False, ctx=None):
"""Internal compressed query message helper."""
op_query, max_bson_size = _query(
options,
collection_name,
num_to_skip,
num_to_return,
query,
field_selector,
opts,
check_keys)
rid, msg = _compress(2004, op_query, ctx)
return rid, msg, max_bson_size |
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = parse_requirements(distvers).next()
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm | Recompute this distribution's dependencies. | Below is the the instruction that describes the task:
### Input:
Recompute this distribution's dependencies.
### Response:
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = parse_requirements(distvers).next()
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm |
def write_lammpsdata(structure, filename, atom_style='full'):
"""Output a LAMMPS data file.
Outputs a LAMMPS data file in the 'full' atom style format. Assumes use
of 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for
more information on atom styles.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
atom_style: str
Defines the style of atoms to be saved in a LAMMPS data file. The following atom
styles are currently supported: 'full', 'atomic', 'charge', 'molecular'
see http://lammps.sandia.gov/doc/atom_style.html for more
information on atom styles.
Notes
-----
See http://lammps.sandia.gov/doc/2001/data_format.html for a full description
of the LAMMPS data format. Currently the following sections are supported (in
addition to the header): *Masses*, *Nonbond Coeffs*, *Bond Coeffs*, *Angle
Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*, *Angles*, *Dihedrals*
Some of this function has been adopted from `mdtraj`'s support of the LAMMPSTRJ
trajectory format. See https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py for details.
"""
if atom_style not in ['atomic', 'charge', 'molecular', 'full']:
raise ValueError('Atom style "{}" is invalid or is not currently supported'.format(atom_style))
xyz = np.array([[atom.xx,atom.xy,atom.xz] for atom in structure.atoms])
forcefield = True
if structure[0].type == '':
forcefield = False
# Internally use nm
box = Box(lengths=np.array([0.1 * val for val in structure.box[0:3]]),
angles=structure.box[3:6])
if forcefield:
types = [atom.type for atom in structure.atoms]
else:
types = [atom.name for atom in structure.atoms]
unique_types = list(set(types))
unique_types.sort(key=natural_sort)
charges = [atom.charge for atom in structure.atoms]
bonds = [[bond.atom1.idx+1, bond.atom2.idx+1] for bond in structure.bonds]
angles = [[angle.atom1.idx+1,
angle.atom2.idx+1,
angle.atom3.idx+1] for angle in structure.angles]
dihedrals = [[dihedral.atom1.idx+1,
dihedral.atom2.idx+1,
dihedral.atom3.idx+1,
dihedral.atom4.idx+1] for dihedral in structure.rb_torsions]
if bonds:
if len(structure.bond_types) == 0:
bond_types = np.ones(len(bonds),dtype=int)
else:
unique_bond_types = dict(enumerate(set([(round(bond.type.k,3),
round(bond.type.req,3)) for bond in structure.bonds])))
unique_bond_types = OrderedDict([(y,x+1) for x,y in unique_bond_types.items()])
bond_types = [unique_bond_types[(round(bond.type.k,3),
round(bond.type.req,3))] for bond in structure.bonds]
if angles:
unique_angle_types = dict(enumerate(set([(round(angle.type.k,3),
round(angle.type.theteq,3)) for angle in structure.angles])))
unique_angle_types = OrderedDict([(y,x+1) for x,y in unique_angle_types.items()])
angle_types = [unique_angle_types[(round(angle.type.k,3),
round(angle.type.theteq,3))] for angle in structure.angles]
if dihedrals:
unique_dihedral_types = dict(enumerate(set([(round(dihedral.type.c0,3),
round(dihedral.type.c1,3),
round(dihedral.type.c2,3),
round(dihedral.type.c3,3),
round(dihedral.type.c4,3),
round(dihedral.type.c5,3),
round(dihedral.type.scee,1),
round(dihedral.type.scnb,1)) for dihedral in structure.rb_torsions])))
unique_dihedral_types = OrderedDict([(y,x+1) for x,y in unique_dihedral_types.items()])
dihedral_types = [unique_dihedral_types[(round(dihedral.type.c0,3),
round(dihedral.type.c1,3),
round(dihedral.type.c2,3),
round(dihedral.type.c3,3),
round(dihedral.type.c4,3),
round(dihedral.type.c5,3),
round(dihedral.type.scee,1),
round(dihedral.type.scnb,1))] for dihedral in structure.rb_torsions]
with open(filename, 'w') as data:
data.write(filename+' - created by mBuild\n\n')
data.write('{:d} atoms\n'.format(len(structure.atoms)))
if atom_style in ['full', 'molecular']:
data.write('{:d} bonds\n'.format(len(bonds)))
data.write('{:d} angles\n'.format(len(angles)))
data.write('{:d} dihedrals\n\n'.format(len(dihedrals)))
data.write('{:d} atom types\n'.format(len(set(types))))
if atom_style in ['full', 'molecular']:
if bonds:
data.write('{:d} bond types\n'.format(len(set(bond_types))))
if angles:
data.write('{:d} angle types\n'.format(len(set(angle_types))))
if dihedrals:
data.write('{:d} dihedral types\n'.format(len(set(dihedral_types))))
data.write('\n')
# Box data
if np.allclose(box.angles, np.array([90, 90, 90])):
for i,dim in enumerate(['x','y','z']):
data.write('{0:.6f} {1:.6f} {2}lo {2}hi\n'.format(
10.0 * box.mins[i],
10.0 * box.maxs[i],
dim))
else:
a, b, c = 10.0 * box.lengths
alpha, beta, gamma = np.radians(box.angles)
lx = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
ly = np.sqrt(b**2 - xy**2)
yz = (b*c*np.cos(alpha) - xy*xz) / ly
lz = np.sqrt(c**2 - xz**2 - yz**2)
xlo, ylo, zlo = 10.0 * box.mins
xhi = xlo + lx
yhi = ylo + ly
zhi = zlo + lz
xlo_bound = xlo + np.min([0.0, xy, xz, xy+xz])
xhi_bound = xhi + np.max([0.0, xy, xz, xy+xz])
ylo_bound = ylo + np.min([0.0, yz])
yhi_bound = yhi + np.max([0.0, yz])
zlo_bound = zlo
zhi_bound = zhi
data.write('{0:.6f} {1:.6f} xlo xhi\n'.format(
xlo_bound, xhi_bound))
data.write('{0:.6f} {1:.6f} ylo yhi\n'.format(
ylo_bound, yhi_bound))
data.write('{0:.6f} {1:.6f} zlo zhi\n'.format(
zlo_bound, zhi_bound))
data.write('{0:.6f} {1:.6f} {2:6f} xy xz yz\n'.format(
xy, xz, yz))
# Mass data
masses = [atom.mass for atom in structure.atoms]
mass_dict = dict([(unique_types.index(atom_type)+1,mass) for atom_type,mass in zip(types,masses)])
data.write('\nMasses\n\n')
for atom_type,mass in mass_dict.items():
data.write('{:d}\t{:.6f}\t# {}\n'.format(atom_type,mass,unique_types[atom_type-1]))
if forcefield:
# Pair coefficients
epsilons = [atom.epsilon for atom in structure.atoms]
sigmas = [atom.sigma for atom in structure.atoms]
epsilon_dict = dict([(unique_types.index(atom_type)+1,epsilon) for atom_type,epsilon in zip(types,epsilons)])
sigma_dict = dict([(unique_types.index(atom_type)+1,sigma) for atom_type,sigma in zip(types,sigmas)])
data.write('\nPair Coeffs # lj\n\n')
for idx,epsilon in epsilon_dict.items():
data.write('{}\t{:.5f}\t{:.5f}\n'.format(idx,epsilon,sigma_dict[idx]))
# Bond coefficients
if bonds:
data.write('\nBond Coeffs # harmonic\n\n')
for params,idx in unique_bond_types.items():
data.write('{}\t{}\t{}\n'.format(idx,*params))
# Angle coefficients
if angles:
data.write('\nAngle Coeffs # harmonic\n\n')
for params,idx in unique_angle_types.items():
data.write('{}\t{}\t{:.5f}\n'.format(idx,*params))
# Dihedral coefficients
if dihedrals:
data.write('\nDihedral Coeffs # opls\n\n')
for params,idx in unique_dihedral_types.items():
opls_coeffs = RB_to_OPLS(params[0],
params[1],
params[2],
params[3],
params[4],
params[5])
data.write('{}\t{:.5f}\t{:.5f}\t{:.5f}\t{:.5f}\n'.format(idx,*opls_coeffs))
# Atom data
data.write('\nAtoms\n\n')
if atom_style == 'atomic':
atom_line = '{index:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
elif atom_style == 'charge':
atom_line = '{index:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
elif atom_style == 'molecular':
atom_line = '{index:d}\t{zero:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
elif atom_style == 'full':
atom_line ='{index:d}\t{zero:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
for i,coords in enumerate(xyz):
data.write(atom_line.format(
index=i+1,type_index=unique_types.index(types[i])+1,
zero=0,charge=charges[i],
x=coords[0],y=coords[1],z=coords[2]))
if atom_style in ['full', 'molecular']:
# Bond data
if bonds:
data.write('\nBonds\n\n')
for i,bond in enumerate(bonds):
data.write('{:d}\t{:d}\t{:d}\t{:d}\n'.format(
i+1,bond_types[i],bond[0],bond[1]))
# Angle data
if angles:
data.write('\nAngles\n\n')
for i,angle in enumerate(angles):
data.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n'.format(
i+1,angle_types[i],angle[0],angle[1],angle[2]))
# Dihedral data
if dihedrals:
data.write('\nDihedrals\n\n')
for i,dihedral in enumerate(dihedrals):
data.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n'.format(
i+1,dihedral_types[i],dihedral[0],
dihedral[1],dihedral[2],dihedral[3])) | Output a LAMMPS data file.
Outputs a LAMMPS data file in the 'full' atom style format. Assumes use
of 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for
more information on atom styles.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
atom_style: str
Defines the style of atoms to be saved in a LAMMPS data file. The following atom
styles are currently supported: 'full', 'atomic', 'charge', 'molecular'
see http://lammps.sandia.gov/doc/atom_style.html for more
information on atom styles.
Notes
-----
See http://lammps.sandia.gov/doc/2001/data_format.html for a full description
of the LAMMPS data format. Currently the following sections are supported (in
addition to the header): *Masses*, *Nonbond Coeffs*, *Bond Coeffs*, *Angle
Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*, *Angles*, *Dihedrals*
Some of this function has been adopted from `mdtraj`'s support of the LAMMPSTRJ
trajectory format. See https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py for details. | Below is the the instruction that describes the task:
### Input:
Output a LAMMPS data file.
Outputs a LAMMPS data file in the 'full' atom style format. Assumes use
of 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for
more information on atom styles.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
atom_style: str
Defines the style of atoms to be saved in a LAMMPS data file. The following atom
styles are currently supported: 'full', 'atomic', 'charge', 'molecular'
see http://lammps.sandia.gov/doc/atom_style.html for more
information on atom styles.
Notes
-----
See http://lammps.sandia.gov/doc/2001/data_format.html for a full description
of the LAMMPS data format. Currently the following sections are supported (in
addition to the header): *Masses*, *Nonbond Coeffs*, *Bond Coeffs*, *Angle
Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*, *Angles*, *Dihedrals*
Some of this function has been adopted from `mdtraj`'s support of the LAMMPSTRJ
trajectory format. See https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py for details.
### Response:
def write_lammpsdata(structure, filename, atom_style='full'):
"""Output a LAMMPS data file.
Outputs a LAMMPS data file in the 'full' atom style format. Assumes use
of 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for
more information on atom styles.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
atom_style: str
Defines the style of atoms to be saved in a LAMMPS data file. The following atom
styles are currently supported: 'full', 'atomic', 'charge', 'molecular'
see http://lammps.sandia.gov/doc/atom_style.html for more
information on atom styles.
Notes
-----
See http://lammps.sandia.gov/doc/2001/data_format.html for a full description
of the LAMMPS data format. Currently the following sections are supported (in
addition to the header): *Masses*, *Nonbond Coeffs*, *Bond Coeffs*, *Angle
Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*, *Angles*, *Dihedrals*
Some of this function has been adopted from `mdtraj`'s support of the LAMMPSTRJ
trajectory format. See https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py for details.
"""
if atom_style not in ['atomic', 'charge', 'molecular', 'full']:
raise ValueError('Atom style "{}" is invalid or is not currently supported'.format(atom_style))
xyz = np.array([[atom.xx,atom.xy,atom.xz] for atom in structure.atoms])
forcefield = True
if structure[0].type == '':
forcefield = False
# Internally use nm
box = Box(lengths=np.array([0.1 * val for val in structure.box[0:3]]),
angles=structure.box[3:6])
if forcefield:
types = [atom.type for atom in structure.atoms]
else:
types = [atom.name for atom in structure.atoms]
unique_types = list(set(types))
unique_types.sort(key=natural_sort)
charges = [atom.charge for atom in structure.atoms]
bonds = [[bond.atom1.idx+1, bond.atom2.idx+1] for bond in structure.bonds]
angles = [[angle.atom1.idx+1,
angle.atom2.idx+1,
angle.atom3.idx+1] for angle in structure.angles]
dihedrals = [[dihedral.atom1.idx+1,
dihedral.atom2.idx+1,
dihedral.atom3.idx+1,
dihedral.atom4.idx+1] for dihedral in structure.rb_torsions]
if bonds:
if len(structure.bond_types) == 0:
bond_types = np.ones(len(bonds),dtype=int)
else:
unique_bond_types = dict(enumerate(set([(round(bond.type.k,3),
round(bond.type.req,3)) for bond in structure.bonds])))
unique_bond_types = OrderedDict([(y,x+1) for x,y in unique_bond_types.items()])
bond_types = [unique_bond_types[(round(bond.type.k,3),
round(bond.type.req,3))] for bond in structure.bonds]
if angles:
unique_angle_types = dict(enumerate(set([(round(angle.type.k,3),
round(angle.type.theteq,3)) for angle in structure.angles])))
unique_angle_types = OrderedDict([(y,x+1) for x,y in unique_angle_types.items()])
angle_types = [unique_angle_types[(round(angle.type.k,3),
round(angle.type.theteq,3))] for angle in structure.angles]
if dihedrals:
unique_dihedral_types = dict(enumerate(set([(round(dihedral.type.c0,3),
round(dihedral.type.c1,3),
round(dihedral.type.c2,3),
round(dihedral.type.c3,3),
round(dihedral.type.c4,3),
round(dihedral.type.c5,3),
round(dihedral.type.scee,1),
round(dihedral.type.scnb,1)) for dihedral in structure.rb_torsions])))
unique_dihedral_types = OrderedDict([(y,x+1) for x,y in unique_dihedral_types.items()])
dihedral_types = [unique_dihedral_types[(round(dihedral.type.c0,3),
round(dihedral.type.c1,3),
round(dihedral.type.c2,3),
round(dihedral.type.c3,3),
round(dihedral.type.c4,3),
round(dihedral.type.c5,3),
round(dihedral.type.scee,1),
round(dihedral.type.scnb,1))] for dihedral in structure.rb_torsions]
with open(filename, 'w') as data:
data.write(filename+' - created by mBuild\n\n')
data.write('{:d} atoms\n'.format(len(structure.atoms)))
if atom_style in ['full', 'molecular']:
data.write('{:d} bonds\n'.format(len(bonds)))
data.write('{:d} angles\n'.format(len(angles)))
data.write('{:d} dihedrals\n\n'.format(len(dihedrals)))
data.write('{:d} atom types\n'.format(len(set(types))))
if atom_style in ['full', 'molecular']:
if bonds:
data.write('{:d} bond types\n'.format(len(set(bond_types))))
if angles:
data.write('{:d} angle types\n'.format(len(set(angle_types))))
if dihedrals:
data.write('{:d} dihedral types\n'.format(len(set(dihedral_types))))
data.write('\n')
# Box data
if np.allclose(box.angles, np.array([90, 90, 90])):
for i,dim in enumerate(['x','y','z']):
data.write('{0:.6f} {1:.6f} {2}lo {2}hi\n'.format(
10.0 * box.mins[i],
10.0 * box.maxs[i],
dim))
else:
a, b, c = 10.0 * box.lengths
alpha, beta, gamma = np.radians(box.angles)
lx = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
ly = np.sqrt(b**2 - xy**2)
yz = (b*c*np.cos(alpha) - xy*xz) / ly
lz = np.sqrt(c**2 - xz**2 - yz**2)
xlo, ylo, zlo = 10.0 * box.mins
xhi = xlo + lx
yhi = ylo + ly
zhi = zlo + lz
xlo_bound = xlo + np.min([0.0, xy, xz, xy+xz])
xhi_bound = xhi + np.max([0.0, xy, xz, xy+xz])
ylo_bound = ylo + np.min([0.0, yz])
yhi_bound = yhi + np.max([0.0, yz])
zlo_bound = zlo
zhi_bound = zhi
data.write('{0:.6f} {1:.6f} xlo xhi\n'.format(
xlo_bound, xhi_bound))
data.write('{0:.6f} {1:.6f} ylo yhi\n'.format(
ylo_bound, yhi_bound))
data.write('{0:.6f} {1:.6f} zlo zhi\n'.format(
zlo_bound, zhi_bound))
data.write('{0:.6f} {1:.6f} {2:6f} xy xz yz\n'.format(
xy, xz, yz))
# Mass data
masses = [atom.mass for atom in structure.atoms]
mass_dict = dict([(unique_types.index(atom_type)+1,mass) for atom_type,mass in zip(types,masses)])
data.write('\nMasses\n\n')
for atom_type,mass in mass_dict.items():
data.write('{:d}\t{:.6f}\t# {}\n'.format(atom_type,mass,unique_types[atom_type-1]))
if forcefield:
# Pair coefficients
epsilons = [atom.epsilon for atom in structure.atoms]
sigmas = [atom.sigma for atom in structure.atoms]
epsilon_dict = dict([(unique_types.index(atom_type)+1,epsilon) for atom_type,epsilon in zip(types,epsilons)])
sigma_dict = dict([(unique_types.index(atom_type)+1,sigma) for atom_type,sigma in zip(types,sigmas)])
data.write('\nPair Coeffs # lj\n\n')
for idx,epsilon in epsilon_dict.items():
data.write('{}\t{:.5f}\t{:.5f}\n'.format(idx,epsilon,sigma_dict[idx]))
# Bond coefficients
if bonds:
data.write('\nBond Coeffs # harmonic\n\n')
for params,idx in unique_bond_types.items():
data.write('{}\t{}\t{}\n'.format(idx,*params))
# Angle coefficients
if angles:
data.write('\nAngle Coeffs # harmonic\n\n')
for params,idx in unique_angle_types.items():
data.write('{}\t{}\t{:.5f}\n'.format(idx,*params))
# Dihedral coefficients
if dihedrals:
data.write('\nDihedral Coeffs # opls\n\n')
for params,idx in unique_dihedral_types.items():
opls_coeffs = RB_to_OPLS(params[0],
params[1],
params[2],
params[3],
params[4],
params[5])
data.write('{}\t{:.5f}\t{:.5f}\t{:.5f}\t{:.5f}\n'.format(idx,*opls_coeffs))
# Atom data
data.write('\nAtoms\n\n')
if atom_style == 'atomic':
atom_line = '{index:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
elif atom_style == 'charge':
atom_line = '{index:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
elif atom_style == 'molecular':
atom_line = '{index:d}\t{zero:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
elif atom_style == 'full':
atom_line ='{index:d}\t{zero:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
for i,coords in enumerate(xyz):
data.write(atom_line.format(
index=i+1,type_index=unique_types.index(types[i])+1,
zero=0,charge=charges[i],
x=coords[0],y=coords[1],z=coords[2]))
if atom_style in ['full', 'molecular']:
# Bond data
if bonds:
data.write('\nBonds\n\n')
for i,bond in enumerate(bonds):
data.write('{:d}\t{:d}\t{:d}\t{:d}\n'.format(
i+1,bond_types[i],bond[0],bond[1]))
# Angle data
if angles:
data.write('\nAngles\n\n')
for i,angle in enumerate(angles):
data.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n'.format(
i+1,angle_types[i],angle[0],angle[1],angle[2]))
# Dihedral data
if dihedrals:
data.write('\nDihedrals\n\n')
for i,dihedral in enumerate(dihedrals):
data.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n'.format(
i+1,dihedral_types[i],dihedral[0],
dihedral[1],dihedral[2],dihedral[3])) |
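The triclinic branch above turns box lengths and angles into the LAMMPS tilt factors written on the `xy xz yz` line; a self-contained sketch of that conversion with illustrative numbers:

import numpy as np

# Illustrative box: lengths in Angstroms, angles in degrees.
a, b, c = 40.0, 40.0, 40.0
alpha, beta, gamma = np.radians([90.0, 90.0, 60.0])

lx = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
ly = np.sqrt(b**2 - xy**2)
yz = (b * c * np.cos(alpha) - xy * xz) / ly
lz = np.sqrt(c**2 - xz**2 - yz**2)

print(lx, ly, lz)   # edge lengths of the LAMMPS cell
print(xy, xz, yz)   # tilt factors for the "xy xz yz" line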
def get_profile_histogram(x, y, n_bins=100):
'''Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates
the y mean for every bin at the bin center and gives the y mean error as error bars.
Parameters
----------
x : array like
data x positions
y : array like
data y positions
n_bins : int
the number of bins used to create the histogram
'''
if len(x) != len(y):
raise ValueError('x and y dimensions have to be the same')
y = y.astype(np.float32)
n, bin_edges = np.histogram(x, bins=n_bins) # needed to calculate the number of points per bin
sy = np.histogram(x, bins=n_bins, weights=y)[0] # the sum of the bin values
sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0] # the quadratic sum of the bin values
bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2 # calculate the bin center for all bins
mean = sy / n # calculate the mean of all bins
std = np.sqrt((sy2 / n - mean * mean)) # TODO: not understood, need check if this is really the standard deviation
std_mean = std / np.sqrt((n - 1))
mean[np.isnan(mean)] = 0.
std_mean[np.isnan(std_mean)] = 0.
return bin_centers, mean, std_mean | Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates
the y mean for every bin at the bin center and gives the y mean error as error bars.
Parameters
----------
x : array like
data x positions
y : array like
data y positions
n_bins : int
the number of bins used to create the histogram | Below is the the instruction that describes the task:
### Input:
Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates
the y mean for every bin at the bin center and gives the y mean error as error bars.
Parameters
----------
x : array like
data x positions
y : array like
data y positions
n_bins : int
the number of bins used to create the histogram
### Response:
def get_profile_histogram(x, y, n_bins=100):
'''Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates
the y mean for every bin at the bin center and gives the y mean error as error bars.
Parameters
----------
x : array like
data x positions
y : array like
data y positions
n_bins : int
the number of bins used to create the histogram
'''
if len(x) != len(y):
raise ValueError('x and y dimensions have to be the same')
y = y.astype(np.float32)
n, bin_edges = np.histogram(x, bins=n_bins) # needed to calculate the number of points per bin
sy = np.histogram(x, bins=n_bins, weights=y)[0] # the sum of the bin values
sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0] # the quadratic sum of the bin values
bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2 # calculate the bin center for all bins
mean = sy / n # calculate the mean of all bins
std = np.sqrt((sy2 / n - mean * mean)) # TODO: not understood, need check if this is really the standard deviation
std_mean = std / np.sqrt((n - 1))
mean[np.isnan(mean)] = 0.
std_mean[np.isnan(std_mean)] = 0.
return bin_centers, mean, std_mean |
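A self-contained sketch of the same profile-histogram construction on synthetic data; the three weighted histograms below mirror the function body, so the per-bin means should track the underlying linear trend:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, size=5000)
y = 2.0 * x + rng.normal(scale=1.0, size=5000)   # noisy linear relation

n_bins = 20
n, bin_edges = np.histogram(x, bins=n_bins)               # points per bin
sy = np.histogram(x, bins=n_bins, weights=y)[0]           # sum of y per bin
sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0]      # sum of y**2 per bin

bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2
mean = sy / n                                             # per-bin mean of y
std_mean = np.sqrt(sy2 / n - mean * mean) / np.sqrt(n - 1)

print(np.round(mean[:5], 2))       # close to 2 * bin_centers[:5]
print(np.round(std_mean[:5], 3))   # small errors on the means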
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
try:
unpacked_data = struct.unpack('!4B', buff[offset:offset+4])
self._value = '.'.join([str(x) for x in unpacked_data])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff)) | Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error. | Below is the the instruction that describes the task:
### Input:
Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
### Response:
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
try:
unpacked_data = struct.unpack('!4B', buff[offset:offset+4])
self._value = '.'.join([str(x) for x in unpacked_data])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff)) |
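The core of the unpacking step as a standalone sketch: four network-order bytes become a dotted-quad string (the buffer contents are illustrative):

import struct

buff = bytes([192, 168, 0, 1]) + b'trailing-payload'
offset = 0

# '!4B' reads four unsigned bytes in network (big-endian) order.
unpacked_data = struct.unpack('!4B', buff[offset:offset + 4])
value = '.'.join(str(x) for x in unpacked_data)
print(value)   # 192.168.0.1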
def _deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return a single array of values.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Values per image
vpi = vpr * self.height
# Interleaving writes to the output array randomly
# (well, not quite), so the entire output array must be in memory.
# Make a result array, and make it big enough.
if self.bitdepth > 8:
a = array('H', [0] * vpi)
else:
a = bytearray([0] * vpi)
source_offset = 0
for lines in adam7_generate(self.width, self.height):
# The previous (reconstructed) scanline.
# `None` at the beginning of a pass
# to indicate that there is no previous line.
recon = None
for x, y, xstep in lines:
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset: source_offset + row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self._bytes_to_values(recon, width=ppr)
if xstep == 1:
assert x == 0
offset = y * vpr
a[offset: offset + vpr] = flat
else:
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset + i: end_offset: skip] = \
flat[i:: self.planes]
return a | Read raw pixel data, undo filters, deinterlace, and flatten.
Return a single array of values. | Below is the the instruction that describes the task:
### Input:
Read raw pixel data, undo filters, deinterlace, and flatten.
Return a single array of values.
### Response:
def _deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return a single array of values.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Values per image
vpi = vpr * self.height
# Interleaving writes to the output array randomly
# (well, not quite), so the entire output array must be in memory.
# Make a result array, and make it big enough.
if self.bitdepth > 8:
a = array('H', [0] * vpi)
else:
a = bytearray([0] * vpi)
source_offset = 0
for lines in adam7_generate(self.width, self.height):
# The previous (reconstructed) scanline.
# `None` at the beginning of a pass
# to indicate that there is no previous line.
recon = None
for x, y, xstep in lines:
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset: source_offset + row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self._bytes_to_values(recon, width=ppr)
if xstep == 1:
assert x == 0
offset = y * vpr
a[offset: offset + vpr] = flat
else:
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset + i: end_offset: skip] = \
flat[i:: self.planes]
return a |
def _set_collector_profile(self, v, load=False):
"""
Setter method for collector_profile, mapped from YANG variable /telemetry/collector/collector_profile (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_profile is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_profile() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("collector_profiletype collector_profilename",collector_profile.collector_profile, yang_name="collector-profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name="collector-profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """collector_profile must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("collector_profiletype collector_profilename",collector_profile.collector_profile, yang_name="collector-profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name="collector-profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
})
self.__collector_profile = t
if hasattr(self, '_set'):
self._set() | Setter method for collector_profile, mapped from YANG variable /telemetry/collector/collector_profile (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_profile is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_profile() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for collector_profile, mapped from YANG variable /telemetry/collector/collector_profile (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_profile is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_profile() directly.
### Response:
def _set_collector_profile(self, v, load=False):
"""
Setter method for collector_profile, mapped from YANG variable /telemetry/collector/collector_profile (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_profile is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_profile() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("collector_profiletype collector_profilename",collector_profile.collector_profile, yang_name="collector-profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name="collector-profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """collector_profile must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("collector_profiletype collector_profilename",collector_profile.collector_profile, yang_name="collector-profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name="collector-profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
})
self.__collector_profile = t
if hasattr(self, '_set'):
self._set() |
def autocorrelated_relaxed_clock(self, root_rate, autocorrel,
distribution='lognormal'):
"""
Attaches rates to each node according to autocorrelated lognormal
model from Kishino et al.(2001), or autocorrelated exponential
"""
optioncheck(distribution, ['exponential', 'lognormal'])
if autocorrel == 0:
for node in self._tree.preorder_node_iter():
node.rate = root_rate
return
for node in self._tree.preorder_node_iter():
if node == self._tree.seed_node:
node.rate = root_rate
else:
parent_rate = node.parent_node.rate
bl = node.edge_length
if distribution == 'lognormal':
node.rate = logn_correlated_rate(parent_rate, bl,
autocorrel)
else:
node.rate = np.random.exponential(parent_rate) | Attaches rates to each node according to autocorrelated lognormal
model from Kishino et al.(2001), or autocorrelated exponential | Below is the the instruction that describes the task:
### Input:
Attaches rates to each node according to autocorrelated lognormal
model from Kishino et al.(2001), or autocorrelated exponential
### Response:
def autocorrelated_relaxed_clock(self, root_rate, autocorrel,
distribution='lognormal'):
"""
Attaches rates to each node according to autocorrelated lognormal
model from Kishino et al.(2001), or autocorrelated exponential
"""
optioncheck(distribution, ['exponential', 'lognormal'])
if autocorrel == 0:
for node in self._tree.preorder_node_iter():
node.rate = root_rate
return
for node in self._tree.preorder_node_iter():
if node == self._tree.seed_node:
node.rate = root_rate
else:
parent_rate = node.parent_node.rate
bl = node.edge_length
if distribution == 'lognormal':
node.rate = logn_correlated_rate(parent_rate, bl,
autocorrel)
else:
node.rate = np.random.exponential(parent_rate) |
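A standalone sketch of the autocorrelated-exponential branch over a simple parent-to-child chain instead of a full tree; the lognormal branch would swap the exponential draw for a lognormal one centred on the parent rate (all numbers are illustrative):

import numpy as np

np.random.seed(1)
root_rate = 1.0
n_nodes = 6

# Each node draws its rate from an exponential whose mean is the parent's rate.
rates = [root_rate]
for _ in range(n_nodes - 1):
    parent_rate = rates[-1]
    rates.append(np.random.exponential(parent_rate))

print(np.round(rates, 3))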
def hil_gps_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible, force_mavlink1=False):
'''
The global position, as returned by the Global Positioning System
(GPS). This is NOT the global
position estimate of the system, but rather a RAW
sensor value. See message GLOBAL_POSITION for the
global position estimate. Coordinate frame is right-
handed, Z-axis up (GPS frame).
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
lat : Latitude (WGS84), in degrees * 1E7 (int32_t)
lon : Longitude (WGS84), in degrees * 1E7 (int32_t)
alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
vn : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
ve : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
vd : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
'''
return self.send(self.hil_gps_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible), force_mavlink1=force_mavlink1) | The global position, as returned by the Global Positioning System
(GPS). This is NOT the global
position estimate of the system, but rather a RAW
sensor value. See message GLOBAL_POSITION for the
global position estimate. Coordinate frame is right-
handed, Z-axis up (GPS frame).
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
lat : Latitude (WGS84), in degrees * 1E7 (int32_t)
lon : Longitude (WGS84), in degrees * 1E7 (int32_t)
alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
vn : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
ve : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
vd : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) | Below is the the instruction that describes the task:
### Input:
The global position, as returned by the Global Positioning System
(GPS). This is NOT the global
position estimate of the system, but rather a RAW
sensor value. See message GLOBAL_POSITION for the
global position estimate. Coordinate frame is right-
handed, Z-axis up (GPS frame).
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
lat : Latitude (WGS84), in degrees * 1E7 (int32_t)
lon : Longitude (WGS84), in degrees * 1E7 (int32_t)
alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
vn : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
ve : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
vd : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
### Response:
def hil_gps_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible, force_mavlink1=False):
'''
The global position, as returned by the Global Positioning System
(GPS). This is NOT the global
position estimate of the system, but rather a RAW
sensor value. See message GLOBAL_POSITION for the
global position estimate. Coordinate frame is right-
handed, Z-axis up (GPS frame).
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
lat : Latitude (WGS84), in degrees * 1E7 (int32_t)
lon : Longitude (WGS84), in degrees * 1E7 (int32_t)
alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
vn : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
ve : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
vd : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
'''
return self.send(self.hil_gps_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible), force_mavlink1=force_mavlink1) |
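A usage sketch, assuming a pymavlink connection object whose `.mav` attribute exposes this generated sender; the connection string and GPS values are illustrative:

from pymavlink import mavutil

# Open an outgoing UDP link to a simulator or autopilot (address is illustrative).
master = mavutil.mavlink_connection('udpout:127.0.0.1:14550')

master.mav.hil_gps_send(
    time_usec=1600000000000000,      # timestamp in microseconds
    fix_type=3,                      # 3D fix
    lat=int(47.397742 * 1e7),        # degrees * 1E7
    lon=int(8.545594 * 1e7),
    alt=488 * 1000,                  # millimeters above mean sea level
    eph=100, epv=100,                # dilution of position in cm
    vel=0, vn=0, ve=0, vd=0,         # speeds in cm/s
    cog=0,                           # course over ground, degrees * 100
    satellites_visible=10)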
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return plot_scatter(adata, 'tsne', **kwargs) | \
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. | Below is the the instruction that describes the task:
### Input:
\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
### Response:
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return plot_scatter(adata, 'tsne', **kwargs) |
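This wrapper matches scanpy's `sc.pl.tsne`; a hedged usage sketch with a demo dataset bundled with scanpy (the annotation column name is an assumption about that dataset):

import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()   # small demo AnnData object

sc.tl.tsne(adata)                       # compute the embedding
ax = sc.pl.tsne(adata, color='bulk_labels', show=False)   # returns an Axes when show=False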