code (stringlengths 75-104k) | docstring (stringlengths 1-46.9k) | text (stringlengths 164-112k) |
def update(self, observable, handlers):
"""Toolbar ReaderObserver callback that is notified when
readers are added or removed."""
addedreaders, removedreaders = handlers
for reader in addedreaders:
item = self.Append(str(reader))
self.SetClientData(item, reader)
for reader in removedreaders:
item = self.FindString(str(reader))
if wx.NOT_FOUND != item:
self.Delete(item)
selection = self.GetSelection() | Toolbar ReaderObserver callback that is notified when
readers are added or removed. | Below is the instruction that describes the task:
### Input:
Toolbar ReaderObserver callback that is notified when
readers are added or removed.
### Response:
def update(self, observable, handlers):
"""Toolbar ReaderObserver callback that is notified when
readers are added or removed."""
addedreaders, removedreaders = handlers
for reader in addedreaders:
item = self.Append(str(reader))
self.SetClientData(item, reader)
for reader in removedreaders:
item = self.FindString(str(reader))
if wx.NOT_FOUND != item:
self.Delete(item)
selection = self.GetSelection() |
def _build_rhs(p, q, deriv):
"""The right hand side of the equation system matrix"""
b = [0 for _ in range(p+q+1)]
b[deriv] = math.factorial(deriv)
return np.array(b) | The right hand side of the equation system matrix | Below is the instruction that describes the task:
### Input:
The right hand side of the equation system matrix
### Response:
def _build_rhs(p, q, deriv):
"""The right hand side of the equation system matrix"""
b = [0 for _ in range(p+q+1)]
b[deriv] = math.factorial(deriv)
return np.array(b) |
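For illustration, a quick check of the vectors `_build_rhs` produces. This is a standalone sketch that simply re-runs the snippet above and assumes only `math` and `numpy`:

```python
import math

import numpy as np


def _build_rhs(p, q, deriv):
    """The right hand side of the equation system matrix"""
    b = [0 for _ in range(p + q + 1)]
    b[deriv] = math.factorial(deriv)
    return np.array(b)


# One point on each side of the stencil (p = q = 1), first derivative:
# a length-3 vector with 1! placed at index 1.
print(_build_rhs(1, 1, 1))  # [0 1 0]

# Two points on each side (p = q = 2), second derivative: 2! at index 2.
print(_build_rhs(2, 2, 2))  # [0 0 2 0 0]
```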
def import_surf_mesh(file_name):
""" Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface
"""
raw_content = read_file(file_name)
raw_content = raw_content.split("\n")
content = []
for rc in raw_content:
temp = rc.strip().split()
content.append(temp)
# 1st line defines the dimension and it must be 3
if int(content[0][0]) != 3:
raise TypeError("Input mesh '" + str(file_name) + "' must be 3-dimensional")
# Create a NURBS surface instance and fill with the data read from mesh file
surf = shortcuts.generate_surface(rational=True)
# 2nd line is the degrees
surf.degree_u = int(content[1][0])
surf.degree_v = int(content[1][1])
# 3rd line is the number of weighted control points in u and v directions
dim_u = int(content[2][0])
dim_v = int(content[2][1])
# Starting from 6th line, we have the weighted control points
ctrlpts_end = 5 + (dim_u * dim_v)
ctrlpts_mesh = content[5:ctrlpts_end]
# mesh files have the control points in u-row order format
ctrlpts = compatibility.flip_ctrlpts_u(ctrlpts_mesh, dim_u, dim_v)
# mesh files store control points in format (x, y, z, w)
ctrlptsw = compatibility.generate_ctrlptsw(ctrlpts)
# Set control points
surf.set_ctrlpts(ctrlptsw, dim_u, dim_v)
# 4th and 5th lines are knot vectors
surf.knotvector_u = [float(u) for u in content[3]]
surf.knotvector_v = [float(v) for v in content[4]]
# Return the surface instance
return surf | Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface | Below is the instruction that describes the task:
### Input:
Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface
### Response:
def import_surf_mesh(file_name):
""" Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface
"""
raw_content = read_file(file_name)
raw_content = raw_content.split("\n")
content = []
for rc in raw_content:
temp = rc.strip().split()
content.append(temp)
# 1st line defines the dimension and it must be 3
if int(content[0][0]) != 3:
raise TypeError("Input mesh '" + str(file_name) + "' must be 3-dimensional")
# Create a NURBS surface instance and fill with the data read from mesh file
surf = shortcuts.generate_surface(rational=True)
# 2nd line is the degrees
surf.degree_u = int(content[1][0])
surf.degree_v = int(content[1][1])
# 3rd line is the number of weighted control points in u and v directions
dim_u = int(content[2][0])
dim_v = int(content[2][1])
# Starting from 6th line, we have the weighted control points
ctrlpts_end = 5 + (dim_u * dim_v)
ctrlpts_mesh = content[5:ctrlpts_end]
# mesh files have the control points in u-row order format
ctrlpts = compatibility.flip_ctrlpts_u(ctrlpts_mesh, dim_u, dim_v)
# mesh files store control points in format (x, y, z, w)
ctrlptsw = compatibility.generate_ctrlptsw(ctrlpts)
# Set control points
surf.set_ctrlpts(ctrlptsw, dim_u, dim_v)
# 4th and 5th lines are knot vectors
surf.knotvector_u = [float(u) for u in content[3]]
surf.knotvector_v = [float(v) for v in content[4]]
# Return the surface instance
return surf |
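Based on how `import_surf_mesh` indexes `content`, the expected file layout can be sketched as below. This is a hypothetical minimal example (a degree 1 x 1 surface with a 2 x 2 control grid), not a file taken from the library's documentation; whitespace handling is whatever `str.split()` accepts.

```python
# Hypothetical minimal mesh file for import_surf_mesh; the comments mirror the
# parser's line-by-line interpretation of `content`.
mesh_text = "\n".join([
    "3",                  # line 1: dimension, must be 3
    "1 1",                # line 2: degree_u degree_v
    "2 2",                # line 3: dim_u dim_v (control points per direction)
    "0 0 1 1",            # line 4: knot vector in u
    "0 0 1 1",            # line 5: knot vector in v
    "0.0 0.0 0.0 1.0",    # lines 6 onward: dim_u * dim_v weighted control points,
    "1.0 0.0 0.0 1.0",    # one "x y z w" tuple per line, in u-row order
    "0.0 1.0 0.5 1.0",
    "1.0 1.0 0.0 1.0",
])

with open("minimal.mesh", "w") as fp:
    fp.write(mesh_text)

# surf = import_surf_mesh("minimal.mesh")
```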
def attitude_quaternion_encode(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
'''
The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right), expressed as quaternion. Quaternion order is
w, x, y, z and a zero rotation would be expressed as
(1 0 0 0).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
q1 : Quaternion component 1, w (1 in null-rotation) (float)
q2 : Quaternion component 2, x (0 in null-rotation) (float)
q3 : Quaternion component 3, y (0 in null-rotation) (float)
q4 : Quaternion component 4, z (0 in null-rotation) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float)
'''
return MAVLink_attitude_quaternion_message(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed) | The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right), expressed as quaternion. Quaternion order is
w, x, y, z and a zero rotation would be expressed as
(1 0 0 0).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
q1 : Quaternion component 1, w (1 in null-rotation) (float)
q2 : Quaternion component 2, x (0 in null-rotation) (float)
q3 : Quaternion component 3, y (0 in null-rotation) (float)
q4 : Quaternion component 4, z (0 in null-rotation) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float) | Below is the instruction that describes the task:
### Input:
The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right), expressed as quaternion. Quaternion order is
w, x, y, z and a zero rotation would be expressed as
(1 0 0 0).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
q1 : Quaternion component 1, w (1 in null-rotation) (float)
q2 : Quaternion component 2, x (0 in null-rotation) (float)
q3 : Quaternion component 3, y (0 in null-rotation) (float)
q4 : Quaternion component 4, z (0 in null-rotation) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float)
### Response:
def attitude_quaternion_encode(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
'''
The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right), expressed as quaternion. Quaternion order is
w, x, y, z and a zero rotation would be expressed as
(1 0 0 0).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
q1 : Quaternion component 1, w (1 in null-rotation) (float)
q2 : Quaternion component 2, x (0 in null-rotation) (float)
q3 : Quaternion component 3, y (0 in null-rotation) (float)
q4 : Quaternion component 4, z (0 in null-rotation) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float)
'''
return MAVLink_attitude_quaternion_message(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed) |
def save(self, inplace=True):
"""
Saves all modification to the marker on the server.
:param inplace Apply edits on the current instance or get a new one.
:return: Marker instance.
"""
modified_data = self._modified_data()
if bool(modified_data):
extra = {
'resource': self.__class__.__name__,
'query': {
'id': self.id,
'modified_data': modified_data
}
}
logger.info('Saving marker', extra=extra)
data = self._api.patch(url=self._URL['get'].format(id=self.id),
data=modified_data).json()
marker = Marker(api=self._api, **data)
return marker
else:
raise ResourceNotModified() | Saves all modification to the marker on the server.
:param inplace Apply edits on the current instance or get a new one.
:return: Marker instance. | Below is the instruction that describes the task:
### Input:
Saves all modification to the marker on the server.
:param inplace Apply edits on the current instance or get a new one.
:return: Marker instance.
### Response:
def save(self, inplace=True):
"""
Saves all modification to the marker on the server.
:param inplace Apply edits on the current instance or get a new one.
:return: Marker instance.
"""
modified_data = self._modified_data()
if bool(modified_data):
extra = {
'resource': self.__class__.__name__,
'query': {
'id': self.id,
'modified_data': modified_data
}
}
logger.info('Saving marker', extra=extra)
data = self._api.patch(url=self._URL['get'].format(id=self.id),
data=modified_data).json()
marker = Marker(api=self._api, **data)
return marker
else:
raise ResourceNotModified() |
def load_data(self, sess, inputs, state_inputs):
"""Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device.
"""
if log_once("load_data"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize({
"placeholders": self.loss_inputs,
"inputs": inputs,
"state_inputs": state_inputs
})))
feed_dict = {}
assert len(self.loss_inputs) == len(inputs + state_inputs), \
(self.loss_inputs, inputs, state_inputs)
# Let's suppose we have the following input data, and 2 devices:
# 1 2 3 4 5 6 7 <- state inputs shape
# A A A B B B C C C D D D E E E F F F G G G <- inputs shape
# The data is truncated and split across devices as follows:
# |---| seq len = 3
# |---------------------------------| seq batch size = 6 seqs
# |----------------| per device batch size = 9 tuples
if len(state_inputs) > 0:
smallest_array = state_inputs[0]
seq_len = len(inputs[0]) // len(state_inputs[0])
self._loaded_max_seq_len = seq_len
else:
smallest_array = inputs[0]
self._loaded_max_seq_len = 1
sequences_per_minibatch = (
self.max_per_device_batch_size // self._loaded_max_seq_len * len(
self.devices))
if sequences_per_minibatch < 1:
logger.warn(
("Target minibatch size is {}, however the rollout sequence "
"length is {}, hence the minibatch size will be raised to "
"{}.").format(self.max_per_device_batch_size,
self._loaded_max_seq_len,
self._loaded_max_seq_len * len(self.devices)))
sequences_per_minibatch = 1
if len(smallest_array) < sequences_per_minibatch:
# Dynamically shrink the batch size if insufficient data
sequences_per_minibatch = make_divisible_by(
len(smallest_array), len(self.devices))
if log_once("data_slicing"):
logger.info(
("Divided {} rollout sequences, each of length {}, among "
"{} devices.").format(
len(smallest_array), self._loaded_max_seq_len,
len(self.devices)))
if sequences_per_minibatch < len(self.devices):
raise ValueError(
"Must load at least 1 tuple sequence per device. Try "
"increasing `sgd_minibatch_size` or reducing `max_seq_len` "
"to ensure that at least one sequence fits per device.")
self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
self.devices) * self._loaded_max_seq_len)
if len(state_inputs) > 0:
# First truncate the RNN state arrays to the sequences_per_minib.
state_inputs = [
make_divisible_by(arr, sequences_per_minibatch)
for arr in state_inputs
]
# Then truncate the data inputs to match
inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
(len(state_inputs[0]), sequences_per_minibatch, seq_len,
len(inputs[0]))
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
feed_dict[ph] = arr
truncated_len = len(inputs[0])
else:
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
feed_dict[ph] = truncated_arr
truncated_len = len(truncated_arr)
sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
self.num_tuples_loaded = truncated_len
tuples_per_device = truncated_len // len(self.devices)
assert tuples_per_device > 0, "No data loaded?"
assert tuples_per_device % self._loaded_per_device_batch_size == 0
return tuples_per_device | Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device. | Below is the instruction that describes the task:
### Input:
Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device.
### Response:
def load_data(self, sess, inputs, state_inputs):
"""Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device.
"""
if log_once("load_data"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize({
"placeholders": self.loss_inputs,
"inputs": inputs,
"state_inputs": state_inputs
})))
feed_dict = {}
assert len(self.loss_inputs) == len(inputs + state_inputs), \
(self.loss_inputs, inputs, state_inputs)
# Let's suppose we have the following input data, and 2 devices:
# 1 2 3 4 5 6 7 <- state inputs shape
# A A A B B B C C C D D D E E E F F F G G G <- inputs shape
# The data is truncated and split across devices as follows:
# |---| seq len = 3
# |---------------------------------| seq batch size = 6 seqs
# |----------------| per device batch size = 9 tuples
if len(state_inputs) > 0:
smallest_array = state_inputs[0]
seq_len = len(inputs[0]) // len(state_inputs[0])
self._loaded_max_seq_len = seq_len
else:
smallest_array = inputs[0]
self._loaded_max_seq_len = 1
sequences_per_minibatch = (
self.max_per_device_batch_size // self._loaded_max_seq_len * len(
self.devices))
if sequences_per_minibatch < 1:
logger.warn(
("Target minibatch size is {}, however the rollout sequence "
"length is {}, hence the minibatch size will be raised to "
"{}.").format(self.max_per_device_batch_size,
self._loaded_max_seq_len,
self._loaded_max_seq_len * len(self.devices)))
sequences_per_minibatch = 1
if len(smallest_array) < sequences_per_minibatch:
# Dynamically shrink the batch size if insufficient data
sequences_per_minibatch = make_divisible_by(
len(smallest_array), len(self.devices))
if log_once("data_slicing"):
logger.info(
("Divided {} rollout sequences, each of length {}, among "
"{} devices.").format(
len(smallest_array), self._loaded_max_seq_len,
len(self.devices)))
if sequences_per_minibatch < len(self.devices):
raise ValueError(
"Must load at least 1 tuple sequence per device. Try "
"increasing `sgd_minibatch_size` or reducing `max_seq_len` "
"to ensure that at least one sequence fits per device.")
self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
self.devices) * self._loaded_max_seq_len)
if len(state_inputs) > 0:
# First truncate the RNN state arrays to the sequences_per_minib.
state_inputs = [
make_divisible_by(arr, sequences_per_minibatch)
for arr in state_inputs
]
# Then truncate the data inputs to match
inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
(len(state_inputs[0]), sequences_per_minibatch, seq_len,
len(inputs[0]))
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
feed_dict[ph] = arr
truncated_len = len(inputs[0])
else:
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
feed_dict[ph] = truncated_arr
truncated_len = len(truncated_arr)
sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
self.num_tuples_loaded = truncated_len
tuples_per_device = truncated_len // len(self.devices)
assert tuples_per_device > 0, "No data loaded?"
assert tuples_per_device % self._loaded_per_device_batch_size == 0
return tuples_per_device |
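`make_divisible_by` is referenced above but not shown. Judging from how it is called (once with an array, once with an integer) and from the docstring's note that excess data is discarded, it presumably truncates its argument down to the largest multiple of `n`. A sketch under that assumption, not the actual helper from the library:

```python
def make_divisible_by(a, n):
    # Round an int down, or drop trailing rows of an array/list,
    # so that the result is evenly divisible by n.
    if isinstance(a, int):
        return a - a % n
    return a[:len(a) - len(a) % n]
```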
def update_email_marketing_campaign(self, email_marketing_campaign,
name, email_content, from_email,
from_name, reply_to_email, subject,
text_content, address,
is_view_as_webpage_enabled=False,
view_as_web_page_link_text='',
view_as_web_page_text='',
is_permission_reminder_enabled=False,
permission_reminder_text=''):
"""Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object.
"""
url = self.api.join(
'/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL,
str(email_marketing_campaign.constant_contact_id)]))
inlined_email_content = self.inline_css(email_content)
minified_email_content = html_minify(inlined_email_content)
worked_around_email_content = work_around(minified_email_content)
data = {
'name': name,
'subject': subject,
'from_name': from_name,
'from_email': from_email,
'reply_to_email': reply_to_email,
'email_content': worked_around_email_content,
'email_content_format': 'HTML',
'text_content': text_content,
'message_footer': {
'organization_name': address['organization_name'],
'address_line_1': address['address_line_1'],
'address_line_2': address['address_line_2'],
'address_line_3': address['address_line_3'],
'city': address['city'],
'state': address['state'],
'international_state': address['international_state'],
'postal_code': address['postal_code'],
'country': address['country']
},
'is_view_as_webpage_enabled': is_view_as_webpage_enabled,
'view_as_web_page_link_text': view_as_web_page_link_text,
'view_as_web_page_text': view_as_web_page_text,
'is_permission_reminder_enabled': is_permission_reminder_enabled,
'permission_reminder_text': permission_reminder_text
}
response = url.put(data=json.dumps(data),
headers={'content-type': 'application/json'})
self.handle_response_status(response)
email_marketing_campaign.data = response.json()
email_marketing_campaign.save()
return email_marketing_campaign | Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object. | Below is the instruction that describes the task:
### Input:
Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object.
### Response:
def update_email_marketing_campaign(self, email_marketing_campaign,
name, email_content, from_email,
from_name, reply_to_email, subject,
text_content, address,
is_view_as_webpage_enabled=False,
view_as_web_page_link_text='',
view_as_web_page_text='',
is_permission_reminder_enabled=False,
permission_reminder_text=''):
"""Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object.
"""
url = self.api.join(
'/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL,
str(email_marketing_campaign.constant_contact_id)]))
inlined_email_content = self.inline_css(email_content)
minified_email_content = html_minify(inlined_email_content)
worked_around_email_content = work_around(minified_email_content)
data = {
'name': name,
'subject': subject,
'from_name': from_name,
'from_email': from_email,
'reply_to_email': reply_to_email,
'email_content': worked_around_email_content,
'email_content_format': 'HTML',
'text_content': text_content,
'message_footer': {
'organization_name': address['organization_name'],
'address_line_1': address['address_line_1'],
'address_line_2': address['address_line_2'],
'address_line_3': address['address_line_3'],
'city': address['city'],
'state': address['state'],
'international_state': address['international_state'],
'postal_code': address['postal_code'],
'country': address['country']
},
'is_view_as_webpage_enabled': is_view_as_webpage_enabled,
'view_as_web_page_link_text': view_as_web_page_link_text,
'view_as_web_page_text': view_as_web_page_text,
'is_permission_reminder_enabled': is_permission_reminder_enabled,
'permission_reminder_text': permission_reminder_text
}
response = url.put(data=json.dumps(data),
headers={'content-type': 'application/json'})
self.handle_response_status(response)
email_marketing_campaign.data = response.json()
email_marketing_campaign.save()
return email_marketing_campaign |
def get_process_params(xmldoc, program, param, require_unique_program = True):
"""
Return a list of the values stored in the process_params table for
params named param for the program(s) named program. The values
are returned as Python native types, not as the strings appearing
in the XML document. If require_unique_program is True (default),
then the document must contain exactly one program with the
requested name, otherwise ValueError is raised. If
require_unique_program is not True, then there must be at least one
program with the requested name otherwise ValueError is raised.
"""
process_ids = lsctables.ProcessTable.get_table(xmldoc).get_ids_by_program(program)
if len(process_ids) < 1:
raise ValueError("process table must contain at least one program named '%s'" % program)
elif require_unique_program and len(process_ids) != 1:
raise ValueError("process table must contain exactly one program named '%s'" % program)
return [row.pyvalue for row in lsctables.ProcessParamsTable.get_table(xmldoc) if (row.process_id in process_ids) and (row.param == param)] | Return a list of the values stored in the process_params table for
params named param for the program(s) named program. The values
are returned as Python native types, not as the strings appearing
in the XML document. If require_unique_program is True (default),
then the document must contain exactly one program with the
requested name, otherwise ValueError is raised. If
require_unique_program is not True, then there must be at least one
program with the requested name otherwise ValueError is raised. | Below is the instruction that describes the task:
### Input:
Return a list of the values stored in the process_params table for
params named param for the program(s) named program. The values
are returned as Python native types, not as the strings appearing
in the XML document. If require_unique_program is True (default),
then the document must contain exactly one program with the
requested name, otherwise ValueError is raised. If
require_unique_program is not True, then there must be at least one
program with the requested name otherwise ValueError is raised.
### Response:
def get_process_params(xmldoc, program, param, require_unique_program = True):
"""
Return a list of the values stored in the process_params table for
params named param for the program(s) named program. The values
are returned as Python native types, not as the strings appearing
in the XML document. If require_unique_program is True (default),
then the document must contain exactly one program with the
requested name, otherwise ValueError is raised. If
require_unique_program is not True, then there must be at least one
program with the requested name otherwise ValueError is raised.
"""
process_ids = lsctables.ProcessTable.get_table(xmldoc).get_ids_by_program(program)
if len(process_ids) < 1:
raise ValueError("process table must contain at least one program named '%s'" % program)
elif require_unique_program and len(process_ids) != 1:
raise ValueError("process table must contain exactly one program named '%s'" % program)
return [row.pyvalue for row in lsctables.ProcessParamsTable.get_table(xmldoc) if (row.process_id in process_ids) and (row.param == param)] |
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = []
if self.CASRN in CRC_inorg_s_const_data.index:
methods.append(CRC_INORG_S)
self.CRC_INORG_S_Vm = float(CRC_inorg_s_const_data.at[self.CASRN, 'Vm'])
# if all((self.Tt, self.Vml_Tt, self.MW)):
# self.rhol_Tt = Vm_to_rho(self.Vml_Tt, self.MW)
# methods.append(GOODMAN)
self.all_methods = set(methods) | r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters. | Below is the instruction that describes the task:
### Input:
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
### Response:
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = []
if self.CASRN in CRC_inorg_s_const_data.index:
methods.append(CRC_INORG_S)
self.CRC_INORG_S_Vm = float(CRC_inorg_s_const_data.at[self.CASRN, 'Vm'])
# if all((self.Tt, self.Vml_Tt, self.MW)):
# self.rhol_Tt = Vm_to_rho(self.Vml_Tt, self.MW)
# methods.append(GOODMAN)
self.all_methods = set(methods) |
def many_psds(k=2,fs=1.0, b0=1.0, N=1024):
""" compute average of many PSDs """
psd=[]
for j in range(k):
print(j)
x = noise.white(N=2*4096,b0=b0,fs=fs)
f, tmp = noise.numpy_psd(x,fs)
if j==0:
psd = tmp
else:
psd = psd + tmp
return f, psd/k | compute average of many PSDs | Below is the instruction that describes the task:
### Input:
compute average of many PSDs
### Response:
def many_psds(k=2,fs=1.0, b0=1.0, N=1024):
""" compute average of many PSDs """
psd=[]
for j in range(k):
print(j)
x = noise.white(N=2*4096,b0=b0,fs=fs)
f, tmp = noise.numpy_psd(x,fs)
if j==0:
psd = tmp
else:
psd = psd + tmp
return f, psd/k |
def ping(self, timeout=12):
"""
Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active
"""
self.conn("POST", "{0}/users/ME/endpoints/{1}/active".format(self.conn.msgsHost, self.id),
auth=SkypeConnection.Auth.RegToken, json={"timeout": timeout}) | Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active | Below is the instruction that describes the task:
### Input:
Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active
### Response:
def ping(self, timeout=12):
"""
Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active
"""
self.conn("POST", "{0}/users/ME/endpoints/{1}/active".format(self.conn.msgsHost, self.id),
auth=SkypeConnection.Auth.RegToken, json={"timeout": timeout}) |
def jarFlags(target, source, env, for_signature):
"""If we have a manifest, make sure that the 'm'
flag is specified."""
jarflags = env.subst('$JARFLAGS', target=target, source=source)
for src in source:
contents = src.get_text_contents()
if contents[:16] == "Manifest-Version":
if not 'm' in jarflags:
return jarflags + 'm'
break
return jarflags | If we have a manifest, make sure that the 'm'
flag is specified. | Below is the instruction that describes the task:
### Input:
If we have a manifest, make sure that the 'm'
flag is specified.
### Response:
def jarFlags(target, source, env, for_signature):
"""If we have a manifest, make sure that the 'm'
flag is specified."""
jarflags = env.subst('$JARFLAGS', target=target, source=source)
for src in source:
contents = src.get_text_contents()
if contents[:16] == "Manifest-Version":
if not 'm' in jarflags:
return jarflags + 'm'
break
return jarflags |
def blend_html_colour_to_white(html_colour, alpha):
"""
:param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white
"""
html_colour = html_colour.upper()
has_hash = False
if html_colour[0] == '#':
has_hash = True
html_colour = html_colour[1:]
r_str = html_colour[0:2]
g_str = html_colour[2:4]
b_str = html_colour[4:6]
r = int(r_str, 16)
g = int(g_str, 16)
b = int(b_str, 16)
r = int(alpha * r + (1 - alpha) * 255)
g = int(alpha * g + (1 - alpha) * 255)
b = int(alpha * b + (1 - alpha) * 255)
out = '{:02X}{:02X}{:02X}'.format(r, g, b)
if has_hash:
out = '#' + out
return out | :param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white | Below is the instruction that describes the task:
### Input:
:param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white
### Response:
def blend_html_colour_to_white(html_colour, alpha):
"""
:param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white
"""
html_colour = html_colour.upper()
has_hash = False
if html_colour[0] == '#':
has_hash = True
html_colour = html_colour[1:]
r_str = html_colour[0:2]
g_str = html_colour[2:4]
b_str = html_colour[4:6]
r = int(r_str, 16)
g = int(g_str, 16)
b = int(b_str, 16)
r = int(alpha * r + (1 - alpha) * 255)
g = int(alpha * g + (1 - alpha) * 255)
b = int(alpha * b + (1 - alpha) * 255)
out = '{:02X}{:02X}{:02X}'.format(r, g, b)
if has_hash:
out = '#' + out
return out |
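A short worked example of the blend, assuming the function above is in scope: with alpha 0.5 over white, pure red becomes half red, half white, and the leading '#' is preserved only when the input had one.

```python
print(blend_html_colour_to_white('#FF0000', 0.5))   # '#FF7F7F'
print(blend_html_colour_to_white('334455', 0.25))   # 'CCD0D4'
```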
def make_github_markdown_writer(opts):
"""
Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation
opts is a namespace object containing runtime options. It should
generally include the following attributes:
* 'open': a string corresponding to the opening portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '{'
* 'close': a string corresponding to the closing portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': An escaped regular expression that matches tags
located inside of wrappers
:param opts: namespace object, usually created from command-line
arguments, that is used to pass runtime options to concrete
WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
AnchorHub tags to converted anchors in Markdown files using GitHub style
anchors
"""
assert hasattr(opts, 'wrapper_regex')
atx = MarkdownATXWriterStrategy(opts, 'ATX headers')
setext = MarkdownSetextWriterStrategy(opts, 'Setext headers')
inline = MarkdownInlineLinkWriterStrategy(opts, 'inline links')
ref = MarkdownReferenceLinkWriterStrategy(opts, 'reference links')
code_block_switch = ghswitches.code_block_switch
strategies = [atx, setext, inline, ref]
switches = [code_block_switch]
return Writer(strategies, switches=switches) | Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation
opts is a namespace object containing runtime options. It should
generally include the following attributes:
* 'open': a string corresponding to the opening portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '{'
* 'close': a string corresponding to the closing portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': An escaped regular expression that matches tags
located inside of wrappers
:param opts: namespace object, usually created from command-line
arguments, that is used to pass runtime options to concrete
WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
AnchorHub tags to converted anchors in Markdown files using GitHub style
anchors | Below is the instruction that describes the task:
### Input:
Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation
opts is a namespace object containing runtime options. It should
generally include the following attributes:
* 'open': a string corresponding to the opening portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '{'
* 'close': a string corresponding to the closing portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': An escaped regular expression that matches tags
located inside of wrappers
:param opts: namespace object, usually created from command-line
arguments, that is used to pass runtime options to concrete
WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
AnchorHub tags to converted anchors in Markdown files using GitHub style
anchors
### Response:
def make_github_markdown_writer(opts):
"""
Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation
opts is a namespace object containing runtime options. It should
generally include the following attributes:
* 'open': a string corresponding to the opening portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '{'
* 'close': a string corresponding to the closing portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': An escaped regular expression that matches tags
located inside of wrappers
:param opts: namespace object, usually created from command-line
arguments, that is used to pass runtime options to concrete
WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
AnchorHub tags to converted anchors in Markdown files using GitHub style
anchors
"""
assert hasattr(opts, 'wrapper_regex')
atx = MarkdownATXWriterStrategy(opts, 'ATX headers')
setext = MarkdownSetextWriterStrategy(opts, 'Setext headers')
inline = MarkdownInlineLinkWriterStrategy(opts, 'inline links')
ref = MarkdownReferenceLinkWriterStrategy(opts, 'reference links')
code_block_switch = ghswitches.code_block_switch
strategies = [atx, setext, inline, ref]
switches = [code_block_switch]
return Writer(strategies, switches=switches) |
def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb | Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes | Below is the instruction that describes the task:
### Input:
Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
### Response:
def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb |
def _load_market_scheme(self):
''' Load market yaml description '''
try:
self.scheme = yaml.load(open(self.scheme_path, 'r'))
except Exception as error:
raise LoadMarketSchemeFailed(reason=error) | Load market yaml description | Below is the instruction that describes the task:
### Input:
Load market yaml description
### Response:
def _load_market_scheme(self):
''' Load market yaml description '''
try:
self.scheme = yaml.load(open(self.scheme_path, 'r'))
except Exception as error:
raise LoadMarketSchemeFailed(reason=error) |
def __capture(self, checkout_id, **kwargs):
"""Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'checkout_id': checkout_id
}
return self.make_call(self.__capture, params, kwargs) | Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` | Below is the instruction that describes the task:
### Input:
Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
### Response:
def __capture(self, checkout_id, **kwargs):
"""Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'checkout_id': checkout_id
}
return self.make_call(self.__capture, params, kwargs) |
def make_list(var, num_terms=1):
""" Make a variable a list if it is not already
If variable is not a list it will make it a list of the correct length with
all terms identical.
"""
if not isinstance(var, list):
if isinstance(var, tuple):
var = list(var)
else:
var = [var]
#if len(var) == 1:
for _ in range(1, num_terms):
var.append(var[0])
return var | Make a variable a list if it is not already
If variable is not a list it will make it a list of the correct length with
all terms identical. | Below is the instruction that describes the task:
### Input:
Make a variable a list if it is not already
If variable is not a list it will make it a list of the correct length with
all terms identical.
### Response:
def make_list(var, num_terms=1):
""" Make a variable a list if it is not already
If variable is not a list it will make it a list of the correct length with
all terms identical.
"""
if not isinstance(var, list):
if isinstance(var, tuple):
var = list(var)
else:
var = [var]
#if len(var) == 1:
for _ in range(1, num_terms):
var.append(var[0])
return var |
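A few illustrative calls, assuming the function above is in scope and following the branches of its code (note that a value which is already a list is returned as-is, without being padded to `num_terms`):

```python
print(make_list(5, num_terms=3))       # [5, 5, 5]   a scalar is wrapped and repeated
print(make_list((7,), num_terms=3))    # [7, 7, 7]   a 1-tuple is converted, then padded
print(make_list([1, 2], num_terms=3))  # [1, 2]      an existing list passes through unchanged
```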
def section_menu(
context, show_section_root=True, show_multiple_levels=True,
apply_active_classes=True, allow_repeating_parents=True,
max_levels=settings.DEFAULT_SECTION_MENU_MAX_LEVELS,
template='', sub_menu_template='', sub_menu_templates=None,
use_specific=settings.DEFAULT_SECTION_MENU_USE_SPECIFIC,
use_absolute_page_urls=False, add_sub_menus_inline=None, **kwargs
):
"""Render a section menu for the current section."""
validate_supplied_values('section_menu', max_levels=max_levels,
use_specific=use_specific)
if not show_multiple_levels:
max_levels = 1
menu_class = settings.objects.SECTION_MENU_CLASS
return menu_class.render_from_tag(
context=context,
max_levels=max_levels,
use_specific=use_specific,
apply_active_classes=apply_active_classes,
allow_repeating_parents=allow_repeating_parents,
use_absolute_page_urls=use_absolute_page_urls,
add_sub_menus_inline=add_sub_menus_inline,
template_name=template,
sub_menu_template_name=sub_menu_template,
sub_menu_template_names=split_if_string(sub_menu_templates),
show_section_root=show_section_root,
**kwargs
) | Render a section menu for the current section. | Below is the instruction that describes the task:
### Input:
Render a section menu for the current section.
### Response:
def section_menu(
context, show_section_root=True, show_multiple_levels=True,
apply_active_classes=True, allow_repeating_parents=True,
max_levels=settings.DEFAULT_SECTION_MENU_MAX_LEVELS,
template='', sub_menu_template='', sub_menu_templates=None,
use_specific=settings.DEFAULT_SECTION_MENU_USE_SPECIFIC,
use_absolute_page_urls=False, add_sub_menus_inline=None, **kwargs
):
"""Render a section menu for the current section."""
validate_supplied_values('section_menu', max_levels=max_levels,
use_specific=use_specific)
if not show_multiple_levels:
max_levels = 1
menu_class = settings.objects.SECTION_MENU_CLASS
return menu_class.render_from_tag(
context=context,
max_levels=max_levels,
use_specific=use_specific,
apply_active_classes=apply_active_classes,
allow_repeating_parents=allow_repeating_parents,
use_absolute_page_urls=use_absolute_page_urls,
add_sub_menus_inline=add_sub_menus_inline,
template_name=template,
sub_menu_template_name=sub_menu_template,
sub_menu_template_names=split_if_string(sub_menu_templates),
show_section_root=show_section_root,
**kwargs
) |
def export_yaml(obj, file_name):
""" Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
# Ref: https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
stream = StringIO()
yaml = YAML()
yaml.dump(data, stream)
return stream.getvalue()
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data) | Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file | Below is the instruction that describes the task:
### Input:
Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
### Response:
def export_yaml(obj, file_name):
""" Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
# Ref: https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
stream = StringIO()
yaml = YAML()
yaml.dump(data, stream)
return stream.getvalue()
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data) |
def p_single_statement_systemcall(self, p):
'single_statement : systemcall SEMICOLON'
p[0] = SingleStatement(p[1], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | single_statement : systemcall SEMICOLON | Below is the instruction that describes the task:
### Input:
single_statement : systemcall SEMICOLON
### Response:
def p_single_statement_systemcall(self, p):
'single_statement : systemcall SEMICOLON'
p[0] = SingleStatement(p[1], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def _set_vlan_add(self, v, load=False):
"""
Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_add() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan_add must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__vlan_add = t
if hasattr(self, '_set'):
self._set() | Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_add() directly. | Below is the instruction that describes the task:
### Input:
Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_add() directly.
### Response:
def _set_vlan_add(self, v, load=False):
"""
Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_add() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan_add must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__vlan_add = t
if hasattr(self, '_set'):
self._set() |
def collect_results(project, force=False):
"""collect_results."""
if not project.crawlable:
return project
now = datetime.datetime.now()
if (now - project.updated_at).total_seconds() < 4 and (not force):
return project
result_paths = []
if os.path.isdir(project.path_name):
result_paths.extend(_list_result_paths(project.path_name))
registered_results = db.session.query(Result.path_name).filter_by(
project_id=project.id
).all()
registered_paths = {r.path_name for r in registered_results}
for result_path in result_paths:
if result_path not in registered_paths:
_register_result(project.id, result_path)
project.updated_at = datetime.datetime.now()
db.session.commit()
return project | collect_results. | Below is the instruction that describes the task:
### Input:
collect_results.
### Response:
def collect_results(project, force=False):
"""collect_results."""
if not project.crawlable:
return project
now = datetime.datetime.now()
if (now - project.updated_at).total_seconds() < 4 and (not force):
return project
result_paths = []
if os.path.isdir(project.path_name):
result_paths.extend(_list_result_paths(project.path_name))
registered_results = db.session.query(Result.path_name).filter_by(
project_id=project.id
).all()
registered_paths = {r.path_name for r in registered_results}
for result_path in result_paths:
if result_path not in registered_paths:
_register_result(project.id, result_path)
project.updated_at = datetime.datetime.now()
db.session.commit()
return project |
def ref(function, callback=None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback) | Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies. | Below is the instruction that describes the task:
### Input:
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies.
### Response:
def ref(function, callback=None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback) |
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
"""
Predict with data.
NOTE: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call bst.copy() to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
Returns
-------
prediction : numpy array
"""
option_mask = 0x00
if output_margin:
option_mask |= 0x01
if pred_leaf:
option_mask |= 0x02
self._validate_features(data)
length = ctypes.c_ulong()
preds = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
option_mask, ntree_limit,
ctypes.byref(length),
ctypes.byref(preds)))
preds = ctypes2numpy(preds, length.value, np.float32)
if pred_leaf:
preds = preds.astype(np.int32)
nrow = data.num_row()
if preds.size != nrow and preds.size % nrow == 0:
            preds = preds.reshape(nrow, preds.size // nrow)
return preds | Predict with data.
NOTE: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call bst.copy() to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
Returns
-------
prediction : numpy array | Below is the the instruction that describes the task:
### Input:
Predict with data.
NOTE: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call bst.copy() to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
Returns
-------
prediction : numpy array
### Response:
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
"""
Predict with data.
NOTE: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call bst.copy() to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
Returns
-------
prediction : numpy array
"""
option_mask = 0x00
if output_margin:
option_mask |= 0x01
if pred_leaf:
option_mask |= 0x02
self._validate_features(data)
length = ctypes.c_ulong()
preds = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
option_mask, ntree_limit,
ctypes.byref(length),
ctypes.byref(preds)))
preds = ctypes2numpy(preds, length.value, np.float32)
if pred_leaf:
preds = preds.astype(np.int32)
nrow = data.num_row()
if preds.size != nrow and preds.size % nrow == 0:
            preds = preds.reshape(nrow, preds.size // nrow)
return preds |
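A usage sketch for predict(), assuming xgboost is importable and a small Booster has been trained on random data; the feature counts and training parameters are illustrative only.

import numpy as np
import xgboost as xgb

X = np.random.rand(100, 5)
y = np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({'objective': 'binary:logistic', 'max_depth': 3}, dtrain, num_boost_round=10)

dtest = xgb.DMatrix(np.random.rand(10, 5))
probs = bst.predict(dtest)                           # transformed scores
margins = bst.predict(dtest, output_margin=True)     # raw untransformed margins
leaves = bst.predict(dtest, pred_leaf=True)          # (nsample, ntrees) leaf indices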
def index(self):
'''
Index all the keys added so far and make them searchable.
'''
for i, hashtable in enumerate(self.hashtables):
self.sorted_hashtables[i] = [H for H in hashtable.keys()]
self.sorted_hashtables[i].sort() | Index all the keys added so far and make them searchable. | Below is the the instruction that describes the task:
### Input:
Index all the keys added so far and make them searchable.
### Response:
def index(self):
'''
Index all the keys added so far and make them searchable.
'''
for i, hashtable in enumerate(self.hashtables):
self.sorted_hashtables[i] = [H for H in hashtable.keys()]
self.sorted_hashtables[i].sort() |
def _get_task_from_task_dir(self, job_id, user_id, task_id, task_attempt):
"""Return a Task object with this task's info."""
# We need to be very careful about how we read and interpret the contents
# of the task directory. The directory could be changing because a new
# task is being created. The directory could be changing because a task
# is ending.
#
# If the meta.yaml does not exist, the task does not yet exist.
# If the meta.yaml exists, it means the task is scheduled. It does not mean
# it is yet running.
# If the task.pid file exists, it means that the runner.sh was started.
task_dir = self._task_directory(job_id, task_id, task_attempt)
job_descriptor = self._read_task_metadata(task_dir)
if not job_descriptor:
return None
# If we read up an old task, the user-id will not be in the job_descriptor.
if not job_descriptor.job_metadata.get('user-id'):
job_descriptor.job_metadata['user-id'] = user_id
# Get the pid of the runner
pid = -1
try:
with open(os.path.join(task_dir, 'task.pid'), 'r') as f:
pid = int(f.readline().strip())
except (IOError, OSError):
pass
# Get the script contents
script = None
script_name = job_descriptor.job_metadata.get('script-name')
if script_name:
script = self._read_script(task_dir, script_name)
# Read the files written by the runner.sh.
# For new tasks, these may not have been written yet.
end_time = self._get_end_time_from_task_dir(task_dir)
last_update = self._get_last_update_time_from_task_dir(task_dir)
events = self._get_events_from_task_dir(task_dir)
status = self._get_status_from_task_dir(task_dir)
log_detail = self._get_log_detail_from_task_dir(task_dir)
# If the status file is not yet written, then mark the task as pending
if not status:
status = 'RUNNING'
log_detail = ['Pending']
return LocalTask(
task_status=status,
events=events,
log_detail=log_detail,
job_descriptor=job_descriptor,
end_time=end_time,
last_update=last_update,
pid=pid,
script=script) | Return a Task object with this task's info. | Below is the the instruction that describes the task:
### Input:
Return a Task object with this task's info.
### Response:
def _get_task_from_task_dir(self, job_id, user_id, task_id, task_attempt):
"""Return a Task object with this task's info."""
# We need to be very careful about how we read and interpret the contents
# of the task directory. The directory could be changing because a new
# task is being created. The directory could be changing because a task
# is ending.
#
# If the meta.yaml does not exist, the task does not yet exist.
# If the meta.yaml exists, it means the task is scheduled. It does not mean
# it is yet running.
# If the task.pid file exists, it means that the runner.sh was started.
task_dir = self._task_directory(job_id, task_id, task_attempt)
job_descriptor = self._read_task_metadata(task_dir)
if not job_descriptor:
return None
# If we read up an old task, the user-id will not be in the job_descriptor.
if not job_descriptor.job_metadata.get('user-id'):
job_descriptor.job_metadata['user-id'] = user_id
# Get the pid of the runner
pid = -1
try:
with open(os.path.join(task_dir, 'task.pid'), 'r') as f:
pid = int(f.readline().strip())
except (IOError, OSError):
pass
# Get the script contents
script = None
script_name = job_descriptor.job_metadata.get('script-name')
if script_name:
script = self._read_script(task_dir, script_name)
# Read the files written by the runner.sh.
# For new tasks, these may not have been written yet.
end_time = self._get_end_time_from_task_dir(task_dir)
last_update = self._get_last_update_time_from_task_dir(task_dir)
events = self._get_events_from_task_dir(task_dir)
status = self._get_status_from_task_dir(task_dir)
log_detail = self._get_log_detail_from_task_dir(task_dir)
# If the status file is not yet written, then mark the task as pending
if not status:
status = 'RUNNING'
log_detail = ['Pending']
return LocalTask(
task_status=status,
events=events,
log_detail=log_detail,
job_descriptor=job_descriptor,
end_time=end_time,
last_update=last_update,
pid=pid,
script=script) |
def buses_of_vlvl(network, voltage_level):
""" Get bus-ids of given voltage level(s).
Parameters
----------
    network : :class:`pypsa.Network`
Overall container of PyPSA
voltage_level: list
Returns
-------
list
List containing bus-ids.
"""
mask = network.buses.v_nom.isin(voltage_level)
df = network.buses[mask]
return df.index | Get bus-ids of given voltage level(s).
Parameters
----------
network : :class:`pypsa.Network`
Overall container of PyPSA
voltage_level: list
Returns
-------
list
List containing bus-ids. | Below is the the instruction that describes the task:
### Input:
Get bus-ids of given voltage level(s).
Parameters
----------
network : :class:`pypsa.Network`
Overall container of PyPSA
voltage_level: list
Returns
-------
list
List containing bus-ids.
### Response:
def buses_of_vlvl(network, voltage_level):
""" Get bus-ids of given voltage level(s).
Parameters
----------
    network : :class:`pypsa.Network`
Overall container of PyPSA
voltage_level: list
Returns
-------
list
List containing bus-ids.
"""
mask = network.buses.v_nom.isin(voltage_level)
df = network.buses[mask]
return df.index |
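A small sketch of calling buses_of_vlvl(), assuming a PyPSA network whose buses have already been imported; the network file name and voltage levels are examples.

import pypsa

network = pypsa.Network("scenario.nc")               # hypothetical pre-built network file
ehv_buses = buses_of_vlvl(network, [220., 380.])     # bus ids at 220 kV and 380 kV
print(len(ehv_buses), "extra-high-voltage buses")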
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
"""
Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change
"""
beta = 0
# See the OpenCV docs for more info on the `beta` parameter to addWeighted
# https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
return cv2.addWeighted(image,
1 + float(contrast) / 100.,
image,
beta,
float(brightness)) | Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change | Below is the the instruction that describes the task:
### Input:
Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change
### Response:
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
"""
Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change
"""
beta = 0
# See the OpenCV docs for more info on the `beta` parameter to addWeighted
# https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
return cv2.addWeighted(image,
1 + float(contrast) / 100.,
image,
beta,
float(brightness)) |
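Usage sketch for adjust_brightness_contrast(); the file names are placeholders.

import cv2

image = cv2.imread("input.jpg")                      # hypothetical input file
brighter = adjust_brightness_contrast(image, brightness=30.)
punchier = adjust_brightness_contrast(image, contrast=25.)
cv2.imwrite("adjusted.jpg",
            adjust_brightness_contrast(image, brightness=10., contrast=15.))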
def get_package_status(owner, repo, identifier):
"""Get the status for a package in a repository."""
client = get_packages_api()
with catch_raise_api_exception():
data, _, headers = client.packages_status_with_http_info(
owner=owner, repo=repo, identifier=identifier
)
ratelimits.maybe_rate_limit(client, headers)
# pylint: disable=no-member
# Pylint detects the returned value as a tuple
return (
data.is_sync_completed,
data.is_sync_failed,
data.sync_progress,
data.status_str,
data.stage_str,
data.status_reason,
) | Get the status for a package in a repository. | Below is the the instruction that describes the task:
### Input:
Get the status for a package in a repository.
### Response:
def get_package_status(owner, repo, identifier):
"""Get the status for a package in a repository."""
client = get_packages_api()
with catch_raise_api_exception():
data, _, headers = client.packages_status_with_http_info(
owner=owner, repo=repo, identifier=identifier
)
ratelimits.maybe_rate_limit(client, headers)
# pylint: disable=no-member
# Pylint detects the returned value as a tuple
return (
data.is_sync_completed,
data.is_sync_failed,
data.sync_progress,
data.status_str,
data.stage_str,
data.status_reason,
) |
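A polling sketch built on get_package_status(); the owner, repository, identifier and sleep interval are assumptions, not values from the source.

import time

while True:
    completed, failed, progress, status, stage, reason = get_package_status(
        "acme", "main-repo", "pkg-1234")             # hypothetical identifiers
    print("%s (%s): %s%%" % (status, stage, progress))
    if completed or failed:
        break
    time.sleep(5)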
def __read_and_render_yaml_file(source,
template,
saltenv):
'''
    Read a yaml file and, if needed, renders it using the specified
templating. Returns the python objects defined inside of the file.
'''
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
raise CommandExecutionError(
'Source file \'{0}\' not found'.format(source))
with salt.utils.files.fopen(sfn, 'r') as src:
contents = src.read()
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
# TODO: should we allow user to set also `context` like # pylint: disable=fixme
# `file.managed` does?
# Apply templating
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
contents,
from_str=True,
to_str=True,
saltenv=saltenv,
grains=__grains__,
pillar=__pillar__,
salt=__salt__,
opts=__opts__)
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to render file path with error: '
'{0}'.format(data['data'])
)
contents = data['data'].encode('utf-8')
else:
raise CommandExecutionError(
'Unknown template specified: {0}'.format(
template))
        return salt.utils.yaml.safe_load(contents) | Read a yaml file and, if needed, renders it using the specified
templating. Returns the python objects defined inside of the file. | Below is the the instruction that describes the task:
### Input:
Read a yaml file and, if needed, renders it using the specified
templating. Returns the python objects defined inside of the file.
### Response:
def __read_and_render_yaml_file(source,
template,
saltenv):
'''
    Read a yaml file and, if needed, renders it using the specified
templating. Returns the python objects defined inside of the file.
'''
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
raise CommandExecutionError(
'Source file \'{0}\' not found'.format(source))
with salt.utils.files.fopen(sfn, 'r') as src:
contents = src.read()
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
# TODO: should we allow user to set also `context` like # pylint: disable=fixme
# `file.managed` does?
# Apply templating
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
contents,
from_str=True,
to_str=True,
saltenv=saltenv,
grains=__grains__,
pillar=__pillar__,
salt=__salt__,
opts=__opts__)
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to render file path with error: '
'{0}'.format(data['data'])
)
contents = data['data'].encode('utf-8')
else:
raise CommandExecutionError(
'Unknown template specified: {0}'.format(
template))
return salt.utils.yaml.safe_load(contents) |
def _get_valid_endpoint(resp, name, entry_type):
"""
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
"""
catalog = resp.get('token', {}).get('catalog', [])
for entry in catalog:
if (
entry.get('name')
and entry.get('type')
and entry.get('name') == name
and entry.get('type') == entry_type
):
# Collect any endpoints on the public or internal interface
valid_endpoints = {}
for ep in entry.get('endpoints'):
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep.get('url')
if valid_endpoints:
# Favor public endpoints over internal
return valid_endpoints.get('public', valid_endpoints.get('internal'))
return None | Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog | Below is the the instruction that describes the task:
### Input:
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
### Response:
def _get_valid_endpoint(resp, name, entry_type):
"""
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
"""
catalog = resp.get('token', {}).get('catalog', [])
for entry in catalog:
if (
entry.get('name')
and entry.get('type')
and entry.get('name') == name
and entry.get('type') == entry_type
):
# Collect any endpoints on the public or internal interface
valid_endpoints = {}
for ep in entry.get('endpoints'):
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep.get('url')
if valid_endpoints:
# Favor public endpoints over internal
return valid_endpoints.get('public', valid_endpoints.get('internal'))
return None |
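An illustrative Identity API response fragment showing the selection order _get_valid_endpoint() applies: public endpoints are preferred over internal ones, and other interfaces are ignored. The catalog contents are invented for the example.

resp = {
    'token': {
        'catalog': [
            {
                'name': 'nova',
                'type': 'compute',
                'endpoints': [
                    {'interface': 'admin', 'url': 'http://10.0.0.1:8774/v2.1'},
                    {'interface': 'internal', 'url': 'http://10.0.0.2:8774/v2.1'},
                    {'interface': 'public', 'url': 'https://compute.example.com/v2.1'},
                ],
            }
        ]
    }
}

# Public wins over internal; the admin endpoint is ignored.
assert _get_valid_endpoint(resp, 'nova', 'compute') == 'https://compute.example.com/v2.1'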
def signal_alias_exists(alias: str) -> bool:
"""
Checks if signal alias exists.
:param alias: Signal alias.
:return:
"""
if SignalDispatcher.signals.get(alias):
return True
return False | Checks if signal alias exists.
:param alias: Signal alias.
:return: | Below is the the instruction that describes the task:
### Input:
Checks if signal alias exists.
:param alias: Signal alias.
:return:
### Response:
def signal_alias_exists(alias: str) -> bool:
"""
Checks if signal alias exists.
:param alias: Signal alias.
:return:
"""
if SignalDispatcher.signals.get(alias):
return True
return False |
def hdfpath_to_nifti1image(file_path, h5path):
"""Returns a nibabel Nifti1Image from a HDF5 group datasets
Parameters
----------
file_path: string
HDF5 file path
h5path:
HDF5 group path in file_path
Returns
-------
nibabel Nifti1Image
"""
with h5py.File(file_path, 'r') as f:
return hdfgroup_to_nifti1image(f[h5path]) | Returns a nibabel Nifti1Image from a HDF5 group datasets
Parameters
----------
file_path: string
HDF5 file path
h5path:
HDF5 group path in file_path
Returns
-------
nibabel Nifti1Image | Below is the the instruction that describes the task:
### Input:
Returns a nibabel Nifti1Image from a HDF5 group datasets
Parameters
----------
file_path: string
HDF5 file path
h5path:
HDF5 group path in file_path
Returns
-------
nibabel Nifti1Image
### Response:
def hdfpath_to_nifti1image(file_path, h5path):
"""Returns a nibabel Nifti1Image from a HDF5 group datasets
Parameters
----------
file_path: string
HDF5 file path
h5path:
HDF5 group path in file_path
Returns
-------
nibabel Nifti1Image
"""
with h5py.File(file_path, 'r') as f:
return hdfgroup_to_nifti1image(f[h5path]) |
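Usage sketch for hdfpath_to_nifti1image(), assuming an HDF5 file previously written by the matching save helper; the file and group names are illustrative.

img = hdfpath_to_nifti1image('subjects.h5', '/subject01/anat')   # hypothetical paths
print(img.shape, img.get_data_dtype())                           # standard nibabel accessors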
def main(inputstructs, inputpdbids):
"""Main function. Calls functions for processing, report generation and visualization."""
pdbid, pdbpath = None, None
# #@todo For multiprocessing, implement better stacktracing for errors
# Print title and version
title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
write_message('\n' + '*' * len(title) + '\n')
write_message(title)
write_message('\n' + '*' * len(title) + '\n\n')
outputprefix = config.OUTPUTFILENAME
if inputstructs is not None: # Process PDB file(s)
num_structures = len(inputstructs)
inputstructs = remove_duplicates(inputstructs)
read_from_stdin = False
for inputstruct in inputstructs:
if inputstruct == '-':
inputstruct = sys.stdin.read()
read_from_stdin = True
if config.RAWSTRING:
if sys.version_info < (3,):
inputstruct = bytes(inputstruct).decode('unicode_escape')
else:
inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
else:
if os.path.getsize(inputstruct) == 0:
sysexit(2, 'Empty PDB file\n') # Exit if input file is empty
if num_structures > 1:
basename = inputstruct.split('.')[-2].split('/')[-1]
config.OUTPATH = '/'.join([config.BASEPATH, basename])
outputprefix = 'report'
process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
    else: # Try to fetch the current PDB structure(s) directly from the RCSB server
num_pdbids = len(inputpdbids)
inputpdbids = remove_duplicates(inputpdbids)
for inputpdbid in inputpdbids:
pdbpath, pdbid = download_structure(inputpdbid)
if num_pdbids > 1:
config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
outputprefix = 'report'
process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)
if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
if config.BASEPATH in ['.', './']:
write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
else:
write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH) | Main function. Calls functions for processing, report generation and visualization. | Below is the the instruction that describes the task:
### Input:
Main function. Calls functions for processing, report generation and visualization.
### Response:
def main(inputstructs, inputpdbids):
"""Main function. Calls functions for processing, report generation and visualization."""
pdbid, pdbpath = None, None
# #@todo For multiprocessing, implement better stacktracing for errors
# Print title and version
title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
write_message('\n' + '*' * len(title) + '\n')
write_message(title)
write_message('\n' + '*' * len(title) + '\n\n')
outputprefix = config.OUTPUTFILENAME
if inputstructs is not None: # Process PDB file(s)
num_structures = len(inputstructs)
inputstructs = remove_duplicates(inputstructs)
read_from_stdin = False
for inputstruct in inputstructs:
if inputstruct == '-':
inputstruct = sys.stdin.read()
read_from_stdin = True
if config.RAWSTRING:
if sys.version_info < (3,):
inputstruct = bytes(inputstruct).decode('unicode_escape')
else:
inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
else:
if os.path.getsize(inputstruct) == 0:
sysexit(2, 'Empty PDB file\n') # Exit if input file is empty
if num_structures > 1:
basename = inputstruct.split('.')[-2].split('/')[-1]
config.OUTPATH = '/'.join([config.BASEPATH, basename])
outputprefix = 'report'
process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
    else: # Try to fetch the current PDB structure(s) directly from the RCSB server
num_pdbids = len(inputpdbids)
inputpdbids = remove_duplicates(inputpdbids)
for inputpdbid in inputpdbids:
pdbpath, pdbid = download_structure(inputpdbid)
if num_pdbids > 1:
config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
outputprefix = 'report'
process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)
if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
if config.BASEPATH in ['.', './']:
write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
else:
write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH) |
def get_jenkins_job_urls(
rosdistro_name, jenkins_url, release_build_name, targets):
"""
Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string
"""
urls = {}
for target in targets:
view_name = get_release_view_name(
rosdistro_name, release_build_name,
target.os_name, target.os_code_name, target.arch)
base_url = jenkins_url + '/view/%s/job/%s__{pkg}__' % \
(view_name, view_name)
if target.arch == 'source':
urls[target] = base_url + '%s_%s__source' % \
(target.os_name, target.os_code_name)
else:
urls[target] = base_url + '%s_%s_%s__binary' % \
(target.os_name, target.os_code_name, target.arch)
return urls | Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string | Below is the the instruction that describes the task:
### Input:
Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string
### Response:
def get_jenkins_job_urls(
rosdistro_name, jenkins_url, release_build_name, targets):
"""
Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string
"""
urls = {}
for target in targets:
view_name = get_release_view_name(
rosdistro_name, release_build_name,
target.os_name, target.os_code_name, target.arch)
base_url = jenkins_url + '/view/%s/job/%s__{pkg}__' % \
(view_name, view_name)
if target.arch == 'source':
urls[target] = base_url + '%s_%s__source' % \
(target.os_name, target.os_code_name)
else:
urls[target] = base_url + '%s_%s_%s__binary' % \
(target.os_name, target.os_code_name, target.arch)
return urls |
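A sketch of calling get_jenkins_job_urls(); Target below is a plain stand-in for the real target objects (only the three attributes the function reads are modelled), and the distro name, Jenkins URL and build name are examples.

from collections import namedtuple

Target = namedtuple('Target', ['os_name', 'os_code_name', 'arch'])   # stand-in type
targets = [Target('ubuntu', 'focal', 'amd64'), Target('ubuntu', 'focal', 'source')]

urls = get_jenkins_job_urls('noetic', 'https://build.ros.org', 'default', targets)
print(urls[targets[0]].format(pkg='rclpy'))    # fill the {pkg} placeholder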
def count_id(w0):
"""
0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)
:param w0:
:param w1:
:return:
"""
def f(w1):
count = [set(w0.root).intersection(w1.root),
set(w0.flexing).intersection(w1.flexing),
set(w0.root).intersection(w1.flexing) | set(w1.root).intersection(w0.flexing)]
if any(count):
return max((1,2,3), key=lambda i: len(count[i - 1]))
else:
return 0
return f | 0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)
:param w0:
:param w1:
:return: | Below is the the instruction that describes the task:
### Input:
0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)
:param w0:
:param w1:
:return:
### Response:
def count_id(w0):
"""
0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)
:param w0:
:param w1:
:return:
"""
def f(w1):
count = [set(w0.root).intersection(w1.root),
set(w0.flexing).intersection(w1.flexing),
set(w0.root).intersection(w1.flexing) | set(w1.root).intersection(w0.flexing)]
if any(count):
return max((1,2,3), key=lambda i: len(count[i - 1]))
else:
return 0
return f |
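A self-contained sketch of count_id() using a namedtuple stand-in for the word objects; the term tuples are invented so that each return value is exercised.

from collections import namedtuple

Word = namedtuple('Word', ['root', 'flexing'])        # stand-in for the real word type

w0 = Word(root=('a', 'b'), flexing=('x',))
f = count_id(w0)
print(f(Word(root=('a', 'c'), flexing=('y',))))       # 1: terms shared in the root morphem
print(f(Word(root=('m',),     flexing=('x',))))       # 2: terms shared in the flexing morphem
print(f(Word(root=('x',),     flexing=('n',))))       # 3: terms shared root <-> flexing
print(f(Word(root=('m',),     flexing=('n',))))       # 0: nothing shared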
def voicing_measures(ref_voicing, est_voicing):
"""Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the estimate
(prediction). The sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
... est_v)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
Returns
-------
vx_recall : float
Voicing recall rate, the fraction of voiced frames in ref
indicated as voiced in est
vx_false_alarm : float
Voicing false alarm rate, the fraction of unvoiced frames in ref
indicated as voiced in est
"""
validate_voicing(ref_voicing, est_voicing)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0:
return 0.
# How voicing is computed
# | ref_v | !ref_v |
# -------|-------|--------|
# est_v | TP | FP |
# -------|-------|------- |
# !est_v | FN | TN |
# -------------------------
TP = (ref_voicing*est_voicing).sum()
FP = ((ref_voicing == 0)*est_voicing).sum()
FN = (ref_voicing*(est_voicing == 0)).sum()
TN = ((ref_voicing == 0)*(est_voicing == 0)).sum()
# Voicing recall = fraction of voiced frames according the reference that
# are declared as voiced by the estimate
if TP + FN == 0:
vx_recall = 0.
else:
vx_recall = TP/float(TP + FN)
# Voicing false alarm = fraction of unvoiced frames according to the
# reference that are declared as voiced by the estimate
if FP + TN == 0:
vx_false_alm = 0.
else:
vx_false_alm = FP/float(FP + TN)
return vx_recall, vx_false_alm | Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the estimate
(prediction). The sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
... est_v)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
Returns
-------
vx_recall : float
Voicing recall rate, the fraction of voiced frames in ref
indicated as voiced in est
vx_false_alarm : float
Voicing false alarm rate, the fraction of unvoiced frames in ref
indicated as voiced in est | Below is the the instruction that describes the task:
### Input:
Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the estimate
(prediction). The sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
... est_v)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
Returns
-------
vx_recall : float
Voicing recall rate, the fraction of voiced frames in ref
indicated as voiced in est
vx_false_alarm : float
Voicing false alarm rate, the fraction of unvoiced frames in ref
indicated as voiced in est
### Response:
def voicing_measures(ref_voicing, est_voicing):
"""Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the estimate
(prediction). The sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
... est_v)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
Returns
-------
vx_recall : float
Voicing recall rate, the fraction of voiced frames in ref
indicated as voiced in est
vx_false_alarm : float
Voicing false alarm rate, the fraction of unvoiced frames in ref
indicated as voiced in est
"""
validate_voicing(ref_voicing, est_voicing)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0:
return 0.
# How voicing is computed
# | ref_v | !ref_v |
# -------|-------|--------|
# est_v | TP | FP |
# -------|-------|------- |
# !est_v | FN | TN |
# -------------------------
TP = (ref_voicing*est_voicing).sum()
FP = ((ref_voicing == 0)*est_voicing).sum()
FN = (ref_voicing*(est_voicing == 0)).sum()
TN = ((ref_voicing == 0)*(est_voicing == 0)).sum()
# Voicing recall = fraction of voiced frames according the reference that
# are declared as voiced by the estimate
if TP + FN == 0:
vx_recall = 0.
else:
vx_recall = TP/float(TP + FN)
# Voicing false alarm = fraction of unvoiced frames according to the
# reference that are declared as voiced by the estimate
if FP + TN == 0:
vx_false_alm = 0.
else:
vx_false_alm = FP/float(FP + TN)
return vx_recall, vx_false_alm |
def find_or_create_by_name(self, item_name, items_list, item_type):
"""
See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type.
"""
item = self.find_by_name(item_name, items_list)
if not item:
item = self.data_lists[item_type][2](item_name, None)
return item | See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type. | Below is the the instruction that describes the task:
### Input:
See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type.
### Response:
def find_or_create_by_name(self, item_name, items_list, item_type):
"""
See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type.
"""
item = self.find_by_name(item_name, items_list)
if not item:
item = self.data_lists[item_type][2](item_name, None)
return item |
def _readintbe(self, length, start):
"""Read bits and interpret as a big-endian signed int."""
if length % 8:
raise InterpretError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
return self._readint(length, start) | Read bits and interpret as a big-endian signed int. | Below is the the instruction that describes the task:
### Input:
Read bits and interpret as a big-endian signed int.
### Response:
def _readintbe(self, length, start):
"""Read bits and interpret as a big-endian signed int."""
if length % 8:
raise InterpretError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
return self._readint(length, start) |
def create_nio(self, node, nio_settings):
"""
Creates a new NIO.
:param node: Dynamips node instance
:param nio_settings: information to create the NIO
:returns: a NIO object
"""
nio = None
if nio_settings["type"] == "nio_udp":
lport = nio_settings["lport"]
rhost = nio_settings["rhost"]
rport = nio_settings["rport"]
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
raise DynamipsError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
nio = NIOUDP(node, lport, rhost, rport, nio_settings.get("filters", {}))
elif nio_settings["type"] == "nio_generic_ethernet":
ethernet_device = nio_settings["ethernet_device"]
if sys.platform.startswith("win"):
# replace the interface name by the GUID on Windows
windows_interfaces = interfaces()
npf_interface = None
for interface in windows_interfaces:
if interface["name"] == ethernet_device:
npf_interface = interface["id"]
if not npf_interface:
raise DynamipsError("Could not find interface {} on this host".format(ethernet_device))
else:
ethernet_device = npf_interface
if not is_interface_up(ethernet_device):
raise aiohttp.web.HTTPConflict(text="Ethernet interface {} is down".format(ethernet_device))
nio = NIOGenericEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_linux_ethernet":
if sys.platform.startswith("win"):
raise DynamipsError("This NIO type is not supported on Windows")
ethernet_device = nio_settings["ethernet_device"]
nio = NIOLinuxEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_tap":
tap_device = nio_settings["tap_device"]
nio = NIOTAP(node.hypervisor, tap_device)
if not is_interface_up(tap_device):
# test after the TAP interface has been created (if it doesn't exist yet)
raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device))
elif nio_settings["type"] == "nio_unix":
local_file = nio_settings["local_file"]
remote_file = nio_settings["remote_file"]
nio = NIOUNIX(node.hypervisor, local_file, remote_file)
elif nio_settings["type"] == "nio_vde":
control_file = nio_settings["control_file"]
local_file = nio_settings["local_file"]
nio = NIOVDE(node.hypervisor, control_file, local_file)
elif nio_settings["type"] == "nio_null":
nio = NIONull(node.hypervisor)
else:
raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_settings["type"]))
yield from nio.create()
return nio | Creates a new NIO.
:param node: Dynamips node instance
:param nio_settings: information to create the NIO
:returns: a NIO object | Below is the the instruction that describes the task:
### Input:
Creates a new NIO.
:param node: Dynamips node instance
:param nio_settings: information to create the NIO
:returns: a NIO object
### Response:
def create_nio(self, node, nio_settings):
"""
Creates a new NIO.
:param node: Dynamips node instance
:param nio_settings: information to create the NIO
:returns: a NIO object
"""
nio = None
if nio_settings["type"] == "nio_udp":
lport = nio_settings["lport"]
rhost = nio_settings["rhost"]
rport = nio_settings["rport"]
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
raise DynamipsError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
nio = NIOUDP(node, lport, rhost, rport, nio_settings.get("filters", {}))
elif nio_settings["type"] == "nio_generic_ethernet":
ethernet_device = nio_settings["ethernet_device"]
if sys.platform.startswith("win"):
# replace the interface name by the GUID on Windows
windows_interfaces = interfaces()
npf_interface = None
for interface in windows_interfaces:
if interface["name"] == ethernet_device:
npf_interface = interface["id"]
if not npf_interface:
raise DynamipsError("Could not find interface {} on this host".format(ethernet_device))
else:
ethernet_device = npf_interface
if not is_interface_up(ethernet_device):
raise aiohttp.web.HTTPConflict(text="Ethernet interface {} is down".format(ethernet_device))
nio = NIOGenericEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_linux_ethernet":
if sys.platform.startswith("win"):
raise DynamipsError("This NIO type is not supported on Windows")
ethernet_device = nio_settings["ethernet_device"]
nio = NIOLinuxEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_tap":
tap_device = nio_settings["tap_device"]
nio = NIOTAP(node.hypervisor, tap_device)
if not is_interface_up(tap_device):
# test after the TAP interface has been created (if it doesn't exist yet)
raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device))
elif nio_settings["type"] == "nio_unix":
local_file = nio_settings["local_file"]
remote_file = nio_settings["remote_file"]
nio = NIOUNIX(node.hypervisor, local_file, remote_file)
elif nio_settings["type"] == "nio_vde":
control_file = nio_settings["control_file"]
local_file = nio_settings["local_file"]
nio = NIOVDE(node.hypervisor, control_file, local_file)
elif nio_settings["type"] == "nio_null":
nio = NIONull(node.hypervisor)
else:
raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_settings["type"]))
yield from nio.create()
return nio |
def from_content(cls, content):
"""Parses a Tibia.com response into a House object.
Parameters
----------
content: :class:`str`
HTML content of the page.
Returns
-------
:class:`House`
The house contained in the page, or None if the house doesn't exist.
Raises
------
InvalidContent
If the content is not the house section on Tibia.com
"""
parsed_content = parse_tibiacom_content(content)
image_column, desc_column, *_ = parsed_content.find_all('td')
if "Error" in image_column.text:
return None
image = image_column.find('img')
for br in desc_column.find_all("br"):
br.replace_with("\n")
description = desc_column.text.replace("\u00a0", " ").replace("\n\n","\n")
lines = description.splitlines()
try:
name, beds, info, state, *_ = lines
except ValueError:
            raise InvalidContent("content is not from the house section of Tibia.com")
house = cls(name.strip())
house.image_url = image["src"]
house.id = int(id_regex.search(house.image_url).group(1))
m = bed_regex.search(beds)
if m:
house.type = HouseType.GUILDHALL if m.group("type") in ["guildhall", "clanhall"] else HouseType.HOUSE
beds_word = m.group("beds")
if beds_word == "no":
house.beds = 0
else:
house.beds = parse_number_words(beds_word)
m = info_regex.search(info)
if m:
house.world = m.group("world")
house.rent = int(m.group("rent"))
house.size = int(m.group("size"))
house._parse_status(state)
return house | Parses a Tibia.com response into a House object.
Parameters
----------
content: :class:`str`
HTML content of the page.
Returns
-------
:class:`House`
The house contained in the page, or None if the house doesn't exist.
Raises
------
InvalidContent
If the content is not the house section on Tibia.com | Below is the the instruction that describes the task:
### Input:
Parses a Tibia.com response into a House object.
Parameters
----------
content: :class:`str`
HTML content of the page.
Returns
-------
:class:`House`
The house contained in the page, or None if the house doesn't exist.
Raises
------
InvalidContent
If the content is not the house section on Tibia.com
### Response:
def from_content(cls, content):
"""Parses a Tibia.com response into a House object.
Parameters
----------
content: :class:`str`
HTML content of the page.
Returns
-------
:class:`House`
The house contained in the page, or None if the house doesn't exist.
Raises
------
InvalidContent
If the content is not the house section on Tibia.com
"""
parsed_content = parse_tibiacom_content(content)
image_column, desc_column, *_ = parsed_content.find_all('td')
if "Error" in image_column.text:
return None
image = image_column.find('img')
for br in desc_column.find_all("br"):
br.replace_with("\n")
description = desc_column.text.replace("\u00a0", " ").replace("\n\n","\n")
lines = description.splitlines()
try:
name, beds, info, state, *_ = lines
except ValueError:
            raise InvalidContent("content is not from the house section of Tibia.com")
house = cls(name.strip())
house.image_url = image["src"]
house.id = int(id_regex.search(house.image_url).group(1))
m = bed_regex.search(beds)
if m:
house.type = HouseType.GUILDHALL if m.group("type") in ["guildhall", "clanhall"] else HouseType.HOUSE
beds_word = m.group("beds")
if beds_word == "no":
house.beds = 0
else:
house.beds = parse_number_words(beds_word)
m = info_regex.search(info)
if m:
house.world = m.group("world")
house.rent = int(m.group("rent"))
house.size = int(m.group("size"))
house._parse_status(state)
return house |
def path(self, target, args, kw):
"""Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
    : Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path.
"""
if type(target) in string_types:
if ':' in target:
# Build path a nested route name
prefix, rest = target.split(':', 1)
route = self.named_routes[prefix]
prefix_params = route._pop_params(args, kw)
prefix_path = route.path([], prefix_params)
next_mapper = route.resource
return prefix_path + next_mapper.path(rest, args, kw)
else:
# Build path for a named route
return self.named_routes[target].path(args, kw)
elif isinstance(target, Route):
# Build path for a route instance, used by build_url('.')
for route in self.routes:
if route is target:
return route.path(args, kw)
raise InvalidArgumentError("Route '%s' not found in this %s object." % (target, self.__class__.__name__))
else:
# Build path for resource by object id
target_id = id(target)
if target_id in self._lookup:
return self._lookup[target_id].path(args, kw)
raise InvalidArgumentError("No Route found for target '%s' in this %s object." % (target, self.__class__.__name__)) | Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path. | Below is the the instruction that describes the task:
### Input:
Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path.
### Response:
def path(self, target, args, kw):
"""Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
    : Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path.
"""
if type(target) in string_types:
if ':' in target:
# Build path a nested route name
prefix, rest = target.split(':', 1)
route = self.named_routes[prefix]
prefix_params = route._pop_params(args, kw)
prefix_path = route.path([], prefix_params)
next_mapper = route.resource
return prefix_path + next_mapper.path(rest, args, kw)
else:
# Build path for a named route
return self.named_routes[target].path(args, kw)
elif isinstance(target, Route):
# Build path for a route instance, used by build_url('.')
for route in self.routes:
if route is target:
return route.path(args, kw)
raise InvalidArgumentError("Route '%s' not found in this %s object." % (target, self.__class__.__name__))
else:
# Build path for resource by object id
target_id = id(target)
if target_id in self._lookup:
return self._lookup[target_id].path(args, kw)
raise InvalidArgumentError("No Route found for target '%s' in this %s object." % (target, self.__class__.__name__)) |
def __we_c(cls, calib, tc, temp, we_v):
"""
Compute weC from sensor temperature compensation of weV
"""
offset_v = calib.pid_elc_mv / 1000.0
response_v = we_v - offset_v # remove electronic zero
response_c = tc.correct(temp, response_v) # correct the response component
if response_c is None:
return None
we_c = response_c + offset_v # replace electronic zero
return we_c | Compute weC from sensor temperature compensation of weV | Below is the the instruction that describes the task:
### Input:
Compute weC from sensor temperature compensation of weV
### Response:
def __we_c(cls, calib, tc, temp, we_v):
"""
Compute weC from sensor temperature compensation of weV
"""
offset_v = calib.pid_elc_mv / 1000.0
response_v = we_v - offset_v # remove electronic zero
response_c = tc.correct(temp, response_v) # correct the response component
if response_c is None:
return None
we_c = response_c + offset_v # replace electronic zero
return we_c |
def middle(self):
""" Returns the middle point of the bounding box
:return: middle point
:rtype: (float, float)
"""
return (self.min_x + self.max_x) / 2, (self.min_y + self.max_y) / 2 | Returns the middle point of the bounding box
:return: middle point
:rtype: (float, float) | Below is the the instruction that describes the task:
### Input:
Returns the middle point of the bounding box
:return: middle point
:rtype: (float, float)
### Response:
def middle(self):
""" Returns the middle point of the bounding box
:return: middle point
:rtype: (float, float)
"""
return (self.min_x + self.max_x) / 2, (self.min_y + self.max_y) / 2 |
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label) | Short helper to add an IP address value to the MIB subtree. | Below is the the instruction that describes the task:
### Input:
Short helper to add an IP address value to the MIB subtree.
### Response:
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label) |
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
                                 '`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the '
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args | Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments | Below is the the instruction that describes the task:
### Input:
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
### Response:
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
                                 '`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the '
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args |
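An illustrative call showing how the arguments are rewritten; it assumes the helper is defined at module level and that 'verbose' appears in SPARK_SUBMIT_FLAGS, as the command-line example in the docstring suggests.

args = ['master=local[4]', 'deploy-mode=client', 'verbose']
print(__validate_and_fix_spark_args(args))
# expected: ['--master', 'local[4]', '--deploy-mode', 'client', '--verbose']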
def match_agent_id(self, agent_id, match):
"""Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('agentId', str(agent_id), bool(match)) | Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def match_agent_id(self, agent_id, match):
"""Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('agentId', str(agent_id), bool(match)) |
def checktype_seq(self, seq, kind, *, unique=False, **kargs):
"""Raise TypeError if seq is not a sequence of elements satisfying
kind. Optionally require elements to be unique.
As a special case, a string is considered to be an atomic value
rather than a sequence of single-character strings. (Thus,
checktype_seq('foo', str) will fail.)
"""
exp = self.str_kind(kind)
# Make sure we have a sequence.
try:
iterator = iter(seq)
# Generators aren't sequences. This avoids a confusing bug
# where we consume a generator by type-checking it, and leave
# only an exhausted iterator for the user code.
len(seq)
except TypeError:
got = self.str_valtype(seq)
raise TypeError('Expected sequence of {}; '
'got {} instead of sequence'.format(exp, got))
if isinstance(seq, str):
raise TypeError('Expected sequence of {}; got single str '
'(strings do not count as character '
'sequences)'.format(exp))
for i, item in enumerate(iterator):
# Depend on checktype() to check individual elements,
# but generate an error message that includes the position
# of the failure.
try:
self.checktype(item, kind, **kargs)
except TypeError:
got = self.str_valtype(item)
raise TypeError('Expected sequence of {}; '
'got sequence with {} at position {}'.format(
exp, got, i)) from None
if unique:
seen = []
for i, item in enumerate(seq):
if item in seen:
raise TypeError('Duplicate element {} at '
'position {}'.format(repr(item), i))
seen.append(item) | Raise TypeError if seq is not a sequence of elements satisfying
kind. Optionally require elements to be unique.
As a special case, a string is considered to be an atomic value
rather than a sequence of single-character strings. (Thus,
checktype_seq('foo', str) will fail.) | Below is the the instruction that describes the task:
### Input:
Raise TypeError if seq is not a sequence of elements satisfying
kind. Optionally require elements to be unique.
As a special case, a string is considered to be an atomic value
rather than a sequence of single-character strings. (Thus,
checktype_seq('foo', str) will fail.)
### Response:
def checktype_seq(self, seq, kind, *, unique=False, **kargs):
"""Raise TypeError if seq is not a sequence of elements satisfying
kind. Optionally require elements to be unique.
As a special case, a string is considered to be an atomic value
rather than a sequence of single-character strings. (Thus,
checktype_seq('foo', str) will fail.)
"""
exp = self.str_kind(kind)
# Make sure we have a sequence.
try:
iterator = iter(seq)
# Generators aren't sequences. This avoids a confusing bug
# where we consume a generator by type-checking it, and leave
# only an exhausted iterator for the user code.
len(seq)
except TypeError:
got = self.str_valtype(seq)
raise TypeError('Expected sequence of {}; '
'got {} instead of sequence'.format(exp, got))
if isinstance(seq, str):
raise TypeError('Expected sequence of {}; got single str '
'(strings do not count as character '
'sequences)'.format(exp))
for i, item in enumerate(iterator):
# Depend on checktype() to check individual elements,
# but generate an error message that includes the position
# of the failure.
try:
self.checktype(item, kind, **kargs)
except TypeError:
got = self.str_valtype(item)
raise TypeError('Expected sequence of {}; '
'got sequence with {} at position {}'.format(
exp, got, i)) from None
if unique:
seen = []
for i, item in enumerate(seq):
if item in seen:
raise TypeError('Duplicate element {} at '
'position {}'.format(repr(item), i))
seen.append(item) |
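A usage sketch for checktype_seq(); TypeChecker stands for the (unnamed here) enclosing type-checking class, so the constructor call is an assumption.

checker = TypeChecker()                               # hypothetical enclosing class
checker.checktype_seq([1, 2, 3], int)                 # passes silently

for bad_call in (
    lambda: checker.checktype_seq([1, 2, 2], int, unique=True),   # duplicate element
    lambda: checker.checktype_seq('foo', str),                    # single str rejected
    lambda: checker.checktype_seq((x for x in range(3)), int),    # generators rejected
):
    try:
        bad_call()
    except TypeError as exc:
        print(exc)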
def transform_data(from_client, from_project, from_logstore, from_time,
to_time=None,
to_client=None, to_project=None, to_logstore=None,
shard_list=None,
config=None,
batch_size=None, compress=None,
cg_name=None, c_name=None,
cg_heartbeat_interval=None, cg_data_fetch_interval=None, cg_in_order=None,
cg_worker_pool_size=None
):
"""
    transform data from one logstore to another (which could be the same, or in a different region); the time is the log's server-side receive time.
"""
if not config:
logger.info("transform_data: config is not configured, use copy data by default.")
return copy_data(from_client, from_project, from_logstore, from_time, to_time=to_time,
to_client=to_client, to_project=to_project, to_logstore=to_logstore,
shard_list=shard_list,
batch_size=batch_size, compress=compress)
to_client = to_client or from_client
# increase the timeout to 2 min at least
from_client.timeout = max(from_client.timeout, 120)
to_client.timeout = max(to_client.timeout, 120)
to_project = to_project or from_project
to_logstore = to_logstore or from_logstore
if not cg_name:
# batch mode
to_time = to_time or "end"
cpu_count = multiprocessing.cpu_count() * 2
shards = from_client.list_shards(from_project, from_logstore).get_shards_info()
current_shards = [str(shard['shardID']) for shard in shards]
target_shards = _parse_shard_list(shard_list, current_shards)
worker_size = min(cpu_count, len(target_shards))
result = dict()
total_count = 0
total_removed = 0
with ProcessPoolExecutor(max_workers=worker_size) as pool:
futures = [pool.submit(transform_worker, from_client, from_project, from_logstore, shard,
from_time, to_time, config,
to_client, to_project, to_logstore,
batch_size=batch_size, compress=compress)
for shard in target_shards]
for future in as_completed(futures):
if future.exception():
logger.error("get error when transforming data: {0}".format(future.exception()))
else:
partition, count, removed, processed, failed = future.result()
total_count += count
total_removed += removed
if count:
result[partition] = {"total_count": count, "transformed":
processed, "removed": removed, "failed": failed}
return LogResponse({}, {"total_count": total_count, "shards": result})
else:
# consumer group mode
c_name = c_name or "transform_data_{0}".format(multiprocessing.current_process().pid)
cg_heartbeat_interval = cg_heartbeat_interval or 20
cg_data_fetch_interval = cg_data_fetch_interval or 2
cg_in_order = False if cg_in_order is None else cg_in_order
cg_worker_pool_size = cg_worker_pool_size or 3
option = LogHubConfig(from_client._endpoint, from_client._accessKeyId, from_client._accessKey,
from_project, from_logstore, cg_name,
c_name, cursor_position=CursorPosition.SPECIAL_TIMER_CURSOR,
cursor_start_time=from_time,
cursor_end_time=to_time,
heartbeat_interval=cg_heartbeat_interval, data_fetch_interval=cg_data_fetch_interval,
in_order=cg_in_order,
worker_pool_size=cg_worker_pool_size)
TransformDataConsumer.set_transform_options(config, to_client, to_project, to_logstore)
result = {"total_count": 0, "shards": {}}
l = RLock()
def status_updator(shard_id, count=0, removed=0, processed=0, failed=0):
logger.info("status update is called, shard: {0}, count: {1}, removed: {2}, processed: {3}, failed: {4}".format(shard_id, count, removed, processed, failed))
with l:
result["total_count"] += count
if shard_id in result["shards"]:
data = result["shards"][shard_id]
result["shards"][shard_id] = {"total_count": data["total_count"] + count, "transformed": data["transformed"] + processed, "removed": data["removed"] + removed, "failed": data["failed"] + failed}
else:
result["shards"][shard_id] = {"total_count": count, "transformed": processed, "removed": removed, "failed": failed}
worker = ConsumerWorker(TransformDataConsumer, consumer_option=option, args=(status_updator, ) )
worker.start()
try:
while worker.is_alive():
worker.join(timeout=60)
logger.info("transform_data: worker exit unexpected, try to shutdown it")
worker.shutdown()
except KeyboardInterrupt:
logger.info("transform_data: *** try to exit **** ")
print("try to stop transforming data.")
worker.shutdown()
worker.join(timeout=120)
        return LogResponse({}, result) | transform data from one logstore to another (which could be the same, or in a different region); the time is the log's server-side receive time. | Below is the instruction that describes the task:
### Input:
transform data from one logstore to another (which could be the same, or in a different region); the time is the log's server-side receive time.
### Response:
def transform_data(from_client, from_project, from_logstore, from_time,
to_time=None,
to_client=None, to_project=None, to_logstore=None,
shard_list=None,
config=None,
batch_size=None, compress=None,
cg_name=None, c_name=None,
cg_heartbeat_interval=None, cg_data_fetch_interval=None, cg_in_order=None,
cg_worker_pool_size=None
):
"""
    transform data from one logstore to another (which could be the same, or in a different region); the time is the log's server-side receive time.
"""
if not config:
logger.info("transform_data: config is not configured, use copy data by default.")
return copy_data(from_client, from_project, from_logstore, from_time, to_time=to_time,
to_client=to_client, to_project=to_project, to_logstore=to_logstore,
shard_list=shard_list,
batch_size=batch_size, compress=compress)
to_client = to_client or from_client
# increase the timeout to 2 min at least
from_client.timeout = max(from_client.timeout, 120)
to_client.timeout = max(to_client.timeout, 120)
to_project = to_project or from_project
to_logstore = to_logstore or from_logstore
if not cg_name:
# batch mode
to_time = to_time or "end"
cpu_count = multiprocessing.cpu_count() * 2
shards = from_client.list_shards(from_project, from_logstore).get_shards_info()
current_shards = [str(shard['shardID']) for shard in shards]
target_shards = _parse_shard_list(shard_list, current_shards)
worker_size = min(cpu_count, len(target_shards))
result = dict()
total_count = 0
total_removed = 0
with ProcessPoolExecutor(max_workers=worker_size) as pool:
futures = [pool.submit(transform_worker, from_client, from_project, from_logstore, shard,
from_time, to_time, config,
to_client, to_project, to_logstore,
batch_size=batch_size, compress=compress)
for shard in target_shards]
for future in as_completed(futures):
if future.exception():
logger.error("get error when transforming data: {0}".format(future.exception()))
else:
partition, count, removed, processed, failed = future.result()
total_count += count
total_removed += removed
if count:
result[partition] = {"total_count": count, "transformed":
processed, "removed": removed, "failed": failed}
return LogResponse({}, {"total_count": total_count, "shards": result})
else:
# consumer group mode
c_name = c_name or "transform_data_{0}".format(multiprocessing.current_process().pid)
cg_heartbeat_interval = cg_heartbeat_interval or 20
cg_data_fetch_interval = cg_data_fetch_interval or 2
cg_in_order = False if cg_in_order is None else cg_in_order
cg_worker_pool_size = cg_worker_pool_size or 3
option = LogHubConfig(from_client._endpoint, from_client._accessKeyId, from_client._accessKey,
from_project, from_logstore, cg_name,
c_name, cursor_position=CursorPosition.SPECIAL_TIMER_CURSOR,
cursor_start_time=from_time,
cursor_end_time=to_time,
heartbeat_interval=cg_heartbeat_interval, data_fetch_interval=cg_data_fetch_interval,
in_order=cg_in_order,
worker_pool_size=cg_worker_pool_size)
TransformDataConsumer.set_transform_options(config, to_client, to_project, to_logstore)
result = {"total_count": 0, "shards": {}}
l = RLock()
def status_updator(shard_id, count=0, removed=0, processed=0, failed=0):
logger.info("status update is called, shard: {0}, count: {1}, removed: {2}, processed: {3}, failed: {4}".format(shard_id, count, removed, processed, failed))
with l:
result["total_count"] += count
if shard_id in result["shards"]:
data = result["shards"][shard_id]
result["shards"][shard_id] = {"total_count": data["total_count"] + count, "transformed": data["transformed"] + processed, "removed": data["removed"] + removed, "failed": data["failed"] + failed}
else:
result["shards"][shard_id] = {"total_count": count, "transformed": processed, "removed": removed, "failed": failed}
worker = ConsumerWorker(TransformDataConsumer, consumer_option=option, args=(status_updator, ) )
worker.start()
try:
while worker.is_alive():
worker.join(timeout=60)
logger.info("transform_data: worker exit unexpected, try to shutdown it")
worker.shutdown()
except KeyboardInterrupt:
logger.info("transform_data: *** try to exit **** ")
print("try to stop transforming data.")
worker.shutdown()
worker.join(timeout=120)
return LogResponse({}, result) |
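A hedged batch-mode sketch for the function above. The import path and endpoint are assumptions about the aliyun-log Python SDK; with config=None the call falls back to a plain copy between logstores, as the code shows:
    from aliyun.log import LogClient   # import path assumed

    src = LogClient('cn-hangzhou.log.aliyuncs.com', 'ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
    # batch mode (no cg_name): copy everything received in the window to the target logstore
    res = transform_data(src, 'src_project', 'src_logstore',
                         from_time='2019-01-01 00:00:00+8:00', to_time='end',
                         to_project='dst_project', to_logstore='dst_logstore',
                         config=None)
    # res is a LogResponse whose body summarizes total_count and per-shard results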
def find_video_by_id(self, video_id):
"""doc: http://open.youku.com/docs/doc?id=44
"""
url = 'https://openapi.youku.com/v2/videos/show_basic.json'
params = {
'client_id': self.client_id,
'video_id': video_id
}
r = requests.get(url, params=params)
check_error(r)
        return r.json() | doc: http://open.youku.com/docs/doc?id=44 | Below is the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=44
### Response:
def find_video_by_id(self, video_id):
"""doc: http://open.youku.com/docs/doc?id=44
"""
url = 'https://openapi.youku.com/v2/videos/show_basic.json'
params = {
'client_id': self.client_id,
'video_id': video_id
}
r = requests.get(url, params=params)
check_error(r)
return r.json() |
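For reference, the method above boils down to the following request; the client_id and video_id values are placeholders:
    import requests

    r = requests.get('https://openapi.youku.com/v2/videos/show_basic.json',
                     params={'client_id': 'YOUR_CLIENT_ID', 'video_id': 'XMTc1ODE5Njcy'})
    r.raise_for_status()
    print(r.json())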
def backward_committor(T, A, B):
r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
        Vector of backward committor probabilities
    Notes
    -----
    The backward committor is a solution to the following
boundary-value problem
.. math::
\sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I)
u_{i}=1 for i \in A (II)
u_{i}=0 for i \in B (III)
with adjoint of the generator matrix K=(D_pi(P-I))'.
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError("Sets A and B have to be disjoint")
pi = stationary_distribution(T)
L = T - eye(T.shape[0], T.shape[0])
D = diags([pi, ], [0, ])
K = (D.dot(L)).T
"""Assemble left-hand side W for linear system"""
"""Equation (I)"""
W = 1.0 * K
"""Equation (II)"""
W = W.todok()
W[list(A), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(A)), (list(A), list(A))), shape=W.shape).tocsr()
"""Equation (III)"""
W = W.todok()
W[list(B), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(B)), (list(B), list(B))), shape=W.shape).tocsr()
"""Assemble right-hand side r for linear system"""
"""Equation (I)+(III)"""
r = np.zeros(T.shape[0])
"""Equation (II)"""
r[list(A)] = 1.0
u = spsolve(W, r)
return u | r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
        Vector of backward committor probabilities
    Notes
    -----
    The backward committor is a solution to the following
boundary-value problem
.. math::
\sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I)
u_{i}=1 for i \in A (II)
u_{i}=0 for i \in B (III)
    with adjoint of the generator matrix K=(D_pi(P-I))'. | Below is the instruction that describes the task:
### Input:
r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
        Vector of backward committor probabilities
    Notes
    -----
    The backward committor is a solution to the following
boundary-value problem
.. math::
\sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I)
u_{i}=1 for i \in A (II)
u_{i}=0 for i \in B (III)
with adjoint of the generator matrix K=(D_pi(P-I))'.
### Response:
def backward_committor(T, A, B):
r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
        Vector of backward committor probabilities
    Notes
    -----
    The backward committor is a solution to the following
boundary-value problem
.. math::
\sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I)
u_{i}=1 for i \in A (II)
u_{i}=0 for i \in B (III)
with adjoint of the generator matrix K=(D_pi(P-I))'.
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError("Sets A and B have to be disjoint")
pi = stationary_distribution(T)
L = T - eye(T.shape[0], T.shape[0])
D = diags([pi, ], [0, ])
K = (D.dot(L)).T
"""Assemble left-hand side W for linear system"""
"""Equation (I)"""
W = 1.0 * K
"""Equation (II)"""
W = W.todok()
W[list(A), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(A)), (list(A), list(A))), shape=W.shape).tocsr()
"""Equation (III)"""
W = W.todok()
W[list(B), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(B)), (list(B), list(B))), shape=W.shape).tocsr()
"""Assemble right-hand side r for linear system"""
"""Equation (I)+(III)"""
r = np.zeros(T.shape[0])
"""Equation (II)"""
r[list(A)] = 1.0
u = spsolve(W, r)
return u |
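A self-contained dense illustration of the same boundary-value problem on a 3-state chain; it re-derives the linear system directly with numpy instead of calling the sparse routine above, so the numbers can be checked by hand:
    import numpy as np

    P = np.array([[0.8, 0.2, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.2, 0.8]])
    A, B = [0], [2]

    # stationary distribution: left eigenvector of P for eigenvalue 1
    evals, evecs = np.linalg.eig(P.T)
    pi = np.real(evecs[:, np.argmax(np.real(evals))])
    pi /= pi.sum()

    # assemble W and r as in the function, but densely
    K = (np.diag(pi) @ (P - np.eye(3))).T
    W = K.copy()
    W[A, :] = 0.0; W[A, A] = 1.0
    W[B, :] = 0.0; W[B, B] = 1.0
    r = np.zeros(3); r[A] = 1.0

    u = np.linalg.solve(W, r)
    print(u)   # [1.0, 0.5, 0.0]: u = 1 on A, 0 on B, interpolates in between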
def get_raw_access_token(self,
request_token,
request_token_secret,
method='GET',
**kwargs):
'''
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`.
        Use this if you need the full `Response` object.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
# ensure we've set the access_token_url
if self.access_token_url is None:
raise TypeError('access_token_url must not be None')
session = self.get_session((request_token, request_token_secret))
self.access_token_response = session.request(method,
self.access_token_url,
**kwargs)
return self.access_token_response | Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`.
    Use this if you need the full `Response` object.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
    :type \*\*kwargs: dict | Below is the instruction that describes the task:
### Input:
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`.
Use this if your endpoint if you need the full `Response` object.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
### Response:
def get_raw_access_token(self,
request_token,
request_token_secret,
method='GET',
**kwargs):
'''
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`.
        Use this if you need the full `Response` object.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
# ensure we've set the access_token_url
if self.access_token_url is None:
raise TypeError('access_token_url must not be None')
session = self.get_session((request_token, request_token_secret))
self.access_token_response = session.request(method,
self.access_token_url,
**kwargs)
return self.access_token_response |
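A hedged end-to-end sketch with rauth's OAuth1Service; the URLs are placeholders, and whether a verifier must be posted depends on the provider:
    from rauth import OAuth1Service

    service = OAuth1Service(
        consumer_key='KEY', consumer_secret='SECRET',
        request_token_url='https://api.example.com/oauth/request_token',
        access_token_url='https://api.example.com/oauth/access_token',
        authorize_url='https://api.example.com/oauth/authorize',
        base_url='https://api.example.com/v1/')

    request_token, request_token_secret = service.get_request_token()
    # ... the user authorizes and a verifier is obtained out of band ...
    r = service.get_raw_access_token(request_token, request_token_secret,
                                     method='POST',
                                     data={'oauth_verifier': 'VERIFIER'})
    print(r.status_code, r.text)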
def places_nearby(client, location=None, radius=None, keyword=None,
language=None, min_price=None, max_price=None, name=None,
open_now=False, rank_by=None, type=None, page_token=None):
"""
Performs nearby search for places.
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param region: The region code, optional parameter.
See more @ https://developers.google.com/places/web-service/search
:type region: string
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param language: The language in which to return results.
:type language: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param rank_by: Specifies the order in which results are listed.
Possible values are: prominence (default), distance
:type rank_by: string
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:param page_token: Token from a previous search that when provided will
                       return the next page of results for the same search.
:type page_token: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
next_page_token: token for retrieving the next page of results
"""
if not location and not page_token:
raise ValueError("either a location or page_token arg is required")
if rank_by == "distance":
if not (keyword or name or type):
raise ValueError("either a keyword, name, or type arg is required "
"when rank_by is set to distance")
elif radius is not None:
raise ValueError("radius cannot be specified when rank_by is set to "
"distance")
return _places(client, "nearby", location=location, radius=radius,
keyword=keyword, language=language, min_price=min_price,
max_price=max_price, name=name, open_now=open_now,
rank_by=rank_by, type=type, page_token=page_token) | Performs nearby search for places.
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param region: The region code, optional parameter.
See more @ https://developers.google.com/places/web-service/search
:type region: string
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param language: The language in which to return results.
:type language: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param rank_by: Specifies the order in which results are listed.
Possible values are: prominence (default), distance
:type rank_by: string
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:param page_token: Token from a previous search that when provided will
                       return the next page of results for the same search.
:type page_token: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
        next_page_token: token for retrieving the next page of results | Below is the instruction that describes the task:
### Input:
Performs nearby search for places.
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param region: The region code, optional parameter.
See more @ https://developers.google.com/places/web-service/search
:type region: string
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param language: The language in which to return results.
:type language: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param rank_by: Specifies the order in which results are listed.
Possible values are: prominence (default), distance
:type rank_by: string
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:param page_token: Token from a previous search that when provided will
                       return the next page of results for the same search.
:type page_token: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
next_page_token: token for retrieving the next page of results
### Response:
def places_nearby(client, location=None, radius=None, keyword=None,
language=None, min_price=None, max_price=None, name=None,
open_now=False, rank_by=None, type=None, page_token=None):
"""
Performs nearby search for places.
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param region: The region code, optional parameter.
See more @ https://developers.google.com/places/web-service/search
:type region: string
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param language: The language in which to return results.
:type language: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param rank_by: Specifies the order in which results are listed.
Possible values are: prominence (default), distance
:type rank_by: string
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:param page_token: Token from a previous search that when provided will
                       return the next page of results for the same search.
:type page_token: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
next_page_token: token for retrieving the next page of results
"""
if not location and not page_token:
raise ValueError("either a location or page_token arg is required")
if rank_by == "distance":
if not (keyword or name or type):
raise ValueError("either a keyword, name, or type arg is required "
"when rank_by is set to distance")
elif radius is not None:
raise ValueError("radius cannot be specified when rank_by is set to "
"distance")
return _places(client, "nearby", location=location, radius=radius,
keyword=keyword, language=language, min_price=min_price,
max_price=max_price, name=name, open_now=open_now,
rank_by=rank_by, type=type, page_token=page_token) |
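Typical use through the googlemaps Python client this function appears to belong to; the API key and coordinates are placeholders:
    import googlemaps

    gmaps = googlemaps.Client(key='YOUR_API_KEY')
    resp = gmaps.places_nearby(location=(48.8584, 2.2945), radius=500,
                               type='restaurant', open_now=True)
    for place in resp.get('results', []):
        print(place['name'], place.get('rating'))

    # next_page_token usually needs a short delay before it becomes valid
    token = resp.get('next_page_token')
    if token:
        more = gmaps.places_nearby(page_token=token)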
def upload(self, stop_at=None):
"""
Perform file upload.
        Performs continuous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at)) | Perform file upload.
    Performs continuous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
            defaults to the file size. | Below is the instruction that describes the task:
### Input:
Perform file upload.
    Performs continuous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
### Response:
def upload(self, stop_at=None):
"""
Perform file upload.
        Performs continuous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at)) |
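A hedged driver for the uploader above; the class name, constructor signature, and chunk size are assumptions about the surrounding code:
    uploader = ChunkedUploader('video.mp4', chunk_size=1024 * 1024, log_func=print)  # hypothetical class
    uploader.upload()                          # push the whole file, one chunk per cycle
    # or stop early, e.g. after the first 10 MiB:
    # uploader.upload(stop_at=10 * 1024 * 1024)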
def writeDB(filename, catalog, meta=None):
"""
Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None
"""
def sqlTypes(obj, names):
"""
Return the sql type corresponding to each named parameter in obj
"""
types = []
for n in names:
val = getattr(obj, n)
if isinstance(val, bool):
types.append("BOOL")
elif isinstance(val, (int, np.int64, np.int32)):
types.append("INT")
elif isinstance(val, (float, np.float64, np.float32)): # float32 is bugged and claims not to be a float
types.append("FLOAT")
elif isinstance(val, six.string_types):
types.append("VARCHAR")
else:
log.warning("Column {0} is of unknown type {1}".format(n, type(n)))
log.warning("Using VARCHAR")
types.append("VARCHAR")
return types
if os.path.exists(filename):
log.warning("overwriting {0}".format(filename))
os.remove(filename)
conn = sqlite3.connect(filename)
db = conn.cursor()
# determine the column names by inspecting the catalog class
for t, tn in zip(classify_catalog(catalog), ["components", "islands", "simples"]):
if len(t) < 1:
continue #don't write empty tables
col_names = t[0].names
col_types = sqlTypes(t[0], col_names)
stmnt = ','.join(["{0} {1}".format(a, b) for a, b in zip(col_names, col_types)])
db.execute('CREATE TABLE {0} ({1})'.format(tn, stmnt))
stmnt = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(tn, ','.join(col_names), ','.join(['?' for i in col_names]))
        # expand the iterators that are created by python 3+
data = list(map(nulls, list(r.as_list() for r in t)))
db.executemany(stmnt, data)
log.info("Created table {0}".format(tn))
# metadata add some meta data
db.execute("CREATE TABLE meta (key VARCHAR, val VARCHAR)")
for k in meta:
db.execute("INSERT INTO meta (key, val) VALUES (?,?)", (k, meta[k]))
conn.commit()
log.info(db.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall())
conn.close()
log.info("Wrote file {0}".format(filename))
return | Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
    None | Below is the instruction that describes the task:
### Input:
Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None
### Response:
def writeDB(filename, catalog, meta=None):
"""
Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None
"""
def sqlTypes(obj, names):
"""
Return the sql type corresponding to each named parameter in obj
"""
types = []
for n in names:
val = getattr(obj, n)
if isinstance(val, bool):
types.append("BOOL")
elif isinstance(val, (int, np.int64, np.int32)):
types.append("INT")
elif isinstance(val, (float, np.float64, np.float32)): # float32 is bugged and claims not to be a float
types.append("FLOAT")
elif isinstance(val, six.string_types):
types.append("VARCHAR")
else:
log.warning("Column {0} is of unknown type {1}".format(n, type(n)))
log.warning("Using VARCHAR")
types.append("VARCHAR")
return types
if os.path.exists(filename):
log.warning("overwriting {0}".format(filename))
os.remove(filename)
conn = sqlite3.connect(filename)
db = conn.cursor()
# determine the column names by inspecting the catalog class
for t, tn in zip(classify_catalog(catalog), ["components", "islands", "simples"]):
if len(t) < 1:
continue #don't write empty tables
col_names = t[0].names
col_types = sqlTypes(t[0], col_names)
stmnt = ','.join(["{0} {1}".format(a, b) for a, b in zip(col_names, col_types)])
db.execute('CREATE TABLE {0} ({1})'.format(tn, stmnt))
stmnt = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(tn, ','.join(col_names), ','.join(['?' for i in col_names]))
        # expand the iterators that are created by python 3+
data = list(map(nulls, list(r.as_list() for r in t)))
db.executemany(stmnt, data)
log.info("Created table {0}".format(tn))
# metadata add some meta data
db.execute("CREATE TABLE meta (key VARCHAR, val VARCHAR)")
for k in meta:
db.execute("INSERT INTO meta (key, val) VALUES (?,?)", (k, meta[k]))
conn.commit()
log.info(db.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall())
conn.close()
log.info("Wrote file {0}".format(filename))
return |
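The output is a plain sqlite3 file, so it can be inspected without AegeanTools; a short read-back sketch (the filename is a placeholder):
    import sqlite3

    conn = sqlite3.connect('catalog.db')
    cur = conn.cursor()
    print(cur.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
    print(dict(cur.execute("SELECT key, val FROM meta").fetchall()))
    rows = cur.execute("SELECT * FROM components LIMIT 5").fetchall()  # only if components were written
    conn.close()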
def refine_get_urls(original):
"""
serve static files (and media files also)
in production the webserver should serve requested
static files itself and never let requests to /static/*
and /media/* get to the django application.
"""
def get_urls():
from django.conf.urls import url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
if settings.DEBUG:
return staticfiles_urlpatterns() + [
url(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
] + original()
else:
return original()
return get_urls | serve static files (and media files also)
in production the webserver should serve requested
static files itself and never let requests to /static/*
    and /media/* get to the django application. | Below is the instruction that describes the task:
### Input:
serve static files (and media files also)
in production the webserver should serve requested
static files itself and never let requests to /static/*
and /media/* get to the django application.
### Response:
def refine_get_urls(original):
"""
serve static files (and media files also)
in production the webserver should serve requested
static files itself and never let requests to /static/*
and /media/* get to the django application.
"""
def get_urls():
from django.conf.urls import url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
if settings.DEBUG:
return staticfiles_urlpatterns() + [
url(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
] + original()
else:
return original()
return get_urls |
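A hedged sketch of applying the decorator to a zero-argument get_urls callable in a project's urls module; the view import is a placeholder:
    # urls.py (sketch)
    def get_urls():
        from django.conf.urls import url
        from myproject import views       # placeholder import
        return [url(r'^$', views.home)]

    get_urls = refine_get_urls(get_urls)
    urlpatterns = get_urls()               # static/media routes are added only when DEBUG=True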
def handle_new_tuple_set_2(self, hts2):
"""Called when new HeronTupleSet2 arrives
Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet
See more at GitHub PR #1421
:param tuple_msg_set: HeronTupleSet2 type
"""
if self.my_pplan_helper is None or self.my_instance is None:
Log.error("Got tuple set when no instance assigned yet")
else:
hts = tuple_pb2.HeronTupleSet()
if hts2.HasField('control'):
hts.control.CopyFrom(hts2.control)
else:
hdts = tuple_pb2.HeronDataTupleSet()
hdts.stream.CopyFrom(hts2.data.stream)
try:
for trunk in hts2.data.tuples:
added_tuple = hdts.tuples.add()
added_tuple.ParseFromString(trunk)
except Exception:
Log.exception('Fail to deserialize HeronDataTuple')
hts.data.CopyFrom(hdts)
self.in_stream.offer(hts)
if self.my_pplan_helper.is_topology_running():
self.my_instance.py_class.process_incoming_tuples() | Called when new HeronTupleSet2 arrives
Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet
See more at GitHub PR #1421
    :param hts2: HeronTupleSet2 type | Below is the instruction that describes the task:
### Input:
Called when new HeronTupleSet2 arrives
Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet
See more at GitHub PR #1421
    :param hts2: HeronTupleSet2 type
### Response:
def handle_new_tuple_set_2(self, hts2):
"""Called when new HeronTupleSet2 arrives
Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet
See more at GitHub PR #1421
:param tuple_msg_set: HeronTupleSet2 type
"""
if self.my_pplan_helper is None or self.my_instance is None:
Log.error("Got tuple set when no instance assigned yet")
else:
hts = tuple_pb2.HeronTupleSet()
if hts2.HasField('control'):
hts.control.CopyFrom(hts2.control)
else:
hdts = tuple_pb2.HeronDataTupleSet()
hdts.stream.CopyFrom(hts2.data.stream)
try:
for trunk in hts2.data.tuples:
added_tuple = hdts.tuples.add()
added_tuple.ParseFromString(trunk)
except Exception:
Log.exception('Fail to deserialize HeronDataTuple')
hts.data.CopyFrom(hdts)
self.in_stream.offer(hts)
if self.my_pplan_helper.is_topology_running():
self.my_instance.py_class.process_incoming_tuples() |
def set_inputhook(self, callback):
"""Set PyOS_InputHook to callback and return the previous one."""
# On platforms with 'readline' support, it's all too likely to
# have a KeyboardInterrupt signal delivered *even before* an
# initial ``try:`` clause in the callback can be executed, so
# we need to disable CTRL+C in this situation.
ignore_CTRL_C()
self._callback = callback
self._callback_pyfunctype = self.PYFUNC(callback)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = \
ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
self._installed = True
        return original | Set PyOS_InputHook to callback and return the previous one. | Below is the instruction that describes the task:
### Input:
Set PyOS_InputHook to callback and return the previous one.
### Response:
def set_inputhook(self, callback):
"""Set PyOS_InputHook to callback and return the previous one."""
# On platforms with 'readline' support, it's all too likely to
# have a KeyboardInterrupt signal delivered *even before* an
# initial ``try:`` clause in the callback can be executed, so
# we need to disable CTRL+C in this situation.
ignore_CTRL_C()
self._callback = callback
self._callback_pyfunctype = self.PYFUNC(callback)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = \
ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
self._installed = True
return original |
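A hedged installation sketch; the manager class name is taken from IPython's input-hook machinery but is still an assumption here, and the callback should return an int as PyOS_InputHook expects:
    def my_hook():
        # called while the interpreter waits for terminal input; keep it quick
        return 0

    manager = InputHookManager()           # assumed enclosing class
    previous = manager.set_inputhook(my_hook)
    # restore later with the manager's matching clear/uninstall call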
def _load_builtins(self):
"""
Fill the Registry with the hard coded key bindings.
"""
pymux = self.pymux
kb = KeyBindings()
# Create filters.
has_prefix = HasPrefix(pymux)
waits_for_confirmation = WaitsForConfirmation(pymux)
prompt_or_command_focus = has_focus(COMMAND) | has_focus(PROMPT)
display_pane_numbers = Condition(lambda: pymux.display_pane_numbers)
in_scroll_buffer_not_searching = InScrollBufferNotSearching(pymux)
@kb.add(Keys.Any, filter=has_prefix)
def _(event):
" Ignore unknown Ctrl-B prefixed key sequences. "
pymux.get_client_state().has_prefix = False
@kb.add('c-c', filter=prompt_or_command_focus & ~has_prefix)
@kb.add('c-g', filter=prompt_or_command_focus & ~has_prefix)
# @kb.add('backspace', filter=has_focus(COMMAND) & ~has_prefix &
# Condition(lambda: cli.buffers[COMMAND].text == ''))
def _(event):
" Leave command mode. "
pymux.leave_command_mode(append_to_history=False)
@kb.add('y', filter=waits_for_confirmation)
@kb.add('Y', filter=waits_for_confirmation)
def _(event):
"""
Confirm command.
"""
client_state = pymux.get_client_state()
command = client_state.confirm_command
client_state.confirm_command = None
client_state.confirm_text = None
pymux.handle_command(command)
@kb.add('n', filter=waits_for_confirmation)
@kb.add('N', filter=waits_for_confirmation)
@kb.add('c-c' , filter=waits_for_confirmation)
def _(event):
"""
Cancel command.
"""
client_state = pymux.get_client_state()
client_state.confirm_command = None
client_state.confirm_text = None
@kb.add('c-c', filter=in_scroll_buffer_not_searching)
@kb.add('enter', filter=in_scroll_buffer_not_searching)
@kb.add('q', filter=in_scroll_buffer_not_searching)
def _(event):
" Exit scroll buffer. "
pane = pymux.arrangement.get_active_pane()
pane.exit_scroll_buffer()
@kb.add(' ', filter=in_scroll_buffer_not_searching)
def _(event):
" Enter selection mode when pressing space in copy mode. "
event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)
@kb.add('enter', filter=in_scroll_buffer_not_searching & has_selection)
def _(event):
" Copy selection when pressing Enter. "
clipboard_data = event.current_buffer.copy_selection()
event.app.clipboard.set_data(clipboard_data)
@kb.add('v', filter=in_scroll_buffer_not_searching & has_selection)
def _(event):
" Toggle between selection types. "
types = [SelectionType.LINES, SelectionType.BLOCK, SelectionType.CHARACTERS]
selection_state = event.current_buffer.selection_state
try:
index = types.index(selection_state.type)
except ValueError: # Not in list.
index = 0
selection_state.type = types[(index + 1) % len(types)]
@Condition
def popup_displayed():
return self.pymux.get_client_state().display_popup
@kb.add('q', filter=popup_displayed, eager=True)
def _(event):
" Quit pop-up dialog. "
self.pymux.get_client_state().display_popup = False
@kb.add(Keys.Any, eager=True, filter=display_pane_numbers)
def _(event):
" When the pane numbers are shown. Any key press should hide them. "
pymux.display_pane_numbers = False
@Condition
def clock_displayed():
" "
pane = pymux.arrangement.get_active_pane()
return pane.clock_mode
@kb.add(Keys.Any, eager=True, filter=clock_displayed)
def _(event):
" When the clock is displayed. Any key press should hide it. "
pane = pymux.arrangement.get_active_pane()
pane.clock_mode = False
        return kb | Fill the Registry with the hard coded key bindings. | Below is the instruction that describes the task:
### Input:
Fill the Registry with the hard coded key bindings.
### Response:
def _load_builtins(self):
"""
Fill the Registry with the hard coded key bindings.
"""
pymux = self.pymux
kb = KeyBindings()
# Create filters.
has_prefix = HasPrefix(pymux)
waits_for_confirmation = WaitsForConfirmation(pymux)
prompt_or_command_focus = has_focus(COMMAND) | has_focus(PROMPT)
display_pane_numbers = Condition(lambda: pymux.display_pane_numbers)
in_scroll_buffer_not_searching = InScrollBufferNotSearching(pymux)
@kb.add(Keys.Any, filter=has_prefix)
def _(event):
" Ignore unknown Ctrl-B prefixed key sequences. "
pymux.get_client_state().has_prefix = False
@kb.add('c-c', filter=prompt_or_command_focus & ~has_prefix)
@kb.add('c-g', filter=prompt_or_command_focus & ~has_prefix)
# @kb.add('backspace', filter=has_focus(COMMAND) & ~has_prefix &
# Condition(lambda: cli.buffers[COMMAND].text == ''))
def _(event):
" Leave command mode. "
pymux.leave_command_mode(append_to_history=False)
@kb.add('y', filter=waits_for_confirmation)
@kb.add('Y', filter=waits_for_confirmation)
def _(event):
"""
Confirm command.
"""
client_state = pymux.get_client_state()
command = client_state.confirm_command
client_state.confirm_command = None
client_state.confirm_text = None
pymux.handle_command(command)
@kb.add('n', filter=waits_for_confirmation)
@kb.add('N', filter=waits_for_confirmation)
@kb.add('c-c' , filter=waits_for_confirmation)
def _(event):
"""
Cancel command.
"""
client_state = pymux.get_client_state()
client_state.confirm_command = None
client_state.confirm_text = None
@kb.add('c-c', filter=in_scroll_buffer_not_searching)
@kb.add('enter', filter=in_scroll_buffer_not_searching)
@kb.add('q', filter=in_scroll_buffer_not_searching)
def _(event):
" Exit scroll buffer. "
pane = pymux.arrangement.get_active_pane()
pane.exit_scroll_buffer()
@kb.add(' ', filter=in_scroll_buffer_not_searching)
def _(event):
" Enter selection mode when pressing space in copy mode. "
event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)
@kb.add('enter', filter=in_scroll_buffer_not_searching & has_selection)
def _(event):
" Copy selection when pressing Enter. "
clipboard_data = event.current_buffer.copy_selection()
event.app.clipboard.set_data(clipboard_data)
@kb.add('v', filter=in_scroll_buffer_not_searching & has_selection)
def _(event):
" Toggle between selection types. "
types = [SelectionType.LINES, SelectionType.BLOCK, SelectionType.CHARACTERS]
selection_state = event.current_buffer.selection_state
try:
index = types.index(selection_state.type)
except ValueError: # Not in list.
index = 0
selection_state.type = types[(index + 1) % len(types)]
@Condition
def popup_displayed():
return self.pymux.get_client_state().display_popup
@kb.add('q', filter=popup_displayed, eager=True)
def _(event):
" Quit pop-up dialog. "
self.pymux.get_client_state().display_popup = False
@kb.add(Keys.Any, eager=True, filter=display_pane_numbers)
def _(event):
" When the pane numbers are shown. Any key press should hide them. "
pymux.display_pane_numbers = False
@Condition
def clock_displayed():
" "
pane = pymux.arrangement.get_active_pane()
return pane.clock_mode
@kb.add(Keys.Any, eager=True, filter=clock_displayed)
def _(event):
" When the clock is displayed. Any key press should hide it. "
pane = pymux.arrangement.get_active_pane()
pane.clock_mode = False
return kb |
def normalize_list_of_dicts(value, default_key, default_value=UNDEFINED):
"""
Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x
"""
if value is None:
if default_value is UNDEFINED:
return []
value = default_value
if isinstance(value, dict):
return [value]
if isinstance(value, text_type):
return [{default_key: value}]
if isinstance(value, list):
if not all(isinstance(x, dict) for x in value):
def _fix(x):
return {default_key: x} if isinstance(x, text_type) else x
return list(map(_fix, value))
return value | Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
        only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x | Below is the instruction that describes the task:
### Input:
Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x
### Response:
def normalize_list_of_dicts(value, default_key, default_value=UNDEFINED):
"""
Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x
"""
if value is None:
if default_value is UNDEFINED:
return []
value = default_value
if isinstance(value, dict):
return [value]
if isinstance(value, text_type):
return [{default_key: value}]
if isinstance(value, list):
if not all(isinstance(x, dict) for x in value):
def _fix(x):
return {default_key: x} if isinstance(x, text_type) else x
return list(map(_fix, value))
return value |
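The conversion rules above, exercised directly (assuming Python 3, where str is the text type):
    normalize_list_of_dicts([{'name': 'a'}], 'name')            # -> [{'name': 'a'}]
    normalize_list_of_dicts({'name': 'a'}, 'name')               # -> [{'name': 'a'}]
    normalize_list_of_dicts('xyz', 'name')                       # -> [{'name': 'xyz'}]
    normalize_list_of_dicts(None, 'name', default_value='n/a')   # -> [{'name': 'n/a'}]
    normalize_list_of_dicts(None, 'name')                        # -> []
    normalize_list_of_dicts(['xyz', {'name': 'a'}], 'name')      # -> [{'name': 'xyz'}, {'name': 'a'}]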
def mtz2tw(n,c,e,l):
"""mtz: model for the traveling salesman problem with time windows
(based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
- e[i]: earliest date for visiting node i
- l[i]: latest date for visiting node i
Returns a model, ready to be solved.
"""
model = Model("tsptw - mtz-strong")
x,u = {},{}
for i in range(1,n+1):
u[i] = model.addVar(lb=e[i], ub=l[i], vtype="C", name="u(%s)"%i)
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for j in range(2,n+1):
if i != j:
M1 = max(l[i] + c[i,j] - e[j], 0)
M2 = max(l[i] + min(-c[j,i], e[j]-e[i]) - e[j], 0)
model.addCons(u[i] + c[i,j] - M1*(1-x[i,j]) + M2*x[j,i] <= u[j], "LiftedMTZ(%s,%s)"%(i,j))
for i in range(2,n+1):
model.addCons(e[i] + quicksum(max(e[j]+c[j,i]-e[i],0) * x[j,i] for j in range(1,n+1) if i != j) \
<= u[i], "LiftedLB(%s)"%i)
model.addCons(u[i] <= l[i] - \
quicksum(max(l[i]-l[j]+c[i,j],0) * x[i,j] for j in range(2,n+1) if i != j), \
"LiftedUB(%s)"%i)
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,u
return model | mtz: model for the traveling salesman problem with time windows
(based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
- e[i]: earliest date for visiting node i
- l[i]: latest date for visiting node i
    Returns a model, ready to be solved. | Below is the instruction that describes the task:
### Input:
mtz: model for the traveling salesman problem with time windows
(based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
- e[i]: earliest date for visiting node i
- l[i]: latest date for visiting node i
Returns a model, ready to be solved.
### Response:
def mtz2tw(n,c,e,l):
"""mtz: model for the traveling salesman problem with time windows
(based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
- e[i]: earliest date for visiting node i
- l[i]: latest date for visiting node i
Returns a model, ready to be solved.
"""
model = Model("tsptw - mtz-strong")
x,u = {},{}
for i in range(1,n+1):
u[i] = model.addVar(lb=e[i], ub=l[i], vtype="C", name="u(%s)"%i)
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for j in range(2,n+1):
if i != j:
M1 = max(l[i] + c[i,j] - e[j], 0)
M2 = max(l[i] + min(-c[j,i], e[j]-e[i]) - e[j], 0)
model.addCons(u[i] + c[i,j] - M1*(1-x[i,j]) + M2*x[j,i] <= u[j], "LiftedMTZ(%s,%s)"%(i,j))
for i in range(2,n+1):
model.addCons(e[i] + quicksum(max(e[j]+c[j,i]-e[i],0) * x[j,i] for j in range(1,n+1) if i != j) \
<= u[i], "LiftedLB(%s)"%i)
model.addCons(u[i] <= l[i] - \
quicksum(max(l[i]-l[j]+c[i,j],0) * x[i,j] for j in range(2,n+1) if i != j), \
"LiftedUB(%s)"%i)
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,u
return model |
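A small end-to-end sketch with PySCIPOpt on a made-up 3-node instance (node 1 acts as the depot; all data values are illustrative):
    n = 3
    c = {(1, 2): 5, (2, 1): 5, (1, 3): 8, (3, 1): 8, (2, 3): 4, (3, 2): 4}
    e = {1: 0, 2: 6, 3: 0}       # earliest visit times
    l = {1: 100, 2: 20, 3: 30}   # latest visit times

    model = mtz2tw(n, c, e, l)
    model.optimize()
    x, u = model.data
    if model.getStatus() == "optimal":
        arcs = [(i, j) for (i, j) in x if model.getVal(x[i, j]) > 0.5]
        print("tour arcs:", arcs, "cost:", model.getObjVal())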
def create_error(msg, cause=None):
"""Creates a ``GaxError`` or subclass.
Attributes:
msg (string): describes the error that occurred.
cause (Exception, optional): the exception raised by a lower
layer of the RPC stack (for example, gRPC) that caused this
exception, or None if this exception originated in GAX.
Returns:
.GaxError: The exception that wraps ``cause``.
"""
status_code = config.exc_to_code(cause)
status_name = config.NAME_STATUS_CODES.get(status_code)
if status_name == 'INVALID_ARGUMENT':
return InvalidArgumentError(msg, cause=cause)
else:
return GaxError(msg, cause=cause) | Creates a ``GaxError`` or subclass.
Attributes:
msg (string): describes the error that occurred.
cause (Exception, optional): the exception raised by a lower
layer of the RPC stack (for example, gRPC) that caused this
exception, or None if this exception originated in GAX.
Returns:
      .GaxError: The exception that wraps ``cause``. | Below is the instruction that describes the task:
### Input:
Creates a ``GaxError`` or subclass.
Attributes:
msg (string): describes the error that occurred.
cause (Exception, optional): the exception raised by a lower
layer of the RPC stack (for example, gRPC) that caused this
exception, or None if this exception originated in GAX.
Returns:
.GaxError: The exception that wraps ``cause``.
### Response:
def create_error(msg, cause=None):
"""Creates a ``GaxError`` or subclass.
Attributes:
msg (string): describes the error that occurred.
cause (Exception, optional): the exception raised by a lower
layer of the RPC stack (for example, gRPC) that caused this
exception, or None if this exception originated in GAX.
Returns:
.GaxError: The exception that wraps ``cause``.
"""
status_code = config.exc_to_code(cause)
status_name = config.NAME_STATUS_CODES.get(status_code)
if status_name == 'INVALID_ARGUMENT':
return InvalidArgumentError(msg, cause=cause)
else:
return GaxError(msg, cause=cause) |
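A hedged usage sketch; some_grpc_call is a placeholder for whatever lower-level RPC raised the exception:
    try:
        some_grpc_call()
    except Exception as exc:
        # becomes InvalidArgumentError when the status code maps to
        # INVALID_ARGUMENT, a plain GaxError otherwise
        raise create_error('RPC to example.Service/Method failed', cause=exc)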
def bin2hex(fin, fout, offset=0):
"""Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
@param offset starting address offset for loading bin
"""
h = IntelHex()
try:
h.loadbin(fin, offset)
except IOError:
e = sys.exc_info()[1] # current exception
txt = 'ERROR: unable to load bin file:', str(e)
print(txt)
return 1
try:
h.tofile(fout, format='hex')
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
print(txt)
return 1
return 0 | Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
    @param offset starting address offset for loading bin | Below is the instruction that describes the task:
### Input:
Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
@param offset starting address offset for loading bin
### Response:
def bin2hex(fin, fout, offset=0):
"""Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
@param offset starting address offset for loading bin
"""
h = IntelHex()
try:
h.loadbin(fin, offset)
except IOError:
e = sys.exc_info()[1] # current exception
txt = 'ERROR: unable to load bin file:', str(e)
print(txt)
return 1
try:
h.tofile(fout, format='hex')
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
print(txt)
return 1
return 0 |
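Typical invocation; filenames and the load offset are placeholders:
    # convert firmware.bin into Intel HEX, placing the image at 0x08000000
    rc = bin2hex('firmware.bin', 'firmware.hex', offset=0x08000000)
    if rc != 0:
        raise SystemExit(rc)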
def track_progress(**context):
"""Print training progress. Called after each epoch."""
model = context["model"]
train_X = context["train_X"]
dev_X = context["dev_X"]
dev_y = context["dev_y"]
n_train = len(train_X)
trainer = context["trainer"]
def each_epoch():
global epoch_train_acc, epoch
with model.use_params(trainer.optimizer.averages):
avg_acc = model.evaluate_logloss(dev_X, dev_y)
stats = (avg_acc, float(epoch_train_acc) / n_train, trainer.dropout)
print("%.3f dev acc, %.3f train acc, %.4f drop" % stats)
epoch_train_acc = 0.0
epoch += 1
    return each_epoch | Print training progress. Called after each epoch. | Below is the instruction that describes the task:
### Input:
Print training progress. Called after each epoch.
### Response:
def track_progress(**context):
"""Print training progress. Called after each epoch."""
model = context["model"]
train_X = context["train_X"]
dev_X = context["dev_X"]
dev_y = context["dev_y"]
n_train = len(train_X)
trainer = context["trainer"]
def each_epoch():
global epoch_train_acc, epoch
with model.use_params(trainer.optimizer.averages):
avg_acc = model.evaluate_logloss(dev_X, dev_y)
stats = (avg_acc, float(epoch_train_acc) / n_train, trainer.dropout)
print("%.3f dev acc, %.3f train acc, %.4f drop" % stats)
epoch_train_acc = 0.0
epoch += 1
return each_epoch |
def annotate(title, format_type, message=None, data=None, metric=1.0):
"""
Annotate a test case with info that should be displayed in the reports.
Parameters
----------
title : str
A human-readable descriptive title of the test case.
format_type : str
A string that determines how the result data is formatted in the
report. It is expected not to be None.
* 'number' : 'data' is a single number which can be an integer or
float and should be represented as such.
* 'count' : 'data' is a list, set or tuple. Choosing 'count' will
display the length of that list e.g. number of metabolites without
formula.
* 'percent' : Instead of 'data' the content of 'metric' ought to be
displayed e.g. percentage of metabolites without charge.
'metric' is expected to be a floating point number.
        * 'raw' : 'data' ought to be displayed "as is" without formatting.
This option is appropriate for single strings or a boolean output.
message : str
A short written explanation that states and possibly explains the test
result.
data
Raw data which the test case generates and assesses. Can be of the
following types: list, set, tuple, string, float, integer, and boolean.
metric: float
A value x in the range of 0 <= x <= 1 which represents the fraction of
'data' to the total in the model. For example, if 'data' are all
metabolites without formula, 'metric' should be the fraction of
metabolites without formula from the total of metabolites in the model.
Returns
-------
function
The decorated function, now extended by the attribute 'annotation'.
Notes
-----
Adds "annotation" attribute to the function object, which stores values for
predefined keys as a dictionary.
"""
if format_type not in TYPES:
raise ValueError(
"Invalid type. Expected one of: {}.".format(", ".join(TYPES)))
def decorator(func):
func.annotation = dict(
title=title,
summary=extended_summary(func),
message=message,
data=data,
format_type=format_type,
metric=metric)
return func
return decorator | Annotate a test case with info that should be displayed in the reports.
Parameters
----------
title : str
A human-readable descriptive title of the test case.
format_type : str
A string that determines how the result data is formatted in the
report. It is expected not to be None.
* 'number' : 'data' is a single number which can be an integer or
float and should be represented as such.
* 'count' : 'data' is a list, set or tuple. Choosing 'count' will
display the length of that list e.g. number of metabolites without
formula.
* 'percent' : Instead of 'data' the content of 'metric' ought to be
displayed e.g. percentage of metabolites without charge.
'metric' is expected to be a floating point number.
    * 'raw' : 'data' ought to be displayed "as is" without formatting.
This option is appropriate for single strings or a boolean output.
message : str
A short written explanation that states and possibly explains the test
result.
data
Raw data which the test case generates and assesses. Can be of the
following types: list, set, tuple, string, float, integer, and boolean.
metric: float
A value x in the range of 0 <= x <= 1 which represents the fraction of
'data' to the total in the model. For example, if 'data' are all
metabolites without formula, 'metric' should be the fraction of
metabolites without formula from the total of metabolites in the model.
Returns
-------
function
The decorated function, now extended by the attribute 'annotation'.
Notes
-----
Adds "annotation" attribute to the function object, which stores values for
predefined keys as a dictionary. | Below is the the instruction that describes the task:
### Input:
Annotate a test case with info that should be displayed in the reports.
Parameters
----------
title : str
A human-readable descriptive title of the test case.
format_type : str
A string that determines how the result data is formatted in the
report. It is expected not to be None.
* 'number' : 'data' is a single number which can be an integer or
float and should be represented as such.
* 'count' : 'data' is a list, set or tuple. Choosing 'count' will
display the length of that list e.g. number of metabolites without
formula.
* 'percent' : Instead of 'data' the content of 'metric' ought to be
displayed e.g. percentage of metabolites without charge.
'metric' is expected to be a floating point number.
    * 'raw' : 'data' ought to be displayed "as is" without formatting.
This option is appropriate for single strings or a boolean output.
message : str
A short written explanation that states and possibly explains the test
result.
data
Raw data which the test case generates and assesses. Can be of the
following types: list, set, tuple, string, float, integer, and boolean.
metric: float
A value x in the range of 0 <= x <= 1 which represents the fraction of
'data' to the total in the model. For example, if 'data' are all
metabolites without formula, 'metric' should be the fraction of
metabolites without formula from the total of metabolites in the model.
Returns
-------
function
The decorated function, now extended by the attribute 'annotation'.
Notes
-----
Adds "annotation" attribute to the function object, which stores values for
predefined keys as a dictionary.
### Response:
def annotate(title, format_type, message=None, data=None, metric=1.0):
"""
Annotate a test case with info that should be displayed in the reports.
Parameters
----------
title : str
A human-readable descriptive title of the test case.
format_type : str
A string that determines how the result data is formatted in the
report. It is expected not to be None.
* 'number' : 'data' is a single number which can be an integer or
float and should be represented as such.
* 'count' : 'data' is a list, set or tuple. Choosing 'count' will
display the length of that list e.g. number of metabolites without
formula.
* 'percent' : Instead of 'data' the content of 'metric' ought to be
displayed e.g. percentage of metabolites without charge.
'metric' is expected to be a floating point number.
        * 'raw' : 'data' ought to be displayed "as is" without formatting.
This option is appropriate for single strings or a boolean output.
message : str
A short written explanation that states and possibly explains the test
result.
data
Raw data which the test case generates and assesses. Can be of the
following types: list, set, tuple, string, float, integer, and boolean.
metric: float
A value x in the range of 0 <= x <= 1 which represents the fraction of
'data' to the total in the model. For example, if 'data' are all
metabolites without formula, 'metric' should be the fraction of
metabolites without formula from the total of metabolites in the model.
Returns
-------
function
The decorated function, now extended by the attribute 'annotation'.
Notes
-----
Adds "annotation" attribute to the function object, which stores values for
predefined keys as a dictionary.
"""
if format_type not in TYPES:
raise ValueError(
"Invalid type. Expected one of: {}.".format(", ".join(TYPES)))
def decorator(func):
func.annotation = dict(
title=title,
summary=extended_summary(func),
message=message,
data=data,
format_type=format_type,
metric=metric)
return func
return decorator |
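A brief sketch of applying the decorator; the test function body, the model object, and its metabolites attribute are hypothetical and only illustrate how the attached annotation dict can be filled in (format_type="count" is assumed to be in TYPES, as the docstring lists it):
@annotate(title="Metabolites without formula", format_type="count")
def test_metabolite_formula_presence(model):
    # Hypothetical check: collect metabolites that lack a formula.
    missing = [m for m in model.metabolites if not getattr(m, "formula", None)]
    ann = test_metabolite_formula_presence.annotation
    ann["data"] = missing
    ann["metric"] = len(missing) / len(model.metabolites)
    assert len(missing) == 0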
def haversine(lon1, lat1, lon2, lat2, earth_radius=6357000):
"""Calculate the great circle distance between two points on earth in Kilometers
on the earth (specified in decimal degrees)
.. seealso:: :func:`distance_points`
:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth_radius (use 6367 for KM 6367000 for meters 3956 for miles
- http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles
:Example:
>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat)
342.55375272454864
:returns: float distance in Kilometers
"""
    # convert decimal degrees to radians
lon1, lat1, lon2, lat2 = list(map(math.radians, [lon1, lat1, lon2, lat2]))
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
distance = earth_radius * c # 6371 # Radius of earth in kilometers. Use 3956 for miles
return distance | Calculate the great circle distance between two points on earth in Kilometers
on the earth (specified in decimal degrees)
.. seealso:: :func:`distance_points`
:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth_radius (use 6367 for KM 6367000 for meters 3956 for miles
- http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles
:Example:
>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat)
342.55375272454864
:returns: float distance in Kilometers | Below is the the instruction that describes the task:
### Input:
Calculate the great circle distance between two points on earth in Kilometers
on the earth (specified in decimal degrees)
.. seealso:: :func:`distance_points`
:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth_radius (use 6367 for KM 6367000 for meters 3956 for miles
- http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles
:Example:
>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat)
342.55375272454864
:returns: float distance in Kilometers
### Response:
def haversine(lon1, lat1, lon2, lat2, earth_radius=6357000):
"""Calculate the great circle distance between two points on earth in Kilometers
on the earth (specified in decimal degrees)
.. seealso:: :func:`distance_points`
:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth_radius (use 6367 for KM 6367000 for meters 3956 for miles
- http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles
:Example:
>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat)
342.55375272454864
:returns: float distance in Kilometers
"""
    # convert decimal degrees to radians
lon1, lat1, lon2, lat2 = list(map(math.radians, [lon1, lat1, lon2, lat2]))
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
distance = earth_radius * c # 6371 # Radius of earth in kilometers. Use 3956 for miles
return distance |
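Since the result is simply earth_radius * c, its unit follows whatever unit earth_radius is given in: the default of 6357000 yields metres, while passing a radius in kilometres yields kilometres as in the docstring example. A short sketch:
d_metres = haversine(-0.126, 51.50, 2.350, 48.856)                      # default radius in metres
d_km = haversine(-0.126, 51.50, 2.350, 48.856, earth_radius=6367)       # radius given in kilometres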
def list_user_threads_view(request, targetUsername):
''' View of threads a user has created. '''
targetUser = get_object_or_404(User, username=targetUsername)
targetProfile = get_object_or_404(UserProfile, user=targetUser)
threads = Thread.objects.filter(owner=targetProfile)
page_name = "{0}'s Threads".format(targetUser.get_full_name())
create_form = ThreadForm(
request.POST if "submit_thread_form" in request.POST else None,
profile=UserProfile.objects.get(user=request.user),
prefix="create",
)
if create_form.is_valid():
thread = create_form.save()
return HttpResponseRedirect(reverse("threads:view_thread", kwargs={"pk": thread.pk}))
elif request.method == "POST":
messages.add_message(request, messages.ERROR, MESSAGES['THREAD_ERROR'])
return render_to_response('list_threads.html', {
'page_name': page_name,
'threads': threads,
"create_form": create_form,
'targetUsername': targetUsername,
}, context_instance=RequestContext(request)) | View of threads a user has created. | Below is the the instruction that describes the task:
### Input:
View of threads a user has created.
### Response:
def list_user_threads_view(request, targetUsername):
''' View of threads a user has created. '''
targetUser = get_object_or_404(User, username=targetUsername)
targetProfile = get_object_or_404(UserProfile, user=targetUser)
threads = Thread.objects.filter(owner=targetProfile)
page_name = "{0}'s Threads".format(targetUser.get_full_name())
create_form = ThreadForm(
request.POST if "submit_thread_form" in request.POST else None,
profile=UserProfile.objects.get(user=request.user),
prefix="create",
)
if create_form.is_valid():
thread = create_form.save()
return HttpResponseRedirect(reverse("threads:view_thread", kwargs={"pk": thread.pk}))
elif request.method == "POST":
messages.add_message(request, messages.ERROR, MESSAGES['THREAD_ERROR'])
return render_to_response('list_threads.html', {
'page_name': page_name,
'threads': threads,
"create_form": create_form,
'targetUsername': targetUsername,
}, context_instance=RequestContext(request)) |
def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
'''
Beam search.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value,
and SearchProblem.generate_random_state.
'''
return _local_search(problem,
_all_expander,
iterations_limit=iterations_limit,
fringe_size=beam_size,
random_initial_states=True,
stop_when_no_better=iterations_limit==0,
viewer=viewer) | Beam search.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value,
and SearchProblem.generate_random_state. | Below is the the instruction that describes the task:
### Input:
Beam search.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value,
and SearchProblem.generate_random_state.
### Response:
def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
'''
Beam search.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value,
and SearchProblem.generate_random_state.
'''
return _local_search(problem,
_all_expander,
iterations_limit=iterations_limit,
fringe_size=beam_size,
random_initial_states=True,
stop_when_no_better=iterations_limit==0,
viewer=viewer) |
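A sketch of a toy problem definition for this local search; SearchProblem is assumed to be the base class provided by the same package, and the method names follow the requirements listed in the docstring:
import random
class MaximizeOnes(SearchProblem):
    def actions(self, state):
        return range(len(state))                     # an action is the index of a bit to flip
    def result(self, state, action):
        return state[:action] + (1 - state[action],) + state[action + 1:]
    def value(self, state):
        return sum(state)                            # number of ones; higher is better
    def generate_random_state(self):
        return tuple(random.randint(0, 1) for _ in range(16))
best_node = beam(MaximizeOnes(), beam_size=20, iterations_limit=50)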
def get_epochs_given_midtimes_and_period(
t_mid,
period,
err_t_mid=None,
t0_fixed=None,
t0_percentile=None,
verbose=False
):
'''This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed.
'''
kwargarr = np.array([isinstance(err_t_mid,np.ndarray),
t0_fixed,
t0_percentile])
if not _single_true(kwargarr) and not np.all(~kwargarr.astype(bool)):
raise AssertionError(
'can have at most one of err_t_mid, t0_fixed, t0_percentile')
t_mid = t_mid[np.isfinite(t_mid)]
N_midtimes = len(t_mid)
if t0_fixed:
t0 = t0_fixed
elif isinstance(err_t_mid,np.ndarray):
# get the weighted average. then round it to the nearest transit epoch.
t0_avg = np.average(t_mid, weights=1/err_t_mid**2)
t0_options = np.arange(min(t_mid), max(t_mid)+period, period)
t0 = t0_options[np.argmin(np.abs(t0_options - t0_avg))]
else:
if not t0_percentile:
# if there are an odd number of times, take the median time as
# epoch=0. elif there are an even number of times, take the lower
# of the two middle times as epoch=0.
if N_midtimes % 2 == 1:
t0 = np.median(t_mid)
else:
t0 = t_mid[int(N_midtimes/2)]
else:
t0 = np.sort(t_mid)[int(N_midtimes*t0_percentile/100)]
epoch = (t_mid - t0)/period
# do not convert numpy entries to actual ints, because np.nan is float type
int_epoch = np.round(epoch, 0)
if verbose:
LOGINFO('epochs before rounding')
LOGINFO('\n{:s}'.format(repr(epoch)))
LOGINFO('epochs after rounding')
LOGINFO('\n{:s}'.format(repr(int_epoch)))
return int_epoch, t0 | This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed. | Below is the the instruction that describes the task:
### Input:
This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed.
### Response:
def get_epochs_given_midtimes_and_period(
t_mid,
period,
err_t_mid=None,
t0_fixed=None,
t0_percentile=None,
verbose=False
):
'''This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed.
'''
kwargarr = np.array([isinstance(err_t_mid,np.ndarray),
t0_fixed,
t0_percentile])
if not _single_true(kwargarr) and not np.all(~kwargarr.astype(bool)):
raise AssertionError(
'can have at most one of err_t_mid, t0_fixed, t0_percentile')
t_mid = t_mid[np.isfinite(t_mid)]
N_midtimes = len(t_mid)
if t0_fixed:
t0 = t0_fixed
elif isinstance(err_t_mid,np.ndarray):
# get the weighted average. then round it to the nearest transit epoch.
t0_avg = np.average(t_mid, weights=1/err_t_mid**2)
t0_options = np.arange(min(t_mid), max(t_mid)+period, period)
t0 = t0_options[np.argmin(np.abs(t0_options - t0_avg))]
else:
if not t0_percentile:
# if there are an odd number of times, take the median time as
# epoch=0. elif there are an even number of times, take the lower
# of the two middle times as epoch=0.
if N_midtimes % 2 == 1:
t0 = np.median(t_mid)
else:
t0 = t_mid[int(N_midtimes/2)]
else:
t0 = np.sort(t_mid)[int(N_midtimes*t0_percentile/100)]
epoch = (t_mid - t0)/period
# do not convert numpy entries to actual ints, because np.nan is float type
int_epoch = np.round(epoch, 0)
if verbose:
LOGINFO('epochs before rounding')
LOGINFO('\n{:s}'.format(repr(epoch)))
LOGINFO('epochs after rounding')
LOGINFO('\n{:s}'.format(repr(int_epoch)))
return int_epoch, t0 |
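A small sketch with synthetic mid-transit times (numbers are illustrative only); with no optional arguments the zero-point defaults to the median finite mid-time:
import numpy as np
period = 3.5                                                   # days
t_mid = 2450000.0 + period * np.array([0., 1., 2., 5., 8.])    # five observed transits with gaps
epochs, t0 = get_epochs_given_midtimes_and_period(t_mid, period)
# epochs -> array of integer-valued floats counted from t0 (here the median mid-time)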
def build_html(path_jinja2, template_name, path_outfile, template_kwargs=None):
'''Helper function for building an html from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
'''
latex_template_object = LatexBuild(
path_jinja2,
template_name,
template_kwargs,
)
return latex_template_object.build_html(path_outfile) | Helper function for building an html from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables | Below is the the instruction that describes the task:
### Input:
Helper function for building an html from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
### Response:
def build_html(path_jinja2, template_name, path_outfile, template_kwargs=None):
'''Helper function for building an html from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
'''
latex_template_object = LatexBuild(
path_jinja2,
template_name,
template_kwargs,
)
return latex_template_object.build_html(path_outfile) |
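A hypothetical call; the directory layout, template name, and template variables are made up for illustration:
build_html(
    path_jinja2="templates",                      # root of the jinja2 LaTeX templates
    template_name="report.tex",                   # template path relative to path_jinja2
    path_outfile="/tmp/report.html",
    template_kwargs={"title": "Monthly report", "author": "Jane Doe"},
)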
def run(command, raw_output=False):
"""Run a command using subprocess.
:param command: command line to be run
:type command: str
:param raw_output: does not attempt to convert the output as unicode
:type raw_output: bool
:return: error code, output (``stdout``) and error (``stderr``)
:rtype: tuple
"""
p = Popen(command.split(), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
# On python 3, subprocess.Popen returns bytes objects.
if not raw_output:
return (
p.returncode,
[line.rstrip() for line in stdout.decode("utf-8").splitlines()],
[line.rstrip() for line in stderr.decode("utf-8").splitlines()]
)
else:
return (p.returncode, stdout, stderr) | Run a command using subprocess.
:param command: command line to be run
:type command: str
:param raw_output: does not attempt to convert the output as unicode
:type raw_output: bool
:return: error code, output (``stdout``) and error (``stderr``)
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Run a command using subprocess.
:param command: command line to be run
:type command: str
:param raw_output: does not attempt to convert the output as unicode
:type raw_output: bool
:return: error code, output (``stdout``) and error (``stderr``)
:rtype: tuple
### Response:
def run(command, raw_output=False):
"""Run a command using subprocess.
:param command: command line to be run
:type command: str
:param raw_output: does not attempt to convert the output as unicode
:type raw_output: bool
:return: error code, output (``stdout``) and error (``stderr``)
:rtype: tuple
"""
p = Popen(command.split(), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
# On python 3, subprocess.Popen returns bytes objects.
if not raw_output:
return (
p.returncode,
[line.rstrip() for line in stdout.decode("utf-8").splitlines()],
[line.rstrip() for line in stderr.decode("utf-8").splitlines()]
)
else:
return (p.returncode, stdout, stderr) |
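A short usage sketch; note that the command string is split on whitespace, so arguments containing spaces cannot be passed this way:
code, out, err = run("ls -l /tmp")
if code == 0:
    for line in out:        # decoded, right-stripped stdout lines
        print(line)
else:
    print("command failed:", err)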
def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda,
ranges, verbose=True):
""" Perform continuum normalization using running quantile, for spectrum
that comes in chunks
"""
print("contnorm.py: continuum norm using running quantile")
print("Taking spectra in %s chunks" % len(ranges))
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm_running_quantile(
wl[start:stop], fluxes[:,start:stop],
ivars[:,start:stop], q, delta_lambda)
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
return norm_fluxes, norm_ivars | Perform continuum normalization using running quantile, for spectrum
that comes in chunks | Below is the the instruction that describes the task:
### Input:
Perform continuum normalization using running quantile, for spectrum
that comes in chunks
### Response:
def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda,
ranges, verbose=True):
""" Perform continuum normalization using running quantile, for spectrum
that comes in chunks
"""
print("contnorm.py: continuum norm using running quantile")
print("Taking spectra in %s chunks" % len(ranges))
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm_running_quantile(
wl[start:stop], fluxes[:,start:stop],
ivars[:,start:stop], q, delta_lambda)
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
return norm_fluxes, norm_ivars |
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
"""
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
"""
fake_handle = StringIO(dataset_as_nexus)
nexus_al = AlignIO.parse(fake_handle, 'nexus')
tmp_file = make_random_filename()
AlignIO.write(nexus_al, tmp_file, dataset_format)
dataset_as_fasta = read_and_delete_tmp_file(tmp_file)
return dataset_as_fasta | Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return: | Below is the the instruction that describes the task:
### Input:
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
### Response:
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
"""
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
"""
fake_handle = StringIO(dataset_as_nexus)
nexus_al = AlignIO.parse(fake_handle, 'nexus')
tmp_file = make_random_filename()
AlignIO.write(nexus_al, tmp_file, dataset_format)
dataset_as_fasta = read_and_delete_tmp_file(tmp_file)
return dataset_as_fasta |
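A hypothetical call: nexus_text would hold the contents of a NEXUS alignment, and the target may be any alignment format Biopython's AlignIO can write (despite the summary line, the output format is whatever dataset_format names):
with open("alignment.nex") as handle:     # hypothetical input file
    nexus_text = handle.read()
phylip_text = convert_nexus_to_format(nexus_text, "phylip-relaxed")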
def p_statement_draw_attr(p):
""" statement : DRAW attr_list expr COMMA expr
"""
p[0] = make_sentence('DRAW',
make_typecast(TYPE.integer, p[3], p.lineno(4)),
make_typecast(TYPE.integer, p[5], p.lineno(4)), p[2]) | statement : DRAW attr_list expr COMMA expr | Below is the the instruction that describes the task:
### Input:
statement : DRAW attr_list expr COMMA expr
### Response:
def p_statement_draw_attr(p):
""" statement : DRAW attr_list expr COMMA expr
"""
p[0] = make_sentence('DRAW',
make_typecast(TYPE.integer, p[3], p.lineno(4)),
make_typecast(TYPE.integer, p[5], p.lineno(4)), p[2]) |
def content_edge_check(self, url):
"""Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server."""
prefixes = ["http://", "https://"]
for prefix in prefixes:
if url.startswith(prefix):
url = url[len(prefix):]
break
content = self._fetch("/content/edge_check/%s" % url)
return content | Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server. | Below is the the instruction that describes the task:
### Input:
Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server.
### Response:
def content_edge_check(self, url):
"""Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server."""
prefixes = ["http://", "https://"]
for prefix in prefixes:
if url.startswith(prefix):
url = url[len(prefix):]
break
content = self._fetch("/content/edge_check/%s" % url)
return content |
def _compute_key(self, id, nbytes):
"id is 'A' - 'F' for the various keys used by ssh"
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(id)
m.add_bytes(self.session_id)
out = sofar = SHA.new(str(m)).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = SHA.new(str(m)).digest()
out += digest
sofar += digest
return out[:nbytes] | id is 'A' - 'F' for the various keys used by ssh | Below is the the instruction that describes the task:
### Input:
id is 'A' - 'F' for the various keys used by ssh
### Response:
def _compute_key(self, id, nbytes):
"id is 'A' - 'F' for the various keys used by ssh"
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(id)
m.add_bytes(self.session_id)
out = sofar = SHA.new(str(m)).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = SHA.new(str(m)).digest()
out += digest
sofar += digest
return out[:nbytes] |
def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app) | Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange. | Below is the the instruction that describes the task:
### Input:
Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
### Response:
def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app) |
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value) | Formula for computing the current backoff
:rtype: float | Below is the the instruction that describes the task:
### Input:
Formula for computing the current backoff
:rtype: float
### Response:
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value) |
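A worked illustration of the schedule (before the BACKOFF_MAX cap) for backoff_factor = 0.5:
# _observed_errors = 1 -> 0                  (no delay yet)
# _observed_errors = 2 -> 0.5 * 2**1 = 1.0   seconds
# _observed_errors = 3 -> 0.5 * 2**2 = 2.0   seconds
# _observed_errors = 4 -> 0.5 * 2**3 = 4.0   seconds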
def plot(self, numPoints=100):
"""
Specific plotting method for cylinders.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# generate cylinder
x = np.linspace(- self.radius, self.radius, numPoints)
z = np.linspace(- self.height / 2., self.height / 2., numPoints)
Xc, Zc = np.meshgrid(x, z)
Yc = np.sqrt(self.radius ** 2 - Xc ** 2)
# plot
ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=20, cstride=10)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title("{}".format(self))
return fig, ax | Specific plotting method for cylinders. | Below is the the instruction that describes the task:
### Input:
Specific plotting method for cylinders.
### Response:
def plot(self, numPoints=100):
"""
Specific plotting method for cylinders.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# generate cylinder
x = np.linspace(- self.radius, self.radius, numPoints)
z = np.linspace(- self.height / 2., self.height / 2., numPoints)
Xc, Zc = np.meshgrid(x, z)
Yc = np.sqrt(self.radius ** 2 - Xc ** 2)
# plot
ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=20, cstride=10)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title("{}".format(self))
return fig, ax |
def run(self):
"""
this is the actual execution of the instrument thread: continuously read values from the probes
"""
eta = self.settings['noise_strength']
gamma = 2 * np.pi * self.settings['noise_bandwidth']
dt = 1. / self.settings['update frequency']
control = self.settings['control']
self._state = self._output
while self._stop is False:
A = -gamma * dt
noise = np.sqrt(2*gamma*eta)*np.random.randn()
self._state *= (1. + A)
self._state += noise + control
self._output = self._state
self.msleep(int(1e3 / self.settings['update frequency'])) | this is the actual execution of the instrument thread: continuously read values from the probes | Below is the the instruction that describes the task:
### Input:
this is the actual execution of the instrument thread: continuously read values from the probes
### Response:
def run(self):
"""
this is the actual execution of the instrument thread: continuously read values from the probes
"""
eta = self.settings['noise_strength']
gamma = 2 * np.pi * self.settings['noise_bandwidth']
dt = 1. / self.settings['update frequency']
control = self.settings['control']
self._state = self._output
while self._stop is False:
A = -gamma * dt
noise = np.sqrt(2*gamma*eta)*np.random.randn()
self._state *= (1. + A)
self._state += noise + control
self._output = self._state
self.msleep(int(1e3 / self.settings['update frequency'])) |
def get_wigner(z, freq, sample_freq, histbins=200, show_plot=False):
"""
Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres
"""
phase, phase_slices = extract_slices(z, freq, sample_freq, show_plot=False)
counts_array, bin_edges = histogram_phase(phase_slices, phase, histbins, show_plot=show_plot)
diff = bin_edges[1] - bin_edges[0]
bin_centres = bin_edges[:-1] + diff
iradon_output = _iradon_sart(counts_array, theta=phase)
#_plt.imshow(iradon_output, extent=[bin_centres[0], bin_centres[-1], bin_centres[0], bin_centres[-1]])
#_plt.show()
return iradon_output, bin_centres | Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres | Below is the the instruction that describes the task:
### Input:
Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres
### Response:
def get_wigner(z, freq, sample_freq, histbins=200, show_plot=False):
"""
Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres
"""
phase, phase_slices = extract_slices(z, freq, sample_freq, show_plot=False)
counts_array, bin_edges = histogram_phase(phase_slices, phase, histbins, show_plot=show_plot)
diff = bin_edges[1] - bin_edges[0]
bin_centres = bin_edges[:-1] + diff
iradon_output = _iradon_sart(counts_array, theta=phase)
#_plt.imshow(iradon_output, extent=[bin_centres[0], bin_centres[-1], bin_centres[0], bin_centres[-1]])
#_plt.show()
return iradon_output, bin_centres |
def set_event(self, ref, tk_event, callback):
"""
Sets a callback for this widget against a ref (reference) for a
tk_event, setting the callback to None will remove it.
"""
# has an EventCallback been created for this tk event
if tk_event not in self._event_callbacks:
self._event_callbacks[tk_event] = EventCallback(self._widget, self._tks, tk_event)
# assign this ref to this event callback
self._refs[ref] = self._event_callbacks[tk_event]
# set up the callback
self._refs[ref].set_callback(ref, callback) | Sets a callback for this widget against a ref (reference) for a
tk_event, setting the callback to None will remove it. | Below is the the instruction that describes the task:
### Input:
Sets a callback for this widget against a ref (reference) for a
tk_event, setting the callback to None will remove it.
### Response:
def set_event(self, ref, tk_event, callback):
"""
Sets a callback for this widget against a ref (reference) for a
tk_event, setting the callback to None will remove it.
"""
# has an EventCallback been created for this tk event
if tk_event not in self._event_callbacks:
self._event_callbacks[tk_event] = EventCallback(self._widget, self._tks, tk_event)
# assign this ref to this event callback
self._refs[ref] = self._event_callbacks[tk_event]
# set up the callback
self._refs[ref].set_callback(ref, callback) |
def exists(self, path):
"""
Does provided path exist on S3?
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# root always exists
if self._is_root(key):
return True
# file
if self._exists(bucket, key):
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False | Does provided path exist on S3? | Below is the the instruction that describes the task:
### Input:
Does provided path exist on S3?
### Response:
def exists(self, path):
"""
Does provided path exist on S3?
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# root always exists
if self._is_root(key):
return True
# file
if self._exists(bucket, key):
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False |
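A hedged usage sketch, assuming client is an instance of the surrounding S3 client class and the bucket/key are hypothetical:
if client.exists("s3://my-bucket/raw/part-00000.csv"):
    print("key (or directory prefix) is present")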
def read_root_generation_progress(self):
"""Read the configuration and process of the current root generation attempt.
Supported methods:
GET: /sys/generate-root/attempt. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/generate-root/attempt'
response = self._adapter.get(
url=api_path,
)
return response.json() | Read the configuration and process of the current root generation attempt.
Supported methods:
GET: /sys/generate-root/attempt. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Read the configuration and process of the current root generation attempt.
Supported methods:
GET: /sys/generate-root/attempt. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
### Response:
def read_root_generation_progress(self):
"""Read the configuration and process of the current root generation attempt.
Supported methods:
GET: /sys/generate-root/attempt. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/generate-root/attempt'
response = self._adapter.get(
url=api_path,
)
return response.json() |
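A minimal sketch, assuming client is a configured instance of the surrounding API class; the returned dict is Vault's JSON status for the current root-generation attempt:
status = client.read_root_generation_progress()
print(status)   # e.g. whether an attempt has started and how far along it is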
def get_limits(self, limit_sum=None):
"""
Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects.
"""
with self.limit_lock:
# Any changes?
if limit_sum and self.limit_sum == limit_sum:
raise NoChangeException()
# Return a tuple of the limits and limit sum
return (self.limit_sum, self.limit_data) | Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects. | Below is the the instruction that describes the task:
### Input:
Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects.
### Response:
def get_limits(self, limit_sum=None):
"""
Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects.
"""
with self.limit_lock:
# Any changes?
if limit_sum and self.limit_sum == limit_sum:
raise NoChangeException()
# Return a tuple of the limits and limit sum
return (self.limit_sum, self.limit_data) |
def ping(self):
"""Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device"""
return self.handleresult(self.r.get(self.url,
params={"q": "this"})).text | Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device | Below is the the instruction that describes the task:
### Input:
Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device
### Response:
def ping(self):
"""Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device"""
return self.handleresult(self.r.get(self.url,
params={"q": "this"})).text |
def get_config(self, name, default=_MISSING):
"""Get a configuration setting from this DeviceAdapter.
See :meth:`AbstractDeviceAdapter.get_config`.
"""
val = self._config.get(name, default)
if val is _MISSING:
raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name))
return val | Get a configuration setting from this DeviceAdapter.
See :meth:`AbstractDeviceAdapter.get_config`. | Below is the the instruction that describes the task:
### Input:
Get a configuration setting from this DeviceAdapter.
See :meth:`AbstractDeviceAdapter.get_config`.
### Response:
def get_config(self, name, default=_MISSING):
"""Get a configuration setting from this DeviceAdapter.
See :meth:`AbstractDeviceAdapter.get_config`.
"""
val = self._config.get(name, default)
if val is _MISSING:
raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name))
return val |
def simulate_w(self,
index: int,
half_turns: float,
axis_half_turns: float):
"""Simulate a single qubit rotation gate about a X + b Y.
The gate simulated is U = exp(-i pi/2 W half_turns)
where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y
Args:
index: The qubit to act on.
half_turns: The amount of the overall rotation, see the formula
above.
axis_half_turns: The angle between the pauli X and Y operators,
see the formula above.
"""
args = self._shard_num_args({
'index': index,
'half_turns': half_turns,
'axis_half_turns': axis_half_turns
})
if index >= self._num_shard_qubits:
# W gate spans shards.
self._pool.map(_clear_scratch, args)
self._pool.map(_w_between_shards, args)
self._pool.map(_copy_scratch_to_state, args)
else:
# W gate is within a shard.
self._pool.map(_w_within_shard, args)
# Normalize after every w.
norm_squared = np.sum(self._pool.map(_norm_squared, args))
args = self._shard_num_args({
'norm_squared': norm_squared
})
self._pool.map(_renorm, args) | Simulate a single qubit rotation gate about a X + b Y.
The gate simulated is U = exp(-i pi/2 W half_turns)
where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y
Args:
index: The qubit to act on.
half_turns: The amount of the overall rotation, see the formula
above.
axis_half_turns: The angle between the pauli X and Y operators,
see the formula above. | Below is the the instruction that describes the task:
### Input:
Simulate a single qubit rotation gate about a X + b Y.
The gate simulated is U = exp(-i pi/2 W half_turns)
where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y
Args:
index: The qubit to act on.
half_turns: The amount of the overall rotation, see the formula
above.
axis_half_turns: The angle between the pauli X and Y operators,
see the formula above.
### Response:
def simulate_w(self,
index: int,
half_turns: float,
axis_half_turns: float):
"""Simulate a single qubit rotation gate about a X + b Y.
The gate simulated is U = exp(-i pi/2 W half_turns)
where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y
Args:
index: The qubit to act on.
half_turns: The amount of the overall rotation, see the formula
above.
axis_half_turns: The angle between the pauli X and Y operators,
see the formula above.
"""
args = self._shard_num_args({
'index': index,
'half_turns': half_turns,
'axis_half_turns': axis_half_turns
})
if index >= self._num_shard_qubits:
# W gate spans shards.
self._pool.map(_clear_scratch, args)
self._pool.map(_w_between_shards, args)
self._pool.map(_copy_scratch_to_state, args)
else:
# W gate is within a shard.
self._pool.map(_w_within_shard, args)
# Normalize after every w.
norm_squared = np.sum(self._pool.map(_norm_squared, args))
args = self._shard_num_args({
'norm_squared': norm_squared
})
self._pool.map(_renorm, args) |
def _scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES
if trainable_only else tf.GraphKeys.VARIABLES,
scope=scope if isinstance(scope, str) else scope.name) | Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`. | Below is the the instruction that describes the task:
### Input:
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
### Response:
def _scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES
if trainable_only else tf.GraphKeys.VARIABLES,
scope=scope if isinstance(scope, str) else scope.name) |
def getFileHandle(self, dataFile, openMethod):
"""
Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle.
"""
if dataFile in self._memoTable:
handle = self._memoTable[dataFile]
self._update(dataFile, handle)
return handle
else:
try:
handle = openMethod(dataFile)
except ValueError:
raise exceptions.FileOpenFailedException(dataFile)
self._memoTable[dataFile] = handle
self._add(dataFile, handle)
if len(self._memoTable) > self._maxCacheSize:
dataFile = self._removeLru()
del self._memoTable[dataFile]
return handle | Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle. | Below is the the instruction that describes the task:
### Input:
Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle.
### Response:
def getFileHandle(self, dataFile, openMethod):
"""
Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle.
"""
if dataFile in self._memoTable:
handle = self._memoTable[dataFile]
self._update(dataFile, handle)
return handle
else:
try:
handle = openMethod(dataFile)
except ValueError:
raise exceptions.FileOpenFailedException(dataFile)
self._memoTable[dataFile] = handle
self._add(dataFile, handle)
if len(self._memoTable) > self._maxCacheSize:
dataFile = self._removeLru()
del self._memoTable[dataFile]
return handle |
def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads))) | %prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name. | Below is the the instruction that describes the task:
### Input:
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
### Response:
def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads))) |
def expr2dimacscnf(ex):
"""Convert an expression into an equivalent DIMACS CNF."""
litmap, nvars, clauses = ex.encode_cnf()
    return litmap, DimacsCNF(nvars, clauses) | Convert an expression into an equivalent DIMACS CNF. | Below is the instruction that describes the task:
### Input:
Convert an expression into an equivalent DIMACS CNF.
### Response:
def expr2dimacscnf(ex):
"""Convert an expression into an equivalent DIMACS CNF."""
litmap, nvars, clauses = ex.encode_cnf()
return litmap, DimacsCNF(nvars, clauses) |
def kl_setup(num_eig,sr,struct,prefixes,
factors_file="kl_factors.dat",islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)``
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr,flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct,str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
#trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:,:num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name":eig_names},index=eig_names)
pp_df.loc[:,"x"] = -1.0 * sr.ncol
pp_df.loc[:,"y"] = -1.0 * sr.nrow
pp_df.loc[:,"zone"] = -999
pp_df.loc[:,"parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"),pp_df)
eigen_basis_to_factor_file(sr.nrow,sr.ncol,trunc_basis,factors_file=factors_file,islog=islog)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir,"{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat",tpl_file,prefix)
shutil.copy2("temp.dat",tpl_file.replace(".tpl",""))
df.loc[:,"tpl_file"] = tpl_file
df.loc[:,"in_file"] = tpl_file.replace(".tpl","")
df.loc[:,"prefix"] = prefix
df.loc[:,"pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
#arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:,"parubnd"] = 10.0
df.loc[:,"parlbnd"] = 0.1
return pd.concat(dfs) | setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
    ``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)`` | Below is the instruction that describes the task:
### Input:
setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)``
### Response:
def kl_setup(num_eig,sr,struct,prefixes,
factors_file="kl_factors.dat",islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)``
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr,flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct,str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
#trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:,:num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name":eig_names},index=eig_names)
pp_df.loc[:,"x"] = -1.0 * sr.ncol
pp_df.loc[:,"y"] = -1.0 * sr.nrow
pp_df.loc[:,"zone"] = -999
pp_df.loc[:,"parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"),pp_df)
eigen_basis_to_factor_file(sr.nrow,sr.ncol,trunc_basis,factors_file=factors_file,islog=islog)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir,"{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat",tpl_file,prefix)
shutil.copy2("temp.dat",tpl_file.replace(".tpl",""))
df.loc[:,"tpl_file"] = tpl_file
df.loc[:,"in_file"] = tpl_file.replace(".tpl","")
df.loc[:,"prefix"] = prefix
df.loc[:,"pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
#arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:,"parubnd"] = 10.0
df.loc[:,"parlbnd"] = 0.1
return pd.concat(dfs) |
def get_review_sh(self, revision, item):
""" Add sorting hat enrichment fields for the author of the revision """
identity = self.get_sh_identity(revision)
update = parser.parse(item[self.get_field_date()])
erevision = self.get_item_sh_fields(identity, update)
        return erevision | Add sorting hat enrichment fields for the author of the revision | Below is the instruction that describes the task:
### Input:
Add sorting hat enrichment fields for the author of the revision
### Response:
def get_review_sh(self, revision, item):
""" Add sorting hat enrichment fields for the author of the revision """
identity = self.get_sh_identity(revision)
update = parser.parse(item[self.get_field_date()])
erevision = self.get_item_sh_fields(identity, update)
return erevision |
def adam_minimax(grad_both, init_params_max, init_params_min, callback=None, num_iters=100,
step_size_max=0.001, step_size_min=0.001, b1=0.9, b2=0.999, eps=10**-8):
"""Adam modified to do minimiax optimization, for instance to help with
training generative adversarial networks."""
x_max, unflatten_max = flatten(init_params_max)
x_min, unflatten_min = flatten(init_params_min)
m_max = np.zeros(len(x_max))
v_max = np.zeros(len(x_max))
m_min = np.zeros(len(x_min))
v_min = np.zeros(len(x_min))
for i in range(num_iters):
g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),
unflatten_min(x_min), i)
g_max, _ = flatten(g_max_uf)
g_min, _ = flatten(g_min_uf)
if callback: callback(unflatten_max(x_max), unflatten_min(x_min), i,
unflatten_max(g_max), unflatten_min(g_min))
m_max = (1 - b1) * g_max + b1 * m_max # First moment estimate.
v_max = (1 - b2) * (g_max**2) + b2 * v_max # Second moment estimate.
mhat_max = m_max / (1 - b1**(i + 1)) # Bias correction.
vhat_max = v_max / (1 - b2**(i + 1))
x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps)
m_min = (1 - b1) * g_min + b1 * m_min # First moment estimate.
v_min = (1 - b2) * (g_min**2) + b2 * v_min # Second moment estimate.
mhat_min = m_min / (1 - b1**(i + 1)) # Bias correction.
vhat_min = v_min / (1 - b2**(i + 1))
x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)
return unflatten_max(x_max), unflatten_min(x_min) | Adam modified to do minimiax optimization, for instance to help with
    training generative adversarial networks. | Below is the instruction that describes the task:
### Input:
Adam modified to do minimiax optimization, for instance to help with
training generative adversarial networks.
### Response:
def adam_minimax(grad_both, init_params_max, init_params_min, callback=None, num_iters=100,
step_size_max=0.001, step_size_min=0.001, b1=0.9, b2=0.999, eps=10**-8):
"""Adam modified to do minimiax optimization, for instance to help with
training generative adversarial networks."""
x_max, unflatten_max = flatten(init_params_max)
x_min, unflatten_min = flatten(init_params_min)
m_max = np.zeros(len(x_max))
v_max = np.zeros(len(x_max))
m_min = np.zeros(len(x_min))
v_min = np.zeros(len(x_min))
for i in range(num_iters):
g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),
unflatten_min(x_min), i)
g_max, _ = flatten(g_max_uf)
g_min, _ = flatten(g_min_uf)
if callback: callback(unflatten_max(x_max), unflatten_min(x_min), i,
unflatten_max(g_max), unflatten_min(g_min))
m_max = (1 - b1) * g_max + b1 * m_max # First moment estimate.
v_max = (1 - b2) * (g_max**2) + b2 * v_max # Second moment estimate.
mhat_max = m_max / (1 - b1**(i + 1)) # Bias correction.
vhat_max = v_max / (1 - b2**(i + 1))
x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps)
m_min = (1 - b1) * g_min + b1 * m_min # First moment estimate.
v_min = (1 - b2) * (g_min**2) + b2 * v_min # Second moment estimate.
mhat_min = m_min / (1 - b1**(i + 1)) # Bias correction.
vhat_min = v_min / (1 - b2**(i + 1))
x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)
return unflatten_max(x_max), unflatten_min(x_min) |
def _get_supported_py_config(tops, extended_cfg):
'''
Based on the Salt SSH configuration, create a YAML configuration
for the supported Python interpreter versions. This is then written into the thin.tgz
archive and then verified by salt.client.ssh.ssh_py_shim.get_executable()
Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces.
:return:
'''
pymap = []
for py_ver, tops in _six.iteritems(copy.deepcopy(tops)):
py_ver = int(py_ver)
if py_ver == 2:
pymap.append('py2:2:7')
elif py_ver == 3:
pymap.append('py3:3:0')
for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}):
pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version')))
pymap.append('')
return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) | Based on the Salt SSH configuration, create a YAML configuration
for the supported Python interpreter versions. This is then written into the thin.tgz
archive and then verified by salt.client.ssh.ssh_py_shim.get_executable()
Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces.
    :return: | Below is the instruction that describes the task:
### Input:
Based on the Salt SSH configuration, create a YAML configuration
for the supported Python interpreter versions. This is then written into the thin.tgz
archive and then verified by salt.client.ssh.ssh_py_shim.get_executable()
Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces.
:return:
### Response:
def _get_supported_py_config(tops, extended_cfg):
'''
Based on the Salt SSH configuration, create a YAML configuration
for the supported Python interpreter versions. This is then written into the thin.tgz
archive and then verified by salt.client.ssh.ssh_py_shim.get_executable()
Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces.
:return:
'''
pymap = []
for py_ver, tops in _six.iteritems(copy.deepcopy(tops)):
py_ver = int(py_ver)
if py_ver == 2:
pymap.append('py2:2:7')
elif py_ver == 3:
pymap.append('py3:3:0')
for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}):
pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version')))
pymap.append('')
return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) |
def _run_bunny(args):
"""Run CWL with rabix bunny.
"""
main_file, json_file, project_name = _get_main_and_json(args.directory)
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work"))
flags = ["-b", work_dir]
log_file = os.path.join(work_dir, "%s-bunny.log" % project_name)
if os.path.exists(work_dir):
caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir)
if os.path.isdir(os.path.join(work_dir, d))]
if caches:
flags += ["--cache-dir", max(caches, key=os.path.getmtime)]
if args.no_container:
_remove_bcbiovm_path()
flags += ["--no-container"]
cmd = ["rabix"] + flags + [main_file, json_file]
with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir, log_file) | Run CWL with rabix bunny. | Below is the instruction that describes the task:
### Input:
Run CWL with rabix bunny.
### Response:
def _run_bunny(args):
"""Run CWL with rabix bunny.
"""
main_file, json_file, project_name = _get_main_and_json(args.directory)
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work"))
flags = ["-b", work_dir]
log_file = os.path.join(work_dir, "%s-bunny.log" % project_name)
if os.path.exists(work_dir):
caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir)
if os.path.isdir(os.path.join(work_dir, d))]
if caches:
flags += ["--cache-dir", max(caches, key=os.path.getmtime)]
if args.no_container:
_remove_bcbiovm_path()
flags += ["--no-container"]
cmd = ["rabix"] + flags + [main_file, json_file]
with utils.chdir(work_dir):
_run_tool(cmd, not args.no_container, work_dir, log_file) |
def cidr_netmask(cidr):
'''
Get the netmask address associated with a CIDR address.
CLI example::
salt myminion netaddress.cidr_netmask 192.168.0.0/20
'''
ips = netaddr.IPNetwork(cidr)
return six.text_type(ips.netmask) | Get the netmask address associated with a CIDR address.
CLI example::
        salt myminion netaddress.cidr_netmask 192.168.0.0/20 | Below is the instruction that describes the task:
### Input:
Get the netmask address associated with a CIDR address.
CLI example::
salt myminion netaddress.cidr_netmask 192.168.0.0/20
### Response:
def cidr_netmask(cidr):
'''
Get the netmask address associated with a CIDR address.
CLI example::
salt myminion netaddress.cidr_netmask 192.168.0.0/20
'''
ips = netaddr.IPNetwork(cidr)
return six.text_type(ips.netmask) |
def clustdealer(pairdealer, optim):
""" return optim clusters given iterators, and whether it got all or not"""
ccnt = 0
chunk = []
while ccnt < optim:
## try refreshing taker, else quit
try:
taker = itertools.takewhile(lambda x: x[0] != "//\n", pairdealer)
oneclust = ["".join(taker.next())]
except StopIteration:
#LOGGER.debug('last chunk %s', chunk)
return 1, chunk
## load one cluster
while 1:
try:
oneclust.append("".join(taker.next()))
except StopIteration:
break
chunk.append("".join(oneclust))
ccnt += 1
    return 0, chunk | return optim clusters given iterators, and whether it got all or not | Below is the instruction that describes the task:
### Input:
return optim clusters given iterators, and whether it got all or not
### Response:
def clustdealer(pairdealer, optim):
""" return optim clusters given iterators, and whether it got all or not"""
ccnt = 0
chunk = []
while ccnt < optim:
## try refreshing taker, else quit
try:
taker = itertools.takewhile(lambda x: x[0] != "//\n", pairdealer)
oneclust = ["".join(taker.next())]
except StopIteration:
#LOGGER.debug('last chunk %s', chunk)
return 1, chunk
## load one cluster
while 1:
try:
oneclust.append("".join(taker.next()))
except StopIteration:
break
chunk.append("".join(oneclust))
ccnt += 1
return 0, chunk |