def from_epsg_code(code):
"""
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("epsg", code, "proj4")
crs = from_proj4(proj4)
    return crs
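
# Example usage (illustrative sketch): 4326 is the EPSG code for WGS 84 lat/lon.
# This assumes the surrounding CRS module (with `utils` and `from_proj4`) is
# importable and that spatialreference.org can be reached.
crs = from_epsg_code(4326)
print(crs)  # a CS instance built from the fetched proj4 string
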
def get_name(self):
"""
    Tries to get the WF name from 'process', 'collaboration' or 'participant'.
Returns:
str. WF name.
"""
paths = ['bpmn:process',
'bpmn:collaboration/bpmn:participant/',
'bpmn:collaboration',
]
for path in paths:
tag = self.root.find(path, NS)
if tag is not None and len(tag):
name = tag.get('name')
if name:
                return name
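
# Standalone illustration of the lookup above using ElementTree directly.
# The namespace prefix map below is an assumption; the real `NS` mapping lives
# in the surrounding module.
import xml.etree.ElementTree as ET

NS_EXAMPLE = {'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL'}
xml_doc = (
    '<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL">'
    '  <bpmn:process id="p1" name="Leave Request"/>'
    '</bpmn:definitions>'
)
root_example = ET.fromstring(xml_doc)
process = root_example.find('bpmn:process', NS_EXAMPLE)
print(process.get('name'))  # -> Leave Request
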
def send(mail, server='localhost'):
"""
Sends the given mail.
:type mail: Mail
:param mail: The mail object.
:type server: string
:param server: The address of the mailserver.
"""
sender = mail.get_sender()
rcpt = mail.get_receipients()
session = smtplib.SMTP(server)
message = MIMEMultipart()
message['Subject'] = mail.get_subject()
message['From'] = mail.get_sender()
message['To'] = ', '.join(mail.get_to())
message['Cc'] = ', '.join(mail.get_cc())
message.preamble = 'Your mail client is not MIME aware.'
body = MIMEText(mail.get_body().encode("utf-8"), "plain", "utf-8")
body.add_header('Content-Disposition', 'inline')
message.attach(body)
for filename in mail.get_attachments():
message.attach(_get_mime_object(filename))
    session.sendmail(sender, rcpt, message.as_string())
def _play(self):
"""Send play command to receiver command via HTTP post."""
# Use pause command only for sources which support NETAUDIO
if self._input_func in self._netaudio_func_list:
body = {"cmd0": "PutNetAudioCommand/CurEnter",
"cmd1": "aspMainZone_WebUpdateStatus/",
"ZoneName": "MAIN ZONE"}
try:
if self.send_post_command(
self._urls.command_netaudio_post, body):
self._state = STATE_PLAYING
return True
else:
return False
except requests.exceptions.RequestException:
_LOGGER.error("Connection error: play command not sent.")
            return False
def get_block_containing_tx(self, txid):
"""Retrieve the list of blocks (block ids) containing a
transaction with transaction id `txid`
Args:
txid (str): transaction id of the transaction to query
Returns:
Block id list (list(int))
"""
blocks = list(backend.query.get_block_with_transaction(self.connection, txid))
if len(blocks) > 1:
logger.critical('Transaction id %s exists in multiple blocks', txid)
    return [block['height'] for block in blocks]
def append(self, position, array):
"""Append an array to the end of the map. The position
must be greater than any positions in the map"""
if not Gauged.map_append(self.ptr, position, array.ptr):
            raise MemoryError
def setWorkingPlayAreaSize(self, sizeX, sizeZ):
"""Sets the Play Area in the working copy."""
fn = self.function_table.setWorkingPlayAreaSize
    fn(sizeX, sizeZ)
def DICOMfile_read(self, *args, **kwargs):
"""
Read a DICOM file and perform some initial
parsing of tags.
NB!
For thread safety, class member variables
should not be assigned since other threads
might override/change these variables in mid-
flight!
"""
b_status = False
l_tags = []
l_tagsToUse = []
d_tagsInString = {}
str_file = ""
d_DICOM = {
'dcm': None,
'd_dcm': {},
'strRaw': '',
'l_tagRaw': [],
'd_json': {},
'd_dicom': {},
'd_dicomSimple': {}
}
for k, v in kwargs.items():
if k == 'file': str_file = v
if k == 'l_tagsToUse': l_tags = v
if len(args):
l_file = args[0]
str_file = l_file[0]
str_localFile = os.path.basename(str_file)
str_path = os.path.dirname(str_file)
# self.dp.qprint("%s: In input base directory: %s" % (threading.currentThread().getName(), self.str_inputDir))
# self.dp.qprint("%s: Reading DICOM file in path: %s" % (threading.currentThread().getName(),str_path))
# self.dp.qprint("%s: Analysing tags on DICOM file: %s" % (threading.currentThread().getName(),str_localFile))
# self.dp.qprint("%s: Loading: %s" % (threading.currentThread().getName(),str_file))
try:
# self.dcm = dicom.read_file(str_file)
d_DICOM['dcm'] = dicom.read_file(str_file)
b_status = True
except:
self.dp.qprint('In directory: %s' % os.getcwd(), comms = 'error')
self.dp.qprint('Failed to read %s' % str_file, comms = 'error')
b_status = False
d_DICOM['d_dcm'] = dict(d_DICOM['dcm'])
d_DICOM['strRaw'] = str(d_DICOM['dcm'])
d_DICOM['l_tagRaw'] = d_DICOM['dcm'].dir()
if len(l_tags):
l_tagsToUse = l_tags
else:
l_tagsToUse = d_DICOM['l_tagRaw']
if 'PixelData' in l_tagsToUse:
l_tagsToUse.remove('PixelData')
for key in l_tagsToUse:
d_DICOM['d_dicom'][key] = d_DICOM['dcm'].data_element(key)
try:
d_DICOM['d_dicomSimple'][key] = getattr(d_DICOM['dcm'], key)
except:
d_DICOM['d_dicomSimple'][key] = "no attribute"
d_DICOM['d_json'][key] = str(d_DICOM['d_dicomSimple'][key])
# pudb.set_trace()
d_tagsInString = self.tagsInString_process(d_DICOM, self.str_outputFileStem)
str_outputFile = d_tagsInString['str_result']
return {
'status': b_status,
'inputPath': str_path,
'inputFilename': str_localFile,
'outputFileStem': str_outputFile,
'd_DICOM': d_DICOM,
'l_tagsToUse': l_tagsToUse
        }
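
# Hypothetical call sketch; the reader instance, path and tag names below are
# illustrative only:
#
#     d_read = reader.DICOMfile_read(
#         file='/incoming/slice0001.dcm',
#         l_tagsToUse=['PatientID', 'StudyDate', 'Modality'])
#     d_read['status'], d_read['outputFileStem']
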
def tabify(text, options):
""" tabify(text : str, options : argparse.Namespace|str) -> str
>>> tabify(' (println "hello world")', '--tab=3')
'\t\t (println "hello world")'
Replace spaces with tabs
"""
opts = parse_options(options)
if opts.tab_size < 1:
return text
else:
tab_equiv = ' ' * opts.tab_size
        return text.replace(tab_equiv, '\t')
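
# Standalone illustration of the core replacement performed above (bypassing
# parse_options); six leading spaces collapse into two tabs when tab_size is 3:
tab_size_demo = 3
text_demo = '      (println "hello world")'
print(repr(text_demo.replace(' ' * tab_size_demo, '\t')))
# -> '\t\t(println "hello world")'
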
def _glfw_get_version(filename):
'''
Queries and returns the library version tuple or None by using a
subprocess.
'''
version_checker_source = """
import sys
import ctypes
def get_version(library_handle):
'''
Queries and returns the library version tuple or None.
'''
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
if hasattr(library_handle, 'glfwGetVersion'):
library_handle.glfwGetVersion(major, minor, rev)
version = (major_value.value,
minor_value.value,
rev_value.value)
return version
else:
return None
try:
input_func = raw_input
except NameError:
input_func = input
filename = input_func().strip()
try:
library_handle = ctypes.CDLL(filename)
except OSError:
pass
else:
version = get_version(library_handle)
print(version)
"""
args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
process = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = process.communicate(_to_char_p(filename))[0]
out = out.strip()
if out:
return eval(out)
else:
        return None
def _get_regex_pattern(label):
"""Return a regular expression of the label.
This takes care of plural and different kinds of separators.
"""
parts = _split_by_punctuation.split(label)
for index, part in enumerate(parts):
if index % 2 == 0:
# Word
if not parts[index].isdigit() and len(parts[index]) > 1:
parts[index] = _convert_word(parts[index])
else:
# Punctuation
if not parts[index + 1]:
# The separator is not followed by another word. Treat
# it as a symbol.
parts[index] = _convert_punctuation(
parts[index],
current_app.config["CLASSIFIER_SYMBOLS"]
)
else:
parts[index] = _convert_punctuation(
parts[index],
current_app.config["CLASSIFIER_SEPARATORS"]
)
return "".join(parts) | Return a regular expression of the label.
This takes care of plural and different kinds of separators. | Below is the the instruction that describes the task:
### Input:
Return a regular expression of the label.
This takes care of plural and different kinds of separators.
### Response:
def _get_regex_pattern(label):
"""Return a regular expression of the label.
This takes care of plural and different kinds of separators.
"""
parts = _split_by_punctuation.split(label)
for index, part in enumerate(parts):
if index % 2 == 0:
# Word
if not parts[index].isdigit() and len(parts[index]) > 1:
parts[index] = _convert_word(parts[index])
else:
# Punctuation
if not parts[index + 1]:
# The separator is not followed by another word. Treat
# it as a symbol.
parts[index] = _convert_punctuation(
parts[index],
current_app.config["CLASSIFIER_SYMBOLS"]
)
else:
parts[index] = _convert_punctuation(
parts[index],
current_app.config["CLASSIFIER_SEPARATORS"]
)
return "".join(parts) |
def _list_templates(settings):
"""
List templates from settings.
"""
for idx, option in enumerate(settings.config.get("project_templates"), start=1):
puts(" {0!s:5} {1!s:36}".format(
colored.yellow("[{0}]".format(idx)),
colored.cyan(option.get("name"))
))
if option.get("url"):
puts(" {0}\n".format(option.get("url"))) | List templates from settings. | Below is the the instruction that describes the task:
### Input:
List templates from settings.
### Response:
def _list_templates(settings):
"""
List templates from settings.
"""
for idx, option in enumerate(settings.config.get("project_templates"), start=1):
puts(" {0!s:5} {1!s:36}".format(
colored.yellow("[{0}]".format(idx)),
colored.cyan(option.get("name"))
))
if option.get("url"):
puts(" {0}\n".format(option.get("url"))) |
def delete_comment(repo: GithubRepository, comment_id: int) -> None:
"""
References:
https://developer.github.com/v3/issues/comments/#delete-a-comment
"""
url = ("https://api.github.com/repos/{}/{}/issues/comments/{}"
"?access_token={}".format(repo.organization,
repo.name,
comment_id,
repo.access_token))
response = requests.delete(url)
if response.status_code != 204:
raise RuntimeError(
'Comment delete failed. Code: {}. Content: {}.'.format(
                response.status_code, response.content))
def shutdown(self):
"""
Shutdown the application and exit
:returns: No return value
"""
task = asyncio.ensure_future(self.core.shutdown())
    self.loop.run_until_complete(task)
def min(self, constraints, X: BitVec, M=10000):
"""
Iteratively finds the minimum value for a symbol within given constraints.
:param constraints: constraints that the expression must fulfil
:param X: a symbol or expression
:param M: maximum number of iterations allowed
"""
assert isinstance(X, BitVec)
    return self.optimize(constraints, X, 'minimize', M)
def select_ipam_strategy(self, network_id, network_strategy, **kwargs):
"""Return relevant IPAM strategy name.
:param network_id: neutron network id.
:param network_strategy: default strategy for the network.
NOTE(morgabra) This feels like a hack but I can't think of a better
idea. The root problem is we can now attach ports to networks with
    a different backend driver/ipam strategy than the network specifies.
    We handle the backend driver part by allowing network_plugin to
be specified for port objects. This works pretty well because nova or
whatever knows when we are hooking up an Ironic node so it can pass
along that key during port_create().
IPAM is a little trickier, especially in Ironic's case, because we
*must* use a specific IPAM for provider networks. There isn't really
much of an option other than involve the backend driver when selecting
the IPAM strategy.
"""
LOG.info("Selecting IPAM strategy for network_id:%s "
"network_strategy:%s" % (network_id, network_strategy))
net_type = "tenant"
if STRATEGY.is_provider_network(network_id):
net_type = "provider"
strategy = self._ipam_strategies.get(net_type, {})
default = strategy.get("default")
overrides = strategy.get("overrides", {})
# If we override a particular strategy explicitly, we use it.
if network_strategy in overrides:
LOG.info("Selected overridden IPAM strategy: %s"
% (overrides[network_strategy]))
return overrides[network_strategy]
# Otherwise, we are free to use an explicit default.
if default:
LOG.info("Selected default IPAM strategy for tenant "
"network: %s" % (default))
return default
# Fallback to the network-specified IPAM strategy
LOG.info("Selected network strategy for tenant "
"network: %s" % (network_strategy))
    return network_strategy
def generate_events_list(generator):
"""Populate the event_list variable to be used in jinja templates"""
if not localized_events:
generator.context['events_list'] = sorted(events, reverse = True,
key=lambda ev: (ev.dtstart, ev.dtend))
else:
generator.context['events_list'] = {k: sorted(v, reverse = True,
key=lambda ev: (ev.dtstart, ev.dtend))
            for k, v in localized_events.items()}
def __setup(local_download_dir_warc, log_level):
"""
Setup
:return:
"""
if not os.path.exists(local_download_dir_warc):
os.makedirs(local_download_dir_warc)
# make loggers quite
configure_logging({"LOG_LEVEL": "ERROR"})
logging.getLogger('requests').setLevel(logging.CRITICAL)
logging.getLogger('readability').setLevel(logging.CRITICAL)
logging.getLogger('PIL').setLevel(logging.CRITICAL)
logging.getLogger('newspaper').setLevel(logging.CRITICAL)
logging.getLogger('newsplease').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
# set own logger
logging.basicConfig(level=log_level)
__logger = logging.getLogger(__name__)
    __logger.setLevel(log_level)
def asset(self, id):
"""Returns a single Asset.
:param int id: (required), id of the asset
:returns: :class:`Asset <github3.repos.release.Asset>`
"""
data = None
if int(id) > 0:
url = self._build_url('releases', 'assets', str(id),
base_url=self._api)
data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS),
200)
        return Asset(data, self) if data else None
def deepcp(data):
"""Use ujson to do deep_copy"""
import ujson
try:
return ujson.loads(ujson.dumps(data))
except Exception:
        return copy.deepcopy(data)
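
# Usage sketch: the ujson round-trip copies plain JSON-compatible structures
# quickly; anything else (sets, custom objects; tuples survive only as lists)
# falls back to copy.deepcopy.
original = {'a': [1, 2, 3], 'b': {'nested': True}}
clone = deepcp(original)
clone['a'].append(4)
print(original['a'])  # -> [1, 2, 3] (the original is untouched)
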
def bookmark(ctx):
"""Bookmark build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build bookmark
```
\b
```bash
$ polyaxon build -b 2 bookmark
```
"""
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
try:
PolyaxonClient().build_job.bookmark(user, project_name, _build)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not bookmark build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Build job bookmarked.") | Bookmark build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build bookmark
```
\b
```bash
$ polyaxon build -b 2 bookmark
``` | Below is the the instruction that describes the task:
### Input:
Bookmark build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build bookmark
```
\b
```bash
$ polyaxon build -b 2 bookmark
```
### Response:
def bookmark(ctx):
"""Bookmark build job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build bookmark
```
\b
```bash
$ polyaxon build -b 2 bookmark
```
"""
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
try:
PolyaxonClient().build_job.bookmark(user, project_name, _build)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not bookmark build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Build job bookmarked.") |
def tuples(stream, *keys):
"""Reformat data as tuples.
Parameters
----------
stream : iterable
Stream of data objects.
*keys : strings
Keys to use for ordering data.
Yields
------
items : tuple of np.ndarrays
Data object reformated as a tuple.
Raises
------
DataError
If the stream contains items that are not data-like.
KeyError
If a data object does not contain the requested key.
"""
if not keys:
raise PescadorError('Unable to generate tuples from '
'an empty item set')
for data in stream:
try:
yield tuple(data[key] for key in keys)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) | Reformat data as tuples.
Parameters
----------
stream : iterable
Stream of data objects.
*keys : strings
Keys to use for ordering data.
Yields
------
items : tuple of np.ndarrays
Data object reformated as a tuple.
Raises
------
DataError
If the stream contains items that are not data-like.
KeyError
If a data object does not contain the requested key. | Below is the the instruction that describes the task:
### Input:
Reformat data as tuples.
Parameters
----------
stream : iterable
Stream of data objects.
*keys : strings
Keys to use for ordering data.
Yields
------
items : tuple of np.ndarrays
Data object reformated as a tuple.
Raises
------
DataError
If the stream contains items that are not data-like.
KeyError
If a data object does not contain the requested key.
### Response:
def tuples(stream, *keys):
"""Reformat data as tuples.
Parameters
----------
stream : iterable
Stream of data objects.
*keys : strings
Keys to use for ordering data.
Yields
------
items : tuple of np.ndarrays
Data object reformated as a tuple.
Raises
------
DataError
If the stream contains items that are not data-like.
KeyError
If a data object does not contain the requested key.
"""
if not keys:
raise PescadorError('Unable to generate tuples from '
'an empty item set')
for data in stream:
try:
yield tuple(data[key] for key in keys)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) |
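
# Minimal usage sketch, assuming each item in the stream is a dict of arrays:
import numpy as np

demo_stream = [{'X': np.zeros(3), 'y': np.ones(1)},
               {'X': np.ones(3), 'y': np.zeros(1)}]
for X, y in tuples(demo_stream, 'X', 'y'):
    print(X.shape, y.shape)  # -> (3,) (1,)
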
def find_malformed_single_file_project(self): # type: () -> List[str]
"""
Take first non-setup.py python file. What a mess.
:return:
"""
files = [f for f in os.listdir(".") if os.path.isfile(f)]
candidates = []
# project misnamed & not in setup.py
for file in files:
if file.endswith("setup.py") or not file.endswith(".py"):
continue # duh
candidate = file.replace(".py", "")
if candidate != "setup":
candidates.append(candidate)
# return first
return candidates
# files with shebang
for file in files:
if file.endswith("setup.py"):
continue # duh
if "." not in file:
candidate = files
try:
firstline = self.file_opener.open_this(file, "r").readline()
if (
firstline.startswith("#")
and "python" in firstline
and candidate in self.setup_py_source()
):
candidates.append(candidate)
return candidates
except:
pass
# default.
    return candidates
def add_section(self, section_name):
"""Create a section of the report, to be headed by section_name
Text and images can be added by using the `section` argument of the
`add_text` and `add_image` methods. Sections can also be ordered by
using the `set_section_order` method.
By default, text and images that have no section will be placed after
all the sections, in the order they were added. This behavior may be
altered using the `sections_first` attribute of the `make_report`
method.
"""
self.section_headings.append(section_name)
if section_name in self.sections:
raise ValueError("Section %s already exists." % section_name)
self.sections[section_name] = []
    return
def pop_message(self, till=None):
"""
RETURN TUPLE (message, payload) CALLER IS RESPONSIBLE FOR CALLING message.delete() WHEN DONE
DUMMY IMPLEMENTATION FOR DEBUGGING
"""
if till is not None and not isinstance(till, Signal):
Log.error("Expecting a signal")
        return Null, self.pop(till=till)
def aggregationDivide(dividend, divisor):
"""
Return the result from dividing two dicts that represent date and time.
Both dividend and divisor are dicts that contain one or more of the following
keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',
'milliseconds', 'microseconds'.
For example:
::
aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
:param dividend: (dict) The numerator, as a dict representing a date and time
:param divisor: (dict) the denominator, as a dict representing a date and time
:returns: (float) number of times divisor goes into dividend
"""
# Convert each into microseconds
dividendMonthSec = aggregationToMonthsSeconds(dividend)
divisorMonthSec = aggregationToMonthsSeconds(divisor)
# It is a usage error to mix both months and seconds in the same operation
if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
raise RuntimeError("Aggregation dicts with months/years can only be "
"inter-operated with other aggregation dicts that contain "
"months/years")
if dividendMonthSec['months'] > 0:
return float(dividendMonthSec['months']) / divisor['months']
else:
        return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
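
# Worked examples of the seconds branch (4 h = 14400 s, 15 min = 900 s, so the
# first call yields 16.0); these assume the aggregationToMonthsSeconds helper
# referenced above is available:
print(aggregationDivide({'hours': 4}, {'minutes': 15}))  # -> 16.0
print(aggregationDivide({'days': 1}, {'hours': 6}))      # -> 4.0
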
def parse_config(self):
"""
Parse the xml file with remote servers and discover resources on each found server.
"""
tree = ElementTree.parse(self.file_xml)
root = tree.getroot()
for server in root.findall('server'):
destination = server.text
name = server.get("name")
            self.discover_remote(destination, name)
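
# Assumed shape of the configuration file parsed above (illustrative only; the
# root element name and the URLs are guesses based on the findall/get calls):
#
#     <servers>
#         <server name="partner-a">http://partner-a.example.org/resourcesync/</server>
#         <server name="partner-b">http://partner-b.example.org/resourcesync/</server>
#     </servers>
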
def create_key_file(path):
"""
Creates a new encryption key in the path provided and sets the file
permissions. Setting the file permissions currently does not work
on Windows platforms because of the differences in how file
permissions are read and modified.
"""
iv = "{}{}".format(os.urandom(32), time.time())
new_key = generate_key(ensure_bytes(iv))
with open(path, "wb") as f:
f.write(base64.b64encode(new_key))
    os.chmod(path, 0o400)
def outlineColor(self, value):
"""gets/sets the outlineColor"""
if isinstance(value, Color) and \
not self._outline is None:
            self._outline['color'] = value
def extend_with(func):
"""Extends with class or function"""
if not func.__name__ in ArgParseInator._plugins:
        ArgParseInator._plugins[func.__name__] = func
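
# Hypothetical registration sketch: any callable can be added as a plugin once.
def shout(text):
    return text.upper()

extend_with(shout)  # stored under ArgParseInator._plugins['shout']
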
def stop(self):
"""
Stops the connection
"""
self.__stop = True
self._queue.stop()
        self._zk.stop()
def create_annotation(timestamp, value, host):
"""
Create a zipkin annotation object
:param timestamp: timestamp of when the annotation occured in microseconds
:param value: name of the annotation, such as 'sr'
:param host: zipkin endpoint object
:returns: zipkin annotation object
"""
    return zipkin_core.Annotation(timestamp=timestamp, value=value, host=host)
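
# Usage sketch: timestamps are microseconds since the epoch; `endpoint` is a
# stand-in for a zipkin endpoint object built elsewhere (an assumption here).
import time
endpoint = None  # a real zipkin_core.Endpoint would normally go here
sr_annotation = create_annotation(int(time.time() * 1000000), 'sr', endpoint)
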
def get_formset(self):
"""Provide the formset corresponding to this DataTable.
Use this to validate the formset and to get the submitted data back.
"""
if self._formset is None:
self._formset = self.formset_class(
self.request.POST or None,
initial=self._get_formset_data(),
prefix=self._meta.name)
        return self._formset
def summarizePosition(self, index):
"""
Compute residue counts at a specific sequence index.
@param index: an C{int} index into the sequence.
@return: A C{dict} with the count of too-short (excluded) sequences,
and a Counter instance giving the residue counts.
"""
countAtPosition = Counter()
excludedCount = 0
for read in self:
try:
countAtPosition[read.sequence[index]] += 1
except IndexError:
excludedCount += 1
return {
'excludedCount': excludedCount,
'countAtPosition': countAtPosition
        }
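
# Standalone illustration of the same counting logic on plain strings:
from collections import Counter

demo_reads = ['ACGT', 'A', 'AGGT']
position = 2
count_at_position = Counter()
excluded = 0
for seq in demo_reads:
    try:
        count_at_position[seq[position]] += 1
    except IndexError:
        excluded += 1
print(count_at_position, excluded)  # -> Counter({'G': 2}) 1
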
def _snakify_name(self, name):
"""Snakify a name string.
In this context, "to snakify" means to strip a name of all
diacritics, convert it to lower case, and replace any spaces
inside the name with hyphens.
This way the name is made "machine-friendly", and ready to be
combined with a second name component into a full "snake_case"
name.
:param str name: A name to snakify.
:return str: A snakified name.
"""
name = self._strip_diacritics(name)
name = name.lower()
name = name.replace(' ', '-')
        return name
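
# Standalone sketch of the same steps, using unicodedata to approximate the
# diacritic stripping done by the class's _strip_diacritics helper:
import unicodedata

def snakify_demo(name):
    stripped = ''.join(ch for ch in unicodedata.normalize('NFKD', name)
                       if not unicodedata.combining(ch))
    return stripped.lower().replace(' ', '-')

print(snakify_demo('José Álvarez'))  # -> jose-alvarez
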
def make(parser):
"""
Install Ceph packages on remote hosts.
"""
version = parser.add_mutually_exclusive_group()
# XXX deprecated in favor of release
version.add_argument(
'--stable',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='[DEPRECATED] install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--release',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--testing',
nargs=0,
action=StoreVersion,
help='install the latest development release',
)
version.add_argument(
'--dev',
nargs='?',
action=StoreVersion,
const='master',
metavar='BRANCH_OR_TAG',
help='install a bleeding edge build from Git branch\
or tag (default: %(default)s)',
)
parser.add_argument(
'--dev-commit',
nargs='?',
action=StoreVersion,
metavar='COMMIT',
help='install a bleeding edge build from Git commit (defaults to master branch)',
)
version.set_defaults(
stable=None, # XXX deprecated in favor of release
release=None, # Set the default release in sanitize_args()
dev='master',
version_kind='stable',
)
parser.add_argument(
'--mon',
dest='install_mon',
action='store_true',
help='install the mon component only',
)
parser.add_argument(
'--mgr',
dest='install_mgr',
action='store_true',
help='install the mgr component only',
)
parser.add_argument(
'--mds',
dest='install_mds',
action='store_true',
help='install the mds component only',
)
parser.add_argument(
'--rgw',
dest='install_rgw',
action='store_true',
help='install the rgw component only',
)
parser.add_argument(
'--osd',
dest='install_osd',
action='store_true',
help='install the osd component only',
)
parser.add_argument(
'--tests',
dest='install_tests',
action='store_true',
help='install the testing components',
)
parser.add_argument(
'--cli', '--common',
dest='install_common',
action='store_true',
help='install the common component only',
)
parser.add_argument(
'--all',
dest='install_all',
action='store_true',
help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default',
)
repo = parser.add_mutually_exclusive_group()
repo.add_argument(
'--adjust-repos',
dest='adjust_repos',
action='store_true',
help='install packages modifying source repos',
)
repo.add_argument(
'--no-adjust-repos',
dest='adjust_repos',
action='store_false',
help='install packages without modifying source repos',
)
repo.add_argument(
'--repo',
action='store_true',
help='install repo files only (skips package installation)',
)
repo.set_defaults(
adjust_repos=True,
)
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to install on',
)
parser.add_argument(
'--local-mirror',
nargs='?',
const='PATH',
default=None,
help='Fetch packages and push them to hosts for a local repo mirror',
)
parser.add_argument(
'--repo-url',
nargs='?',
dest='repo_url',
help='specify a repo URL that mirrors/contains Ceph packages',
)
parser.add_argument(
'--gpg-url',
nargs='?',
dest='gpg_url',
help='specify a GPG key URL to be used with custom repos\
(defaults to ceph.com)'
)
parser.add_argument(
'--nogpgcheck',
action='store_true',
help='install packages without gpgcheck',
)
parser.set_defaults(
func=install,
) | Install Ceph packages on remote hosts. | Below is the the instruction that describes the task:
### Input:
Install Ceph packages on remote hosts.
### Response:
def make(parser):
"""
Install Ceph packages on remote hosts.
"""
version = parser.add_mutually_exclusive_group()
# XXX deprecated in favor of release
version.add_argument(
'--stable',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='[DEPRECATED] install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--release',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--testing',
nargs=0,
action=StoreVersion,
help='install the latest development release',
)
version.add_argument(
'--dev',
nargs='?',
action=StoreVersion,
const='master',
metavar='BRANCH_OR_TAG',
help='install a bleeding edge build from Git branch\
or tag (default: %(default)s)',
)
parser.add_argument(
'--dev-commit',
nargs='?',
action=StoreVersion,
metavar='COMMIT',
help='install a bleeding edge build from Git commit (defaults to master branch)',
)
version.set_defaults(
stable=None, # XXX deprecated in favor of release
release=None, # Set the default release in sanitize_args()
dev='master',
version_kind='stable',
)
parser.add_argument(
'--mon',
dest='install_mon',
action='store_true',
help='install the mon component only',
)
parser.add_argument(
'--mgr',
dest='install_mgr',
action='store_true',
help='install the mgr component only',
)
parser.add_argument(
'--mds',
dest='install_mds',
action='store_true',
help='install the mds component only',
)
parser.add_argument(
'--rgw',
dest='install_rgw',
action='store_true',
help='install the rgw component only',
)
parser.add_argument(
'--osd',
dest='install_osd',
action='store_true',
help='install the osd component only',
)
parser.add_argument(
'--tests',
dest='install_tests',
action='store_true',
help='install the testing components',
)
parser.add_argument(
'--cli', '--common',
dest='install_common',
action='store_true',
help='install the common component only',
)
parser.add_argument(
'--all',
dest='install_all',
action='store_true',
help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default',
)
repo = parser.add_mutually_exclusive_group()
repo.add_argument(
'--adjust-repos',
dest='adjust_repos',
action='store_true',
help='install packages modifying source repos',
)
repo.add_argument(
'--no-adjust-repos',
dest='adjust_repos',
action='store_false',
help='install packages without modifying source repos',
)
repo.add_argument(
'--repo',
action='store_true',
help='install repo files only (skips package installation)',
)
repo.set_defaults(
adjust_repos=True,
)
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to install on',
)
parser.add_argument(
'--local-mirror',
nargs='?',
const='PATH',
default=None,
help='Fetch packages and push them to hosts for a local repo mirror',
)
parser.add_argument(
'--repo-url',
nargs='?',
dest='repo_url',
help='specify a repo URL that mirrors/contains Ceph packages',
)
parser.add_argument(
'--gpg-url',
nargs='?',
dest='gpg_url',
help='specify a GPG key URL to be used with custom repos\
(defaults to ceph.com)'
)
parser.add_argument(
'--nogpgcheck',
action='store_true',
help='install packages without gpgcheck',
)
parser.set_defaults(
func=install,
) |
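As a hedged illustration (not part of the record above), this is how make() might be wired into a top-level ceph-deploy parser; the program name and host list are invented, and StoreVersion/install are assumed to be importable from the surrounding module:

import argparse
top = argparse.ArgumentParser(prog='ceph-deploy')             # hypothetical program name
subparsers = top.add_subparsers()
install_parser = subparsers.add_parser('install', help='Install Ceph packages on remote hosts.')
make(install_parser)                                          # registers the options above and sets func=install
args = top.parse_args(['install', 'node1', 'node2'])          # hosts only; the rest comes from set_defaults()
# args.func(args) would then dispatch to the install() callable registered by make()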
def _get_envs_from_ref_paths(self, refs):
'''
Return the names of remote refs (stripped of the remote name) and tags
        which map to the branches and tags.
'''
def _check_ref(env_set, rname):
'''
Add the appropriate saltenv(s) to the set
'''
if rname in self.saltenv_revmap:
env_set.update(self.saltenv_revmap[rname])
else:
if rname == self.base:
env_set.add('base')
elif not self.disable_saltenv_mapping:
env_set.add(rname)
use_branches = 'branch' in self.ref_types
use_tags = 'tag' in self.ref_types
ret = set()
if salt.utils.stringutils.is_hex(self.base):
# gitfs_base or per-saltenv 'base' may point to a commit ID, which
# would not show up in the refs. Make sure we include it.
ret.add('base')
for ref in salt.utils.data.decode(refs):
if ref.startswith('refs/'):
ref = ref[5:]
rtype, rname = ref.split('/', 1)
if rtype == 'remotes' and use_branches:
parted = rname.partition('/')
rname = parted[2] if parted[2] else parted[0]
_check_ref(ret, rname)
elif rtype == 'tags' and use_tags:
_check_ref(ret, rname)
return ret | Return the names of remote refs (stripped of the remote name) and tags
        which map to the branches and tags. | Below is the the instruction that describes the task:
### Input:
Return the names of remote refs (stripped of the remote name) and tags
    which map to the branches and tags.
### Response:
def _get_envs_from_ref_paths(self, refs):
'''
Return the names of remote refs (stripped of the remote name) and tags
        which map to the branches and tags.
'''
def _check_ref(env_set, rname):
'''
Add the appropriate saltenv(s) to the set
'''
if rname in self.saltenv_revmap:
env_set.update(self.saltenv_revmap[rname])
else:
if rname == self.base:
env_set.add('base')
elif not self.disable_saltenv_mapping:
env_set.add(rname)
use_branches = 'branch' in self.ref_types
use_tags = 'tag' in self.ref_types
ret = set()
if salt.utils.stringutils.is_hex(self.base):
# gitfs_base or per-saltenv 'base' may point to a commit ID, which
# would not show up in the refs. Make sure we include it.
ret.add('base')
for ref in salt.utils.data.decode(refs):
if ref.startswith('refs/'):
ref = ref[5:]
rtype, rname = ref.split('/', 1)
if rtype == 'remotes' and use_branches:
parted = rname.partition('/')
rname = parted[2] if parted[2] else parted[0]
_check_ref(ret, rname)
elif rtype == 'tags' and use_tags:
_check_ref(ret, rname)
return ret |
def get_account_invitation(self, account_id, invitation_id, **kwargs): # noqa: E501
"""Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) # noqa: E501
return data | Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread.
### Response:
def get_account_invitation(self, account_id, invitation_id, **kwargs): # noqa: E501
"""Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) # noqa: E501
return data |
def get_path_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None):
"""GetPathContents.
[Preview API] Gets the contents of a directory in the given source code repository.
:param str project: Project ID or project name
:param str provider_name: The name of the source provider.
:param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
:param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
:param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
:param str path: The path contents to list, relative to the root of the repository.
:rtype: [SourceRepositoryItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if provider_name is not None:
route_values['providerName'] = self._serialize.url('provider_name', provider_name, 'str')
query_parameters = {}
if service_endpoint_id is not None:
query_parameters['serviceEndpointId'] = self._serialize.query('service_endpoint_id', service_endpoint_id, 'str')
if repository is not None:
query_parameters['repository'] = self._serialize.query('repository', repository, 'str')
if commit_or_branch is not None:
query_parameters['commitOrBranch'] = self._serialize.query('commit_or_branch', commit_or_branch, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
response = self._send(http_method='GET',
location_id='7944d6fb-df01-4709-920a-7a189aa34037',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[SourceRepositoryItem]', self._unwrap_collection(response)) | GetPathContents.
[Preview API] Gets the contents of a directory in the given source code repository.
:param str project: Project ID or project name
:param str provider_name: The name of the source provider.
:param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
:param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
:param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
:param str path: The path contents to list, relative to the root of the repository.
:rtype: [SourceRepositoryItem] | Below is the the instruction that describes the task:
### Input:
GetPathContents.
[Preview API] Gets the contents of a directory in the given source code repository.
:param str project: Project ID or project name
:param str provider_name: The name of the source provider.
:param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
:param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
:param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
:param str path: The path contents to list, relative to the root of the repository.
:rtype: [SourceRepositoryItem]
### Response:
def get_path_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None):
"""GetPathContents.
[Preview API] Gets the contents of a directory in the given source code repository.
:param str project: Project ID or project name
:param str provider_name: The name of the source provider.
:param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
:param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
:param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
:param str path: The path contents to list, relative to the root of the repository.
:rtype: [SourceRepositoryItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if provider_name is not None:
route_values['providerName'] = self._serialize.url('provider_name', provider_name, 'str')
query_parameters = {}
if service_endpoint_id is not None:
query_parameters['serviceEndpointId'] = self._serialize.query('service_endpoint_id', service_endpoint_id, 'str')
if repository is not None:
query_parameters['repository'] = self._serialize.query('repository', repository, 'str')
if commit_or_branch is not None:
query_parameters['commitOrBranch'] = self._serialize.query('commit_or_branch', commit_or_branch, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
response = self._send(http_method='GET',
location_id='7944d6fb-df01-4709-920a-7a189aa34037',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[SourceRepositoryItem]', self._unwrap_collection(response)) |
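A minimal usage sketch for this endpoint; the client object and the concrete arguments are assumptions, not part of the record:

items = build_client.get_path_contents(        # build_client: an assumed, already-authenticated client
    project='MyProject',
    provider_name='TfsGit',
    repository='MyRepo',
    commit_or_branch='master',
    path='/')
for item in items:
    print(item.path)                           # one SourceRepositoryItem per entry at the repository root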
def get_pager_spec(self):
""" Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those. """
self_config = self.get_config()
pagercmd = self_config.get('pager')
istty = self_config.getboolean('pager_istty')
core_config = self.get_config('core')
if pagercmd is None:
pagercmd = core_config.get('pager')
if istty is None:
istty = core_config.get('pager_istty')
return {
"pagercmd": pagercmd,
"istty": istty
} | Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those. | Below is the the instruction that describes the task:
### Input:
Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those.
### Response:
def get_pager_spec(self):
""" Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those. """
self_config = self.get_config()
pagercmd = self_config.get('pager')
istty = self_config.getboolean('pager_istty')
core_config = self.get_config('core')
if pagercmd is None:
pagercmd = core_config.get('pager')
if istty is None:
istty = core_config.get('pager_istty')
return {
"pagercmd": pagercmd,
"istty": istty
} |
def resolve(self, symbol):
"""
Resolve a symbol using the entrypoint group.
:param symbol: The symbol being resolved.
:returns: The value of that symbol. If the symbol cannot be
found, or if no entrypoint group was passed to the
constructor, will return ``None``.
"""
# Search for a corresponding symbol
if symbol not in self._resolve_cache:
result = None
# Search through entrypoints only if we have a group
if self._group is not None:
for ep in pkg_resources.iter_entry_points(self._group, symbol):
try:
result = ep.load()
except (ImportError, AttributeError,
pkg_resources.UnknownExtra):
continue
# We found the result we were looking for
break
# Cache the result
self._resolve_cache[symbol] = result
return self._resolve_cache[symbol] | Resolve a symbol using the entrypoint group.
:param symbol: The symbol being resolved.
:returns: The value of that symbol. If the symbol cannot be
found, or if no entrypoint group was passed to the
constructor, will return ``None``. | Below is the the instruction that describes the task:
### Input:
Resolve a symbol using the entrypoint group.
:param symbol: The symbol being resolved.
:returns: The value of that symbol. If the symbol cannot be
found, or if no entrypoint group was passed to the
constructor, will return ``None``.
### Response:
def resolve(self, symbol):
"""
Resolve a symbol using the entrypoint group.
:param symbol: The symbol being resolved.
:returns: The value of that symbol. If the symbol cannot be
found, or if no entrypoint group was passed to the
constructor, will return ``None``.
"""
# Search for a corresponding symbol
if symbol not in self._resolve_cache:
result = None
# Search through entrypoints only if we have a group
if self._group is not None:
for ep in pkg_resources.iter_entry_points(self._group, symbol):
try:
result = ep.load()
except (ImportError, AttributeError,
pkg_resources.UnknownExtra):
continue
# We found the result we were looking for
break
# Cache the result
self._resolve_cache[symbol] = result
return self._resolve_cache[symbol] |
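A rough usage sketch; the class name and entry-point group below are invented for illustration only:

resolver = SymbolResolver(group='myapp.plugins')   # hypothetical class exposing the resolve() method above
handler = resolver.resolve('json')                 # first loadable 'json' entry point, or None if absent
handler_again = resolver.resolve('json')           # answered from self._resolve_cache on repeat calls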
def unmount_loopbacks(self):
"""Unmounts all loopback devices as identified by :func:`find_loopbacks`"""
# re-index loopback devices
self._index_loopbacks()
for dev in self.find_loopbacks():
_util.check_output_(['losetup', '-d', dev]) | Unmounts all loopback devices as identified by :func:`find_loopbacks` | Below is the the instruction that describes the task:
### Input:
Unmounts all loopback devices as identified by :func:`find_loopbacks`
### Response:
def unmount_loopbacks(self):
"""Unmounts all loopback devices as identified by :func:`find_loopbacks`"""
# re-index loopback devices
self._index_loopbacks()
for dev in self.find_loopbacks():
_util.check_output_(['losetup', '-d', dev]) |
def get_exit_code(self):
"""
@rtype: int
@return: Process exit code, or C{STILL_ACTIVE} if it's still alive.
        @warning: If a process returns C{STILL_ACTIVE} as its exit code,
you may not be able to determine if it's active or not with this
method. Use L{is_alive} to check if the process is still active.
Alternatively you can call L{get_handle} to get the handle object
and then L{ProcessHandle.wait} on it to wait until the process
finishes running.
"""
if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
else:
dwAccess = win32.PROCESS_QUERY_INFORMATION
return win32.GetExitCodeProcess( self.get_handle(dwAccess) ) | @rtype: int
@return: Process exit code, or C{STILL_ACTIVE} if it's still alive.
        @warning: If a process returns C{STILL_ACTIVE} as its exit code,
you may not be able to determine if it's active or not with this
method. Use L{is_alive} to check if the process is still active.
Alternatively you can call L{get_handle} to get the handle object
and then L{ProcessHandle.wait} on it to wait until the process
finishes running. | Below is the the instruction that describes the task:
### Input:
@rtype: int
@return: Process exit code, or C{STILL_ACTIVE} if it's still alive.
    @warning: If a process returns C{STILL_ACTIVE} as its exit code,
you may not be able to determine if it's active or not with this
method. Use L{is_alive} to check if the process is still active.
Alternatively you can call L{get_handle} to get the handle object
and then L{ProcessHandle.wait} on it to wait until the process
finishes running.
### Response:
def get_exit_code(self):
"""
@rtype: int
@return: Process exit code, or C{STILL_ACTIVE} if it's still alive.
        @warning: If a process returns C{STILL_ACTIVE} as its exit code,
you may not be able to determine if it's active or not with this
method. Use L{is_alive} to check if the process is still active.
Alternatively you can call L{get_handle} to get the handle object
and then L{ProcessHandle.wait} on it to wait until the process
finishes running.
"""
if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
else:
dwAccess = win32.PROCESS_QUERY_INFORMATION
return win32.GetExitCodeProcess( self.get_handle(dwAccess) ) |
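A hedged caller-side sketch; obtaining the process object is outside the record:

code = process.get_exit_code()
if code == win32.STILL_ACTIVE:
    # ambiguous, as the warning above explains; fall back to is_alive()
    still_running = process.is_alive()
else:
    print('process exited with code', code)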
def create_port_postcommit(self, context):
"""Create port non-database commit event."""
# No new events are handled until replay
# thread has put the switch in active state.
# If a switch is in active state, verify
# the switch is still in active state
# before accepting this new event.
#
# If create_port_postcommit fails, it causes
# other openstack dbs to be cleared and
# retries for new VMs will stop. Subnet
# transactions will continue to be retried.
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
# Verify segment.
if not self._is_valid_segment(vlan_segment):
return
port = context.current
if self._is_supported_deviceowner(port):
if nexus_help.is_baremetal(context.current):
all_switches, active_switches = (
self._get_baremetal_switches(context.current))
else:
host_id = context.current.get(bc.portbindings.HOST_ID)
all_switches, active_switches = (
self._get_host_switches(host_id))
# Verify switch is still up before replay
# thread checks.
verified_active_switches = []
for switch_ip in active_switches:
try:
self.driver.get_nexus_type(switch_ip)
verified_active_switches.append(switch_ip)
except Exception as e:
LOG.error("Failed to ping "
"switch ip %(switch_ip)s error %(exp_err)s",
{'switch_ip': switch_ip, 'exp_err': e})
LOG.debug("Create Stats: thread %(thid)d, "
"all_switches %(all)d, "
"active %(active)d, verified %(verify)d",
{'thid': threading.current_thread().ident,
'all': len(all_switches),
'active': len(active_switches),
'verify': len(verified_active_switches)})
# if host_id is valid and there is no active
# switches remaining
if all_switches and not verified_active_switches:
raise excep.NexusConnectFailed(
nexus_host=all_switches[0], config="None",
exc="Create Failed: Port event can not "
"be processed at this time.") | Create port non-database commit event. | Below is the the instruction that describes the task:
### Input:
Create port non-database commit event.
### Response:
def create_port_postcommit(self, context):
"""Create port non-database commit event."""
# No new events are handled until replay
# thread has put the switch in active state.
# If a switch is in active state, verify
# the switch is still in active state
# before accepting this new event.
#
# If create_port_postcommit fails, it causes
# other openstack dbs to be cleared and
# retries for new VMs will stop. Subnet
# transactions will continue to be retried.
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
# Verify segment.
if not self._is_valid_segment(vlan_segment):
return
port = context.current
if self._is_supported_deviceowner(port):
if nexus_help.is_baremetal(context.current):
all_switches, active_switches = (
self._get_baremetal_switches(context.current))
else:
host_id = context.current.get(bc.portbindings.HOST_ID)
all_switches, active_switches = (
self._get_host_switches(host_id))
# Verify switch is still up before replay
# thread checks.
verified_active_switches = []
for switch_ip in active_switches:
try:
self.driver.get_nexus_type(switch_ip)
verified_active_switches.append(switch_ip)
except Exception as e:
LOG.error("Failed to ping "
"switch ip %(switch_ip)s error %(exp_err)s",
{'switch_ip': switch_ip, 'exp_err': e})
LOG.debug("Create Stats: thread %(thid)d, "
"all_switches %(all)d, "
"active %(active)d, verified %(verify)d",
{'thid': threading.current_thread().ident,
'all': len(all_switches),
'active': len(active_switches),
'verify': len(verified_active_switches)})
# if host_id is valid and there is no active
# switches remaining
if all_switches and not verified_active_switches:
raise excep.NexusConnectFailed(
nexus_host=all_switches[0], config="None",
exc="Create Failed: Port event can not "
"be processed at this time.") |
def proc_collector(process_map, args, pipeline_string):
"""
Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
        The arguments passed through argparser that will be accessed to check the
type of list to be printed
pipeline_string: str
the pipeline string
"""
arguments_list = []
# prints a detailed list of the process class arguments
if args.detailed_list:
# list of attributes to be passed to proc_collector
arguments_list += [
"input_type",
"output_type",
"description",
"dependencies",
"conflicts",
"directives"
]
# prints a short list with each process and the corresponding description
if args.short_list:
arguments_list += [
"description"
]
if arguments_list:
# dict to store only the required entries
procs_dict = {}
# loops between all process_map Processes
for name, cls in process_map.items():
# instantiates each Process class
cls_inst = cls(template=name)
# checks if recipe is provided
if pipeline_string:
if name not in pipeline_string:
continue
d = {arg_key: vars(cls_inst)[arg_key] for arg_key in
vars(cls_inst) if arg_key in arguments_list}
procs_dict[name] = d
procs_dict_parser(procs_dict)
sys.exit(0) | Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
        The arguments passed through argparser that will be accessed to check the
type of list to be printed
pipeline_string: str
the pipeline string | Below is the the instruction that describes the task:
### Input:
Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
        The arguments passed through argparser that will be accessed to check the
type of list to be printed
pipeline_string: str
the pipeline string
### Response:
def proc_collector(process_map, args, pipeline_string):
"""
Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
        The arguments passed through argparser that will be accessed to check the
type of list to be printed
pipeline_string: str
the pipeline string
"""
arguments_list = []
# prints a detailed list of the process class arguments
if args.detailed_list:
# list of attributes to be passed to proc_collector
arguments_list += [
"input_type",
"output_type",
"description",
"dependencies",
"conflicts",
"directives"
]
# prints a short list with each process and the corresponding description
if args.short_list:
arguments_list += [
"description"
]
if arguments_list:
# dict to store only the required entries
procs_dict = {}
# loops between all process_map Processes
for name, cls in process_map.items():
# instantiates each Process class
cls_inst = cls(template=name)
# checks if recipe is provided
if pipeline_string:
if name not in pipeline_string:
continue
d = {arg_key: vars(cls_inst)[arg_key] for arg_key in
vars(cls_inst) if arg_key in arguments_list}
procs_dict[name] = d
procs_dict_parser(procs_dict)
sys.exit(0) |
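An illustrative invocation; the Namespace fields mirror the flags the function reads, the pipeline string is made up, and process_map is assumed to be the flowcraft process dictionary already in scope:

import argparse
cli_args = argparse.Namespace(detailed_list=False, short_list=True)
proc_collector(process_map, cli_args, pipeline_string="fastqc trimmomatic")
# prints a short name/description listing for the matching processes and exits via sys.exit(0)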
def write(obj, data=None, **kwargs):
"""Write a value in to loader source
:param obj: settings object
:param data: vars to be stored
:param kwargs: vars to be stored
:return:
"""
if obj.REDIS_ENABLED_FOR_DYNACONF is False:
raise RuntimeError(
"Redis is not configured \n"
"export REDIS_ENABLED_FOR_DYNACONF=true\n"
"and configure the REDIS_FOR_DYNACONF_* variables"
)
client = StrictRedis(**obj.REDIS_FOR_DYNACONF)
holder = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
data = data or {}
data.update(kwargs)
if not data:
raise AttributeError("Data must be provided")
redis_data = {
key.upper(): unparse_conf_data(value) for key, value in data.items()
}
client.hmset(holder.upper(), redis_data)
    load(obj) | Write a value into the loader source
:param obj: settings object
:param data: vars to be stored
:param kwargs: vars to be stored
:return: | Below is the the instruction that describes the task:
### Input:
    Write a value into the loader source
:param obj: settings object
:param data: vars to be stored
:param kwargs: vars to be stored
:return:
### Response:
def write(obj, data=None, **kwargs):
"""Write a value in to loader source
:param obj: settings object
:param data: vars to be stored
:param kwargs: vars to be stored
:return:
"""
if obj.REDIS_ENABLED_FOR_DYNACONF is False:
raise RuntimeError(
"Redis is not configured \n"
"export REDIS_ENABLED_FOR_DYNACONF=true\n"
"and configure the REDIS_FOR_DYNACONF_* variables"
)
client = StrictRedis(**obj.REDIS_FOR_DYNACONF)
holder = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
data = data or {}
data.update(kwargs)
if not data:
raise AttributeError("Data must be provided")
redis_data = {
key.upper(): unparse_conf_data(value) for key, value in data.items()
}
client.hmset(holder.upper(), redis_data)
load(obj) |
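A hedged example of calling this loader with a dynaconf settings object; the key names and values are invented, and the REDIS_* options are assumed to be configured already:

from dynaconf import settings
write(settings, {'server': 'redis.example.com'}, PORT=6379)
# stores SERVER and PORT in the Redis hash named after ENVVAR_PREFIX_FOR_DYNACONF, then reloads via load(obj)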
def _UploadChunk(self, chunk):
"""Uploads a single chunk to the transfer store flow.
Args:
chunk: A chunk to upload.
Returns:
A `BlobImageChunkDescriptor` object.
"""
blob = _CompressedDataBlob(chunk)
self._action.ChargeBytesToSession(len(chunk.data))
self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID)
return rdf_client_fs.BlobImageChunkDescriptor(
digest=hashlib.sha256(chunk.data).digest(),
offset=chunk.offset,
length=len(chunk.data)) | Uploads a single chunk to the transfer store flow.
Args:
chunk: A chunk to upload.
Returns:
A `BlobImageChunkDescriptor` object. | Below is the the instruction that describes the task:
### Input:
Uploads a single chunk to the transfer store flow.
Args:
chunk: A chunk to upload.
Returns:
A `BlobImageChunkDescriptor` object.
### Response:
def _UploadChunk(self, chunk):
"""Uploads a single chunk to the transfer store flow.
Args:
chunk: A chunk to upload.
Returns:
A `BlobImageChunkDescriptor` object.
"""
blob = _CompressedDataBlob(chunk)
self._action.ChargeBytesToSession(len(chunk.data))
self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID)
return rdf_client_fs.BlobImageChunkDescriptor(
digest=hashlib.sha256(chunk.data).digest(),
offset=chunk.offset,
length=len(chunk.data)) |
def delete_field(field_uri):
"""
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField>
"""
root = T.DeleteItemField(
T.FieldURI(FieldURI=field_uri)
)
return root | Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField> | Below is the the instruction that describes the task:
### Input:
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField>
### Response:
def delete_field(field_uri):
"""
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField>
"""
root = T.DeleteItemField(
T.FieldURI(FieldURI=field_uri)
)
return root |
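For example, two deletion elements as they might be collected while building an UpdateItem request (how they are attached to the request is an assumption):

updates = [
    delete_field('calendar:Resources'),
    delete_field('item:Body'),
]
# each entry serializes to <t:DeleteItemField><t:FieldURI FieldURI="..."/></t:DeleteItemField>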
def check_config(config, data):
'''Check config file inputs
:param dict config:
Configuration settings for the function
'''
essential_keys = ['input_mmin', 'b-value', 'sigma-b']
for key in essential_keys:
if not key in config.keys():
raise ValueError('For KijkoSellevolBayes the key %s needs to '
'be set in the configuation' % key)
if 'tolerance' not in config.keys() or not config['tolerance']:
config['tolerance'] = 1E-5
if not config.get('maximum_iterations', False):
config['maximum_iterations'] = 1000
if config['input_mmin'] < np.min(data['magnitude']):
config['input_mmin'] = np.min(data['magnitude'])
if fabs(config['sigma-b'] < 1E-15):
raise ValueError('Sigma-b must be greater than zero!')
return config | Check config file inputs
:param dict config:
Configuration settings for the function | Below is the the instruction that describes the task:
### Input:
Check config file inputs
:param dict config:
Configuration settings for the function
### Response:
def check_config(config, data):
'''Check config file inputs
:param dict config:
Configuration settings for the function
'''
essential_keys = ['input_mmin', 'b-value', 'sigma-b']
for key in essential_keys:
if not key in config.keys():
raise ValueError('For KijkoSellevolBayes the key %s needs to '
'be set in the configuation' % key)
if 'tolerance' not in config.keys() or not config['tolerance']:
config['tolerance'] = 1E-5
if not config.get('maximum_iterations', False):
config['maximum_iterations'] = 1000
if config['input_mmin'] < np.min(data['magnitude']):
config['input_mmin'] = np.min(data['magnitude'])
if fabs(config['sigma-b'] < 1E-15):
raise ValueError('Sigma-b must be greater than zero!')
return config |
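A small illustration of the defaults this check fills in; the magnitudes and settings are made up:

import numpy as np
config = {'input_mmin': 4.0, 'b-value': 1.0, 'sigma-b': 0.1}
data = {'magnitude': np.array([4.5, 5.2, 6.1])}
config = check_config(config, data)
# adds tolerance=1e-5 and maximum_iterations=1000, and raises input_mmin to 4.5,
# the smallest magnitude in the catalogue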
def _generate_phrases(self, sentences):
"""Method to generate contender phrases given the sentences of the text
document.
:param sentences: List of strings where each string represents a
sentence which forms the text.
:return: Set of string tuples where each tuple is a collection
of words forming a contender phrase.
"""
phrase_list = set()
# Create contender phrases from sentences.
for sentence in sentences:
word_list = [word.lower() for word in wordpunct_tokenize(sentence)]
phrase_list.update(self._get_phrase_list_from_words(word_list))
return phrase_list | Method to generate contender phrases given the sentences of the text
document.
:param sentences: List of strings where each string represents a
sentence which forms the text.
:return: Set of string tuples where each tuple is a collection
of words forming a contender phrase. | Below is the the instruction that describes the task:
### Input:
Method to generate contender phrases given the sentences of the text
document.
:param sentences: List of strings where each string represents a
sentence which forms the text.
:return: Set of string tuples where each tuple is a collection
of words forming a contender phrase.
### Response:
def _generate_phrases(self, sentences):
"""Method to generate contender phrases given the sentences of the text
document.
:param sentences: List of strings where each string represents a
sentence which forms the text.
:return: Set of string tuples where each tuple is a collection
of words forming a contender phrase.
"""
phrase_list = set()
# Create contender phrases from sentences.
for sentence in sentences:
word_list = [word.lower() for word in wordpunct_tokenize(sentence)]
phrase_list.update(self._get_phrase_list_from_words(word_list))
return phrase_list |
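This method backs the public rake-nltk workflow; a hedged end-to-end sketch (the sample text is arbitrary, and the nltk stopwords/punkt data are assumed to be installed):

from rake_nltk import Rake
r = Rake()
r.extract_keywords_from_text("Compatibility of systems of linear constraints over the set of natural numbers")
print(r.get_ranked_phrases())   # contender phrases generated via _generate_phrases(), ranked by score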
def _lexists(self, path):
'''IMPORTANT: expects `path` to already be deref()'erenced.'''
try:
return bool(self._lstat(path))
except os.error:
return False | IMPORTANT: expects `path` to already be deref()'erenced. | Below is the the instruction that describes the task:
### Input:
IMPORTANT: expects `path` to already be deref()'erenced.
### Response:
def _lexists(self, path):
'''IMPORTANT: expects `path` to already be deref()'erenced.'''
try:
return bool(self._lstat(path))
except os.error:
return False |
def normalize_hex(hex_color):
"""Transform a xxx hex color to xxxxxx.
"""
hex_color = hex_color.replace('#', '').lower()
length = len(hex_color)
if length in (6, 8):
return '#' + hex_color
if length not in (3, 4):
return None
strhex = u'#%s%s%s' % (
hex_color[0] * 2,
hex_color[1] * 2,
hex_color[2] * 2)
if length == 4:
strhex += hex_color[3] * 2
return strhex | Transform a xxx hex color to xxxxxx. | Below is the the instruction that describes the task:
### Input:
Transform a xxx hex color to xxxxxx.
### Response:
def normalize_hex(hex_color):
"""Transform a xxx hex color to xxxxxx.
"""
hex_color = hex_color.replace('#', '').lower()
length = len(hex_color)
if length in (6, 8):
return '#' + hex_color
if length not in (3, 4):
return None
strhex = u'#%s%s%s' % (
hex_color[0] * 2,
hex_color[1] * 2,
hex_color[2] * 2)
if length == 4:
strhex += hex_color[3] * 2
return strhex |
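A few concrete expansions that follow directly from the code above:

normalize_hex('#FA3')       # -> '#ffaa33'
normalize_hex('fa3b')       # -> '#ffaa33bb'
normalize_hex('#ffaa33')    # -> '#ffaa33'
normalize_hex('12')         # -> None (length not in 3, 4, 6 or 8)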
def _filter_dependencies_graph(self, internal):
"""build the internal or the external depedency graph"""
graph = collections.defaultdict(set)
for importee, importers in self.stats["dependencies"].items():
for importer in importers:
package = self._module_pkg.get(importer, importer)
is_inside = importee.startswith(package)
if is_inside and internal or not is_inside and not internal:
graph[importee].add(importer)
        return graph | build the internal or the external dependency graph
### Input:
    build the internal or the external dependency graph
### Response:
def _filter_dependencies_graph(self, internal):
"""build the internal or the external depedency graph"""
graph = collections.defaultdict(set)
for importee, importers in self.stats["dependencies"].items():
for importer in importers:
package = self._module_pkg.get(importer, importer)
is_inside = importee.startswith(package)
if is_inside and internal or not is_inside and not internal:
graph[importee].add(importer)
return graph |
def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
"""
Parameters:
- db_patterns
- tbl_patterns
- tbl_types
"""
self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
return self.recv_get_table_meta() | Parameters:
- db_patterns
- tbl_patterns
- tbl_types | Below is the the instruction that describes the task:
### Input:
Parameters:
- db_patterns
- tbl_patterns
- tbl_types
### Response:
def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
"""
Parameters:
- db_patterns
- tbl_patterns
- tbl_types
"""
self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
return self.recv_get_table_meta() |
def get(self, *args, **kwargs):
""" wraps the default get() and deals with encoding """
value, stat = super(XClient, self).get(*args, **kwargs)
try:
if value is not None:
value = value.decode(encoding="utf-8")
except UnicodeDecodeError:
pass
return (value, stat) | wraps the default get() and deals with encoding | Below is the the instruction that describes the task:
### Input:
wraps the default get() and deals with encoding
### Response:
def get(self, *args, **kwargs):
""" wraps the default get() and deals with encoding """
value, stat = super(XClient, self).get(*args, **kwargs)
try:
if value is not None:
value = value.decode(encoding="utf-8")
except UnicodeDecodeError:
pass
return (value, stat) |
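A hedged usage sketch assuming XClient wraps a kazoo-style ZooKeeper client; the connection details are invented:

client = XClient(hosts='127.0.0.1:2181')     # constructor arguments are an assumption
client.start()
value, stat = client.get('/config/feature_flag')
# value is a str when the stored bytes decode as UTF-8, otherwise the raw value is returned unchanged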
def _get_response_body_mime_type(self):
"""
Returns the response body MIME type. This might differ from the
overall response mime type e.g. in ATOM responses where the body
MIME type is XML.
"""
mime_type = self._get_response_mime_type()
if mime_type is AtomMime:
# FIXME: This cements using XML as the representation to use in
# ATOM bodies (which is perhaps not too worrisome).
mime_type = XmlMime
return mime_type | Returns the response body MIME type. This might differ from the
overall response mime type e.g. in ATOM responses where the body
MIME type is XML. | Below is the the instruction that describes the task:
### Input:
Returns the response body MIME type. This might differ from the
overall response mime type e.g. in ATOM responses where the body
MIME type is XML.
### Response:
def _get_response_body_mime_type(self):
"""
Returns the response body MIME type. This might differ from the
overall response mime type e.g. in ATOM responses where the body
MIME type is XML.
"""
mime_type = self._get_response_mime_type()
if mime_type is AtomMime:
# FIXME: This cements using XML as the representation to use in
# ATOM bodies (which is perhaps not too worrisome).
mime_type = XmlMime
return mime_type |
def link(origin=None, rel=None, value=None, attributes=None, source=None):
'''
Action function generator to create a link based on the context's current link, or on provided parameters
:param origin: IRI/string, or list of same; origins for the created relationships.
If None, the action context provides the parameter.
:param rel: IRI/string, or list of same; IDs for the created relationships.
If None, the action context provides the parameter.
:param value: IRI/string, or list of same; values/targets for the created relationships.
If None, the action context provides the parameter.
:param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
:return: Versa action function to do the actual work
'''
attributes = attributes or {}
#rel = I(iri.absolutize(rel, ctx.base))
def _link(ctx):
if source:
if not callable(source):
raise ValueError('Link source must be a pattern action function')
contexts = source(ctx)
for ctx in contexts:
ctx.output_model.add(ctx.current_link[ORIGIN], ctx.current_link[RELATIONSHIP], ctx.current_link[TARGET], attributes)
return
(o, r, v, a) = ctx.current_link
_origin = origin(ctx) if callable(origin) else origin
o_list = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin])
#_origin = _origin if isinstance(_origin, set) else set([_origin])
_rel = rel(ctx) if callable(rel) else rel
r_list = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel])
#_rel = _rel if isinstance(_rel, set) else set([_rel])
_value = value(ctx) if callable(value) else value
v_list = [v] if _value is None else (_value if isinstance(_value, list) else [_value])
#_target = _target if isinstance(_target, set) else set([_target])
_attributes = attributes(ctx) if callable(attributes) else attributes
#(ctx_o, ctx_r, ctx_t, ctx_a) = ctx.current_link
#FIXME: Add test for IRI output via wrapper action function
for (o, r, v, a) in [ (o, r, v, a) for o in o_list for r in r_list for v in v_list ]:
ctx.output_model.add(o, r, v, attributes)
return
return _link | Action function generator to create a link based on the context's current link, or on provided parameters
:param origin: IRI/string, or list of same; origins for the created relationships.
If None, the action context provides the parameter.
:param rel: IRI/string, or list of same; IDs for the created relationships.
If None, the action context provides the parameter.
:param value: IRI/string, or list of same; values/targets for the created relationships.
If None, the action context provides the parameter.
:param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
:return: Versa action function to do the actual work | Below is the the instruction that describes the task:
### Input:
Action function generator to create a link based on the context's current link, or on provided parameters
:param origin: IRI/string, or list of same; origins for the created relationships.
If None, the action context provides the parameter.
:param rel: IRI/string, or list of same; IDs for the created relationships.
If None, the action context provides the parameter.
:param value: IRI/string, or list of same; values/targets for the created relationships.
If None, the action context provides the parameter.
:param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
:return: Versa action function to do the actual work
### Response:
def link(origin=None, rel=None, value=None, attributes=None, source=None):
'''
Action function generator to create a link based on the context's current link, or on provided parameters
:param origin: IRI/string, or list of same; origins for the created relationships.
If None, the action context provides the parameter.
:param rel: IRI/string, or list of same; IDs for the created relationships.
If None, the action context provides the parameter.
:param value: IRI/string, or list of same; values/targets for the created relationships.
If None, the action context provides the parameter.
:param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
:return: Versa action function to do the actual work
'''
attributes = attributes or {}
#rel = I(iri.absolutize(rel, ctx.base))
def _link(ctx):
if source:
if not callable(source):
raise ValueError('Link source must be a pattern action function')
contexts = source(ctx)
for ctx in contexts:
ctx.output_model.add(ctx.current_link[ORIGIN], ctx.current_link[RELATIONSHIP], ctx.current_link[TARGET], attributes)
return
(o, r, v, a) = ctx.current_link
_origin = origin(ctx) if callable(origin) else origin
o_list = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin])
#_origin = _origin if isinstance(_origin, set) else set([_origin])
_rel = rel(ctx) if callable(rel) else rel
r_list = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel])
#_rel = _rel if isinstance(_rel, set) else set([_rel])
_value = value(ctx) if callable(value) else value
v_list = [v] if _value is None else (_value if isinstance(_value, list) else [_value])
#_target = _target if isinstance(_target, set) else set([_target])
_attributes = attributes(ctx) if callable(attributes) else attributes
#(ctx_o, ctx_r, ctx_t, ctx_a) = ctx.current_link
#FIXME: Add test for IRI output via wrapper action function
for (o, r, v, a) in [ (o, r, v, a) for o in o_list for r in r_list for v in v_list ]:
ctx.output_model.add(o, r, v, attributes)
return
return _link |
def show(block=False):
"""Show current figures using vispy
Parameters
----------
block : bool
If True, blocking mode will be used. If False, then non-blocking
/ interactive mode will be used.
Returns
-------
canvases : list
List of the vispy canvases that were created.
"""
if not has_matplotlib():
raise ImportError('Requires matplotlib version >= 1.2')
cs = [_mpl_to_vispy(plt.figure(ii)) for ii in plt.get_fignums()]
if block and len(cs) > 0:
cs[0].app.run()
return cs | Show current figures using vispy
Parameters
----------
block : bool
If True, blocking mode will be used. If False, then non-blocking
/ interactive mode will be used.
Returns
-------
canvases : list
List of the vispy canvases that were created. | Below is the the instruction that describes the task:
### Input:
Show current figures using vispy
Parameters
----------
block : bool
If True, blocking mode will be used. If False, then non-blocking
/ interactive mode will be used.
Returns
-------
canvases : list
List of the vispy canvases that were created.
### Response:
def show(block=False):
"""Show current figures using vispy
Parameters
----------
block : bool
If True, blocking mode will be used. If False, then non-blocking
/ interactive mode will be used.
Returns
-------
canvases : list
List of the vispy canvases that were created.
"""
if not has_matplotlib():
raise ImportError('Requires matplotlib version >= 1.2')
cs = [_mpl_to_vispy(plt.figure(ii)) for ii in plt.get_fignums()]
if block and len(cs) > 0:
cs[0].app.run()
return cs |
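For instance, rendering the currently open matplotlib figures through vispy; the plotted data is arbitrary:

import matplotlib.pyplot as plt
plt.plot([0, 1, 2], [0, 1, 4])
canvases = show(block=True)   # one vispy canvas per open figure; block=True runs the vispy event loop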
def cumprod(self, axis=0, *args, **kwargs):
"""
Cumulative product for each group.
"""
nv.validate_groupby_func('cumprod', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs) | Cumulative product for each group. | Below is the the instruction that describes the task:
### Input:
Cumulative product for each group.
### Response:
def cumprod(self, axis=0, *args, **kwargs):
"""
Cumulative product for each group.
"""
nv.validate_groupby_func('cumprod', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs) |
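A quick pandas illustration of the per-group running product; the data is invented:

import pandas as pd
df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [2, 3, 4]})
print(df.groupby('key')['val'].cumprod())   # -> 2, 6, 4 (restarts for each group)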
def _remove_init_all(r):
"""Remove any __all__ in __init__.py file."""
new_r = redbaron.NodeList()
for n in r.node_list:
if n.type == 'assignment' and n.target.value == '__all__':
pass
else:
new_r.append(n)
return new_r | Remove any __all__ in __init__.py file. | Below is the the instruction that describes the task:
### Input:
Remove any __all__ in __init__.py file.
### Response:
def _remove_init_all(r):
"""Remove any __all__ in __init__.py file."""
new_r = redbaron.NodeList()
for n in r.node_list:
if n.type == 'assignment' and n.target.value == '__all__':
pass
else:
new_r.append(n)
return new_r |
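A hedged redbaron round trip showing the assignment being dropped; the module text is invented:

from redbaron import RedBaron
red = RedBaron("__all__ = ['x']\n\ndef x():\n    pass\n")
cleaned = _remove_init_all(red)   # a NodeList without the __all__ assignment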
def shaddalike(partial, fully):
"""
    If the two words have the same letters and the same harakats, this function returns True.
    The first word is partially vocalized, the second is fully vocalized.
    If the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word.
@param partial: the partially vocalized word
@type partial: unicode
@param fully: the fully vocalized word
@type fully: unicode
@return: if contains shadda
@rtype: Boolean
"""
# المدخل ليس به شدة، لا داعي للبحث
if not has_shadda(partial):
return True
# المدخل به شدة، والنتيجة ليس بها شدة، خاطئ
elif not has_shadda(fully) and has_shadda(partial):
return False
# المدخل والمخرج بهما شدة، نتأكد من موقعهما
partial = strip_harakat(partial)
fully = strip_harakat(fully)
pstack = stack.Stack(partial)
vstack = stack.Stack(fully)
plast = pstack.pop()
vlast = vstack.pop()
# if debug: print "+0", Pstack, Vstack
while plast != None and vlast != None:
if plast == vlast:
plast = pstack.pop()
vlast = vstack.pop()
elif plast == SHADDA and vlast != SHADDA:
# if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
break
elif plast != SHADDA and vlast == SHADDA:
# if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
vlast = vstack.pop()
else:
# if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
break
if not (pstack.is_empty() and vstack.is_empty()):
return False
else:
        return True | If the two words have the same letters and the same harakats, this function returns True.
    The first word is partially vocalized, the second is fully vocalized.
    If the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word.
@param partial: the partially vocalized word
@type partial: unicode
@param fully: the fully vocalized word
@type fully: unicode
@return: if contains shadda
@rtype: Boolean | Below is the the instruction that describes the task:
### Input:
    If the two words have the same letters and the same harakats, this function returns True.
    The first word is partially vocalized, the second is fully vocalized.
    If the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word.
@param partial: the partially vocalized word
@type partial: unicode
@param fully: the fully vocalized word
@type fully: unicode
@return: if contains shadda
@rtype: Boolean
### Response:
def shaddalike(partial, fully):
"""
    If the two words have the same letters and the same harakats, this function returns True.
    The first word is partially vocalized, the second is fully vocalized.
    If the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word.
@param partial: the partially vocalized word
@type partial: unicode
@param fully: the fully vocalized word
@type fully: unicode
@return: if contains shadda
@rtype: Boolean
"""
# المدخل ليس به شدة، لا داعي للبحث
if not has_shadda(partial):
return True
# المدخل به شدة، والنتيجة ليس بها شدة، خاطئ
elif not has_shadda(fully) and has_shadda(partial):
return False
# المدخل والمخرج بهما شدة، نتأكد من موقعهما
partial = strip_harakat(partial)
fully = strip_harakat(fully)
pstack = stack.Stack(partial)
vstack = stack.Stack(fully)
plast = pstack.pop()
vlast = vstack.pop()
# if debug: print "+0", Pstack, Vstack
while plast != None and vlast != None:
if plast == vlast:
plast = pstack.pop()
vlast = vstack.pop()
elif plast == SHADDA and vlast != SHADDA:
# if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
break
elif plast != SHADDA and vlast == SHADDA:
# if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
vlast = vstack.pop()
else:
# if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
break
if not (pstack.is_empty() and vstack.is_empty()):
return False
else:
return True |
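Two of the early-return paths, written with the module's SHADDA constant so the letters themselves stay arbitrary:

plain = u'\u0643\u062a\u0628'                      # a partial word without shadda
with_shadda = u'\u0643\u062a' + SHADDA + u'\u0628'
shaddalike(plain, u'\u0643\u062a\u0628')           # True: no shadda in the partial word
shaddalike(with_shadda, u'\u0643\u062a\u0628')     # False: shadda appears only in the partial word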
def get_latex_expression(s, pos, **parse_flags):
"""
Reads a latex expression, e.g. macro argument. This may be a single char, an escape
    sequence, or an expression placed in braces.
Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the
expression, and `len` is its length.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
"""
return LatexWalker(s, **parse_flags).get_latex_expression(pos=pos) | Reads a latex expression, e.g. macro argument. This may be a single char, an escape
    sequence, or an expression placed in braces.
Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the
expression, and `len` is its length.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_expression()` instead. | Below is the the instruction that describes the task:
### Input:
Reads a latex expression, e.g. macro argument. This may be a single char, an escape
    sequence, or an expression placed in braces.
Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the
expression, and `len` is its length.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
### Response:
def get_latex_expression(s, pos, **parse_flags):
"""
Reads a latex expression, e.g. macro argument. This may be a single char, an escape
    sequence, or an expression placed in braces.
Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the
expression, and `len` is its length.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
"""
return LatexWalker(s, **parse_flags).get_latex_expression(pos=pos) |
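For example (the input string is arbitrary; the commented line is the non-deprecated equivalent):

node, pos, length = get_latex_expression(r"\textbf{Hi} there", pos=0)
# node, pos, length = LatexWalker(r"\textbf{Hi} there").get_latex_expression(pos=0)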
def send_error(self, status_code: int = 500, **kwargs: Any) -> None:
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response", exc_info=True)
return
self.clear()
reason = kwargs.get("reason")
if "exc_info" in kwargs:
exception = kwargs["exc_info"][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish() | Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`. | Below is the the instruction that describes the task:
### Input:
Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
### Response:
def send_error(self, status_code: int = 500, **kwargs: Any) -> None:
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response", exc_info=True)
return
self.clear()
reason = kwargs.get("reason")
if "exc_info" in kwargs:
exception = kwargs["exc_info"][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish() |
def add(self, original_index, operation):
"""Add an operation to this Run instance.
:Parameters:
- `original_index`: The original index of this operation
within a larger bulk operation.
- `operation`: The operation document.
"""
self.index_map.append(original_index)
self.ops.append(operation) | Add an operation to this Run instance.
:Parameters:
- `original_index`: The original index of this operation
within a larger bulk operation.
- `operation`: The operation document. | Below is the instruction that describes the task:
### Input:
Add an operation to this Run instance.
:Parameters:
- `original_index`: The original index of this operation
within a larger bulk operation.
- `operation`: The operation document.
### Response:
def add(self, original_index, operation):
"""Add an operation to this Run instance.
:Parameters:
- `original_index`: The original index of this operation
within a larger bulk operation.
- `operation`: The operation document.
"""
self.index_map.append(original_index)
self.ops.append(operation) |
def parse_voc_rec(filename):
"""
parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict
"""
import xml.etree.ElementTree as ET
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_dict = dict()
obj_dict['name'] = obj.find('name').text
obj_dict['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_dict['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_dict)
return objects | parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict | Below is the instruction that describes the task:
### Input:
parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict
### Response:
def parse_voc_rec(filename):
"""
parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict
"""
import xml.etree.ElementTree as ET
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_dict = dict()
obj_dict['name'] = obj.find('name').text
obj_dict['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_dict['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_dict)
return objects |
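An illustrative usage sketch for parse_voc_rec above (not part of the original record; the XML content and temp-file handling are invented for demonstration):

import os
import tempfile

voc_xml = ("<annotation><object><name>dog</name><difficult>0</difficult>"
           "<bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>"
           "</object></annotation>")
with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
    f.write(voc_xml)          # write a minimal single-object VOC record
    xml_path = f.name
print(parse_voc_rec(xml_path))
# [{'name': 'dog', 'difficult': 0, 'bbox': [48, 240, 195, 371]}]
os.remove(xml_path)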
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id'] | Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated. | Below is the instruction that describes the task:
### Input:
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
### Response:
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id'] |
def bits_to_dict(bits):
"""Convert a Django template tag's kwargs into a dictionary of Python types.
The only necessary types are number, boolean, list, and string.
http://pygments.org/docs/formatters/#HtmlFormatter
from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"]
to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],}
"""
# Strip any trailing commas
cleaned_bits = [bit[:-1] if bit.endswith(',') else bit for bit in bits]
# Create dictionary by splitting on equal signs
options = dict(bit.split('=') for bit in cleaned_bits)
# Coerce strings of types to Python types
for key in options:
if options[key] == "'true'" or options[key] == "'false'":
options[key] = options[key].title()
options[key] = ast.literal_eval(options[key])
return options | Convert a Django template tag's kwargs into a dictionary of Python types.
The only necessary types are number, boolean, list, and string.
http://pygments.org/docs/formatters/#HtmlFormatter
from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"]
to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],} | Below is the instruction that describes the task:
### Input:
Convert a Django template tag's kwargs into a dictionary of Python types.
The only necessary types are number, boolean, list, and string.
http://pygments.org/docs/formatters/#HtmlFormatter
from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"]
to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],}
### Response:
def bits_to_dict(bits):
"""Convert a Django template tag's kwargs into a dictionary of Python types.
The only necessary types are number, boolean, list, and string.
http://pygments.org/docs/formatters/#HtmlFormatter
from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"]
to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],}
"""
# Strip any trailing commas
cleaned_bits = [bit[:-1] if bit.endswith(',') else bit for bit in bits]
# Create dictionary by splitting on equal signs
options = dict(bit.split('=') for bit in cleaned_bits)
# Coerce strings of types to Python types
for key in options:
if options[key] == "'true'" or options[key] == "'false'":
options[key] = options[key].title()
options[key] = ast.literal_eval(options[key])
return options |
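A small usage sketch for bits_to_dict above (illustrative; assumes the defining module already has `import ast`, which the function body relies on):

bits = ["style='monokai'", "linenos=4,", "cssclass='highlight'"]
print(bits_to_dict(bits))
# {'style': 'monokai', 'linenos': 4, 'cssclass': 'highlight'}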
def loadCommandMap(Class, subparsers=None, instantiate=True, **cmd_kwargs):
"""Instantiate each registered command to a dict mapping name/alias to
instance.
Due to aliases, the returned length may be greater than the number of
commands, but the unique instance count will match.
"""
if not Class._registered_commands:
raise ValueError("No commands have been registered with {}"
.format(Class))
all = {}
for Cmd in set(Class._registered_commands[Class].values()):
cmd = Cmd(subparsers=subparsers, **cmd_kwargs) \
if instantiate else Cmd
for name in [Cmd.name()] + Cmd.aliases():
all[name] = cmd
return all | Instantiate each registered command to a dict mapping name/alias to
instance.
Due to aliases, the returned length may be greater than the number of
commands, but the unique instance count will match. | Below is the instruction that describes the task:
### Input:
Instantiate each registered command to a dict mapping name/alias to
instance.
Due to aliases, the returned length may be greater than the number of
commands, but the unique instance count will match.
### Response:
def loadCommandMap(Class, subparsers=None, instantiate=True, **cmd_kwargs):
"""Instantiate each registered command to a dict mapping name/alias to
instance.
Due to aliases, the returned length may be greater than the number of
commands, but the unique instance count will match.
"""
if not Class._registered_commands:
raise ValueError("No commands have been registered with {}"
.format(Class))
all = {}
for Cmd in set(Class._registered_commands[Class].values()):
cmd = Cmd(subparsers=subparsers, **cmd_kwargs) \
if instantiate else Cmd
for name in [Cmd.name()] + Cmd.aliases():
all[name] = cmd
return all |
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None,
num=None, aux_note=None, label_store=None, fs=None,
custom_labels=None, write_dir=''):
"""
Write a WFDB annotation file.
Specify at least the following:
- The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- The annotation locations in samples relative to the beginning of
the record (sample)
- Either the numerical values used to store the labels
(`label_store`), or more commonly, the display symbols of each
label (`symbol`).
Parameters
----------
record_name : str
The string name of the WFDB record to be written (without any file
extensions).
extension : str
The string annotation file extension.
sample : numpy array
A numpy array containing the annotation locations in samples relative to
the beginning of the record.
symbol : list, or numpy array, optional
The symbols used to display the annotation labels. List or numpy array.
If this field is present, `label_store` must not be present.
subtype : numpy array, optional
A numpy array containing the marked class/category of each annotation.
chan : numpy array, optional
A numpy array containing the signal channel associated with each
annotation.
num : numpy array, optional
A numpy array containing the labelled annotation number for each
annotation.
aux_note : list, optional
A list containing the auxiliary information string (or None for
annotations without notes) for each annotation.
label_store : numpy array, optional
A numpy array containing the integer values used to store the
annotation labels. If this field is present, `symbol` must not be
present.
fs : int, or float, optional
The numerical sampling frequency of the record to be written to the file.
custom_labels : pandas dataframe, optional
The map of custom defined annotation labels used for this annotation, in
addition to the standard WFDB annotation labels. Custom labels are
defined by two or three fields:
- The integer values used to store custom annotation labels in the file
(optional)
- Their short display symbols
- Their long descriptions.
This input argument may come in four formats:
1. A pandas.DataFrame object with columns:
['label_store', 'symbol', 'description']
2. A pandas.DataFrame object with columns: ['symbol', 'description']
If this option is chosen, label_store values are automatically chosen.
3. A list or tuple of tuple triplets, with triplet elements
representing: (label_store, symbol, description).
4. A list or tuple of tuple pairs, with pair elements representing:
(symbol, description). If this option is chosen, label_store values
are automatically chosen.
If the `label_store` field is given for this function, and
`custom_labels` is defined, `custom_labels` must contain `label_store`
in its mapping. ie. it must come in format 1 or 3 above.
write_dir : str, optional
The directory in which to write the annotation file
Notes
-----
This is a gateway function, written as a simple way to write WFDB annotation
files without needing to explicitly create an Annotation object. You may also
create an Annotation object, manually set its attributes, and call its
`wrann` instance method.
Each annotation stored in a WFDB annotation file contains a sample field and
a label field. All other fields may or may not be present.
Examples
--------
>>> # Read an annotation as an Annotation object
>>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
>>> # Write a copy of the annotation file
>>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
"""
# Create Annotation object
annotation = Annotation(record_name=record_name, extension=extension,
sample=sample, symbol=symbol, subtype=subtype,
chan=chan, num=num, aux_note=aux_note,
label_store=label_store, fs=fs,
custom_labels=custom_labels)
# Find out which input field describes the labels
if symbol is None:
if label_store is None:
raise Exception("Either the 'symbol' field or the 'label_store' field must be set")
else:
if label_store is None:
annotation.sym_to_aux()
else:
raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels")
# Perform field checks and write the annotation file
annotation.wrann(write_fs=True, write_dir=write_dir) | Write a WFDB annotation file.
Specify at least the following:
- The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- The annotation locations in samples relative to the beginning of
the record (sample)
- Either the numerical values used to store the labels
(`label_store`), or more commonly, the display symbols of each
label (`symbol`).
Parameters
----------
record_name : str
The string name of the WFDB record to be written (without any file
extensions).
extension : str
The string annotation file extension.
sample : numpy array
A numpy array containing the annotation locations in samples relative to
the beginning of the record.
symbol : list, or numpy array, optional
The symbols used to display the annotation labels. List or numpy array.
If this field is present, `label_store` must not be present.
subtype : numpy array, optional
A numpy array containing the marked class/category of each annotation.
chan : numpy array, optional
A numpy array containing the signal channel associated with each
annotation.
num : numpy array, optional
A numpy array containing the labelled annotation number for each
annotation.
aux_note : list, optional
A list containing the auxiliary information string (or None for
annotations without notes) for each annotation.
label_store : numpy array, optional
A numpy array containing the integer values used to store the
annotation labels. If this field is present, `symbol` must not be
present.
fs : int, or float, optional
The numerical sampling frequency of the record to be written to the file.
custom_labels : pandas dataframe, optional
The map of custom defined annotation labels used for this annotation, in
addition to the standard WFDB annotation labels. Custom labels are
defined by two or three fields:
- The integer values used to store custom annotation labels in the file
(optional)
- Their short display symbols
- Their long descriptions.
This input argument may come in four formats:
1. A pandas.DataFrame object with columns:
['label_store', 'symbol', 'description']
2. A pandas.DataFrame object with columns: ['symbol', 'description']
If this option is chosen, label_store values are automatically chosen.
3. A list or tuple of tuple triplets, with triplet elements
representing: (label_store, symbol, description).
4. A list or tuple of tuple pairs, with pair elements representing:
(symbol, description). If this option is chosen, label_store values
are automatically chosen.
If the `label_store` field is given for this function, and
`custom_labels` is defined, `custom_labels` must contain `label_store`
in its mapping. ie. it must come in format 1 or 3 above.
write_dir : str, optional
The directory in which to write the annotation file
Notes
-----
This is a gateway function, written as a simple way to write WFDB annotation
files without needing to explicitly create an Annotation object. You may also
create an Annotation object, manually set its attributes, and call its
`wrann` instance method.
Each annotation stored in a WFDB annotation file contains a sample field and
a label field. All other fields may or may not be present.
Examples
--------
>>> # Read an annotation as an Annotation object
>>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
>>> # Write a copy of the annotation file
>>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol) | Below is the instruction that describes the task:
### Input:
Write a WFDB annotation file.
Specify at least the following:
- The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- The annotation locations in samples relative to the beginning of
the record (sample)
- Either the numerical values used to store the labels
(`label_store`), or more commonly, the display symbols of each
label (`symbol`).
Parameters
----------
record_name : str
The string name of the WFDB record to be written (without any file
extensions).
extension : str
The string annotation file extension.
sample : numpy array
A numpy array containing the annotation locations in samples relative to
the beginning of the record.
symbol : list, or numpy array, optional
The symbols used to display the annotation labels. List or numpy array.
If this field is present, `label_store` must not be present.
subtype : numpy array, optional
A numpy array containing the marked class/category of each annotation.
chan : numpy array, optional
A numpy array containing the signal channel associated with each
annotation.
num : numpy array, optional
A numpy array containing the labelled annotation number for each
annotation.
aux_note : list, optional
A list containing the auxiliary information string (or None for
annotations without notes) for each annotation.
label_store : numpy array, optional
A numpy array containing the integer values used to store the
annotation labels. If this field is present, `symbol` must not be
present.
fs : int, or float, optional
The numerical sampling frequency of the record to be written to the file.
custom_labels : pandas dataframe, optional
The map of custom defined annotation labels used for this annotation, in
addition to the standard WFDB annotation labels. Custom labels are
defined by two or three fields:
- The integer values used to store custom annotation labels in the file
(optional)
- Their short display symbols
- Their long descriptions.
This input argument may come in four formats:
1. A pandas.DataFrame object with columns:
['label_store', 'symbol', 'description']
2. A pandas.DataFrame object with columns: ['symbol', 'description']
If this option is chosen, label_store values are automatically chosen.
3. A list or tuple of tuple triplets, with triplet elements
representing: (label_store, symbol, description).
4. A list or tuple of tuple pairs, with pair elements representing:
(symbol, description). If this option is chosen, label_store values
are automatically chosen.
If the `label_store` field is given for this function, and
`custom_labels` is defined, `custom_labels` must contain `label_store`
in its mapping. ie. it must come in format 1 or 3 above.
write_dir : str, optional
The directory in which to write the annotation file
Notes
-----
This is a gateway function, written as a simple way to write WFDB annotation
files without needing to explicitly create an Annotation object. You may also
create an Annotation object, manually set its attributes, and call its
`wrann` instance method.
Each annotation stored in a WFDB annotation file contains a sample field and
a label field. All other fields may or may not be present.
Examples
--------
>>> # Read an annotation as an Annotation object
>>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
>>> # Write a copy of the annotation file
>>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
### Response:
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None,
num=None, aux_note=None, label_store=None, fs=None,
custom_labels=None, write_dir=''):
"""
Write a WFDB annotation file.
Specify at least the following:
- The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- The annotation locations in samples relative to the beginning of
the record (sample)
- Either the numerical values used to store the labels
(`label_store`), or more commonly, the display symbols of each
label (`symbol`).
Parameters
----------
record_name : str
The string name of the WFDB record to be written (without any file
extensions).
extension : str
The string annotation file extension.
sample : numpy array
A numpy array containing the annotation locations in samples relative to
the beginning of the record.
symbol : list, or numpy array, optional
The symbols used to display the annotation labels. List or numpy array.
If this field is present, `label_store` must not be present.
subtype : numpy array, optional
A numpy array containing the marked class/category of each annotation.
chan : numpy array, optional
A numpy array containing the signal channel associated with each
annotation.
num : numpy array, optional
A numpy array containing the labelled annotation number for each
annotation.
aux_note : list, optional
A list containing the auxiliary information string (or None for
annotations without notes) for each annotation.
label_store : numpy array, optional
A numpy array containing the integer values used to store the
annotation labels. If this field is present, `symbol` must not be
present.
fs : int, or float, optional
The numerical sampling frequency of the record to be written to the file.
custom_labels : pandas dataframe, optional
The map of custom defined annotation labels used for this annotation, in
addition to the standard WFDB annotation labels. Custom labels are
defined by two or three fields:
- The integer values used to store custom annotation labels in the file
(optional)
- Their short display symbols
- Their long descriptions.
This input argument may come in four formats:
1. A pandas.DataFrame object with columns:
['label_store', 'symbol', 'description']
2. A pandas.DataFrame object with columns: ['symbol', 'description']
If this option is chosen, label_store values are automatically chosen.
3. A list or tuple of tuple triplets, with triplet elements
representing: (label_store, symbol, description).
4. A list or tuple of tuple pairs, with pair elements representing:
(symbol, description). If this option is chosen, label_store values
are automatically chosen.
If the `label_store` field is given for this function, and
`custom_labels` is defined, `custom_labels` must contain `label_store`
in its mapping. ie. it must come in format 1 or 3 above.
write_dir : str, optional
The directory in which to write the annotation file
Notes
-----
This is a gateway function, written as a simple way to write WFDB annotation
files without needing to explicitly create an Annotation object. You may also
create an Annotation object, manually set its attributes, and call its
`wrann` instance method.
Each annotation stored in a WFDB annotation file contains a sample field and
a label field. All other fields may or may not be present.
Examples
--------
>>> # Read an annotation as an Annotation object
>>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
>>> # Write a copy of the annotation file
>>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
"""
# Create Annotation object
annotation = Annotation(record_name=record_name, extension=extension,
sample=sample, symbol=symbol, subtype=subtype,
chan=chan, num=num, aux_note=aux_note,
label_store=label_store, fs=fs,
custom_labels=custom_labels)
# Find out which input field describes the labels
if symbol is None:
if label_store is None:
raise Exception("Either the 'symbol' field or the 'label_store' field must be set")
else:
if label_store is None:
annotation.sym_to_aux()
else:
raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels")
# Perform field checks and write the annotation file
annotation.wrann(write_fs=True, write_dir=write_dir) |
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes) | return an empty BlockManager with the items axis of len 0 | Below is the instruction that describes the task:
### Input:
return an empty BlockManager with the items axis of len 0
### Response:
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes) |
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5']
"""
key = natsort_keygen(key, alg)
return sorted(seq, reverse=reverse, key=key) | Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5'] | Below is the instruction that describes the task:
### Input:
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5']
### Response:
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5']
"""
key = natsort_keygen(key, alg)
return sorted(seq, reverse=reverse, key=key) |
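A brief usage sketch for natsorted above (illustrative; assumes the natsort package where this function is defined):

from natsort import natsorted

files = ['run10.log', 'run2.log', 'run1.log']
print(natsorted(files))                       # ['run1.log', 'run2.log', 'run10.log']
pairs = [('run10.log', 0.3), ('run2.log', 0.7)]
print(natsorted(pairs, key=lambda p: p[0]))   # [('run2.log', 0.7), ('run10.log', 0.3)]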
def write_results(self, data, name=None):
"""
Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param output: JSON object.
"""
if name:
filepath = os.path.abspath(name)
else:
filepath = os.path.join(os.getcwd(), "results.json")
with open(filepath, "w", encoding="utf8") as f:
try:
f.write(unicode(json.dumps(data, indent=4)))
except NameError:
f.write(json.dumps(data, indent=4)) | Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param output: JSON object. | Below is the instruction that describes the task:
### Input:
Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param output: JSON object.
### Response:
def write_results(self, data, name=None):
"""
Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param output: JSON object.
"""
if name:
filepath = os.path.abspath(name)
else:
filepath = os.path.join(os.getcwd(), "results.json")
with open(filepath, "w", encoding="utf8") as f:
try:
f.write(unicode(json.dumps(data, indent=4)))
except NameError:
f.write(json.dumps(data, indent=4)) |
async def is_object_synced_to_cn(self, client, pid):
"""Check if object with {pid} has successfully synced to the CN.
CNRead.describe() is used as it's a light-weight HTTP HEAD request.
This assumes that the call is being made over a connection that has been
authenticated and has read or better access on the given object if it exists.
"""
try:
await client.describe(pid)
except d1_common.types.exceptions.DataONEException:
return False
return True | Check if object with {pid} has successfully synced to the CN.
CNRead.describe() is used as it's a light-weight HTTP HEAD request.
This assumes that the call is being made over a connection that has been
authenticated and has read or better access on the given object if it exists. | Below is the instruction that describes the task:
### Input:
Check if object with {pid} has successfully synced to the CN.
CNRead.describe() is used as it's a light-weight HTTP HEAD request.
This assumes that the call is being made over a connection that has been
authenticated and has read or better access on the given object if it exists.
### Response:
async def is_object_synced_to_cn(self, client, pid):
"""Check if object with {pid} has successfully synced to the CN.
CNRead.describe() is used as it's a light-weight HTTP HEAD request.
This assumes that the call is being made over a connection that has been
authenticated and has read or better access on the given object if it exists.
"""
try:
await client.describe(pid)
except d1_common.types.exceptions.DataONEException:
return False
return True |
def main():
"""
Main function, called when run as an application.
"""
global args, server_address
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"host", nargs='?',
help="address of host (default %r)" % (SERVER_HOST,),
default=SERVER_HOST,
)
parser.add_argument(
"port", nargs='?', type=int,
help="server port (default %r)" % (SERVER_PORT,),
default=SERVER_PORT,
)
parser.add_argument(
"--hello", action="store_true",
default=False,
help="send a hello message",
)
parser.add_argument(
"--connect-timeout", nargs='?', type=int,
help="idle connection timeout",
default=CONNECT_TIMEOUT,
)
parser.add_argument(
"--idle-timeout", nargs='?', type=int,
help="idle connection timeout",
default=IDLE_TIMEOUT,
)
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# extract the server address and port
host = args.host
port = args.port
server_address = (host, port)
if _debug: _log.debug(" - server_address: %r", server_address)
# build the stack
this_console = ConsoleClient()
if _debug: _log.debug(" - this_console: %r", this_console)
this_middle_man = MiddleMan()
if _debug: _log.debug(" - this_middle_man: %r", this_middle_man)
this_director = TCPClientDirector(
connect_timeout=args.connect_timeout,
idle_timeout=args.idle_timeout,
)
if _debug: _log.debug(" - this_director: %r", this_director)
bind(this_console, this_middle_man, this_director)
bind(MiddleManASE(), this_director)
# create a task manager for scheduled functions
task_manager = TaskManager()
if _debug: _log.debug(" - task_manager: %r", task_manager)
# don't wait to connect
deferred(this_director.connect, server_address)
# send hello maybe
if args.hello:
deferred(this_middle_man.indication, PDU(b'Hello, world!\n'))
if _debug: _log.debug("running")
run()
if _debug: _log.debug("fini") | Main function, called when run as an application. | Below is the the instruction that describes the task:
### Input:
Main function, called when run as an application.
### Response:
def main():
"""
Main function, called when run as an application.
"""
global args, server_address
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"host", nargs='?',
help="address of host (default %r)" % (SERVER_HOST,),
default=SERVER_HOST,
)
parser.add_argument(
"port", nargs='?', type=int,
help="server port (default %r)" % (SERVER_PORT,),
default=SERVER_PORT,
)
parser.add_argument(
"--hello", action="store_true",
default=False,
help="send a hello message",
)
parser.add_argument(
"--connect-timeout", nargs='?', type=int,
help="idle connection timeout",
default=CONNECT_TIMEOUT,
)
parser.add_argument(
"--idle-timeout", nargs='?', type=int,
help="idle connection timeout",
default=IDLE_TIMEOUT,
)
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# extract the server address and port
host = args.host
port = args.port
server_address = (host, port)
if _debug: _log.debug(" - server_address: %r", server_address)
# build the stack
this_console = ConsoleClient()
if _debug: _log.debug(" - this_console: %r", this_console)
this_middle_man = MiddleMan()
if _debug: _log.debug(" - this_middle_man: %r", this_middle_man)
this_director = TCPClientDirector(
connect_timeout=args.connect_timeout,
idle_timeout=args.idle_timeout,
)
if _debug: _log.debug(" - this_director: %r", this_director)
bind(this_console, this_middle_man, this_director)
bind(MiddleManASE(), this_director)
# create a task manager for scheduled functions
task_manager = TaskManager()
if _debug: _log.debug(" - task_manager: %r", task_manager)
# don't wait to connect
deferred(this_director.connect, server_address)
# send hello maybe
if args.hello:
deferred(this_middle_man.indication, PDU(b'Hello, world!\n'))
if _debug: _log.debug("running")
run()
if _debug: _log.debug("fini") |
def get_root_objective_bank_ids(self, alias):
"""Gets the root objective bank Ids in this hierarchy.
return: (osid.id.IdList) - the root objective bank Ids
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
url_path = self._urls.roots(alias)
return self._get_request(url_path) | Gets the root objective bank Ids in this hierarchy.
return: (osid.id.IdList) - the root objective bank Ids
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented. | Below is the instruction that describes the task:
### Input:
Gets the root objective bank Ids in this hierarchy.
return: (osid.id.IdList) - the root objective bank Ids
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
### Response:
def get_root_objective_bank_ids(self, alias):
"""Gets the root objective bank Ids in this hierarchy.
return: (osid.id.IdList) - the root objective bank Ids
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
url_path = self._urls.roots(alias)
return self._get_request(url_path) |
def load_class(location):
""" Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer'
and return the IRCBotConsumer class.
"""
mod_name, cls_name = location = location.strip().split(':')
tokens = mod_name.split('.')
fromlist = '[]'
if len(tokens) > 1:
fromlist = '.'.join(tokens[:-1])
module = __import__(mod_name, fromlist=fromlist)
try:
return getattr(module, cls_name)
except AttributeError:
raise ImportError("%r not found in %r" % (cls_name, mod_name)) | Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer'
and return the IRCBotConsumer class. | Below is the instruction that describes the task:
### Input:
Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer'
and return the IRCBotConsumer class.
### Response:
def load_class(location):
""" Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer'
and return the IRCBotConsumer class.
"""
mod_name, cls_name = location = location.strip().split(':')
tokens = mod_name.split('.')
fromlist = '[]'
if len(tokens) > 1:
fromlist = '.'.join(tokens[:-1])
module = __import__(mod_name, fromlist=fromlist)
try:
return getattr(module, cls_name)
except AttributeError:
raise ImportError("%r not found in %r" % (cls_name, mod_name)) |
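A usage sketch for load_class above (illustrative; the stdlib targets are arbitrary examples, unrelated to fedmsg):

ordered_dict_cls = load_class('collections:OrderedDict')
print(ordered_dict_cls)   # <class 'collections.OrderedDict'>
handler_cls = load_class('logging.handlers:RotatingFileHandler')
print(handler_cls)        # <class 'logging.handlers.RotatingFileHandler'>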
def _process_sample (self, ap1, ap2, ap3, triple, tflags):
"""We have computed one independent phase closure triple in one timeslot.
"""
# Frequency-resolved:
np.divide (triple, np.abs (triple), triple)
phase = np.angle (triple)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap1, phase, tflags + 0.)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap2, phase, tflags + 0.)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap3, phase, tflags + 0.)
# Frequency-averaged:
triple = np.dot (triple, tflags) / tflags.sum ()
phase = np.angle (triple)
self.global_stats_by_time.accum (self.cur_time, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap1, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap2, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap3, phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap2), phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap3), phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap2, ap3), phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap1, phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap2, phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap3, phase) | We have computed one independent phase closure triple in one timeslot. | Below is the instruction that describes the task:
### Input:
We have computed one independent phase closure triple in one timeslot.
### Response:
def _process_sample (self, ap1, ap2, ap3, triple, tflags):
"""We have computed one independent phase closure triple in one timeslot.
"""
# Frequency-resolved:
np.divide (triple, np.abs (triple), triple)
phase = np.angle (triple)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap1, phase, tflags + 0.)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap2, phase, tflags + 0.)
self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap3, phase, tflags + 0.)
# Frequency-averaged:
triple = np.dot (triple, tflags) / tflags.sum ()
phase = np.angle (triple)
self.global_stats_by_time.accum (self.cur_time, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap1, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap2, phase)
self.ap_stats_by_ddid[self.cur_ddid].accum (ap3, phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap2), phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap3), phase)
self.bp_stats_by_ddid[self.cur_ddid].accum ((ap2, ap3), phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap1, phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap2, phase)
self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap3, phase) |
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):
""" Return the field specified by its key from the specified section.
This method accesses the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned.
"""
key_notation = '.'.join([section, key])
try:
return self._decode_value(self._data_from_dotnotation(key_notation, default))
except KeyError:
return None | Return the field specified by its key from the specified section.
This method accesses the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned. | Below is the instruction that describes the task:
### Input:
Return the field specified by its key from the specified section.
This method accesses the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned.
### Response:
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):
""" Return the field specified by its key from the specified section.
This method accesses the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned.
"""
key_notation = '.'.join([section, key])
try:
return self._decode_value(self._data_from_dotnotation(key_notation, default))
except KeyError:
return None |
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj | Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar. | Below is the instruction that describes the task:
### Input:
Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
### Response:
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj |
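A usage sketch for add_dict_to_cookiejar above (illustrative; assumes the requests library context this helper belongs to, where cookiejar_from_dict and a dict-aware cookie jar are available):

import requests

session = requests.Session()
add_dict_to_cookiejar(session.cookies, {'sessionid': 'abc123'})
print(session.cookies.get('sessionid'))   # 'abc123'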
def render(self, context=None):
"""Renders the error message, optionally using the given context (which, if specified,
will override the internal context)."""
ctx = context.render() if context else self.get_error_context().render()
return "%s: %s%s%s" % (
self.get_error_kind(),
self.get_error_message(),
(" (%s)." % ctx) if ctx else "",
self.get_additional_error_detail()
) | Renders the error message, optionally using the given context (which, if specified,
will override the internal context). | Below is the instruction that describes the task:
### Input:
Renders the error message, optionally using the given context (which, if specified,
will override the internal context).
### Response:
def render(self, context=None):
"""Renders the error message, optionally using the given context (which, if specified,
will override the internal context)."""
ctx = context.render() if context else self.get_error_context().render()
return "%s: %s%s%s" % (
self.get_error_kind(),
self.get_error_message(),
(" (%s)." % ctx) if ctx else "",
self.get_additional_error_detail()
) |
def _get_size(fileno):
# Thanks to fabric (fabfile.org), and
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
"""
Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple.
"""
# Inline imports, because these modules are not available on Windows.
# (This file is used by ConEmuOutput, which is used on Windows.)
import fcntl
import termios
# Buffer for the C call
buf = array.array(b'h' if six.PY2 else u'h', [0, 0, 0, 0])
# Do TIOCGWINSZ (Get)
# Note: We should not pass 'True' as a fourth parameter to 'ioctl'. (True
# is the default.) This causes segmentation faults on some systems.
# See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/364
fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf)
# Return rows, cols
return buf[0], buf[1] | Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple. | Below is the instruction that describes the task:
### Input:
Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple.
### Response:
def _get_size(fileno):
# Thanks to fabric (fabfile.org), and
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
"""
Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple.
"""
# Inline imports, because these modules are not available on Windows.
# (This file is used by ConEmuOutput, which is used on Windows.)
import fcntl
import termios
# Buffer for the C call
buf = array.array(b'h' if six.PY2 else u'h', [0, 0, 0, 0])
# Do TIOCGWINSZ (Get)
# Note: We should not pass 'True' as a fourth parameter to 'ioctl'. (True
# is the default.) This causes segmentation faults on some systems.
# See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/364
fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf)
# Return rows, cols
return buf[0], buf[1] |
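A usage sketch for _get_size above (illustrative; it only works on a Unix system when stdout is attached to a real terminal):

import sys

rows, cols = _get_size(sys.stdout.fileno())
print("terminal is %d rows x %d cols" % (rows, cols))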
def to_json(df, values):
"""Format output for the json response."""
records = []
if df.empty:
return {"data": []}
sum_ = float(np.sum([df[c].iloc[0] for c in values]))
for c in values:
records.append({
"label": values[c],
"value": "%.2f"%np.around(df[c].iloc[0] / sum_, decimals=2)
})
return {
"data" : records
} | Format output for the json response. | Below is the instruction that describes the task:
### Input:
Format output for the json response.
### Response:
def to_json(df, values):
"""Format output for the json response."""
records = []
if df.empty:
return {"data": []}
sum_ = float(np.sum([df[c].iloc[0] for c in values]))
for c in values:
records.append({
"label": values[c],
"value": "%.2f"%np.around(df[c].iloc[0] / sum_, decimals=2)
})
return {
"data" : records
} |
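A usage sketch for to_json above (illustrative; the column names and numbers are invented, and the defining module is assumed to import numpy as np, which the function body uses):

import pandas as pd

df = pd.DataFrame({'apples': [30.0], 'pears': [10.0]})
print(to_json(df, {'apples': 'Apples', 'pears': 'Pears'}))
# {'data': [{'label': 'Apples', 'value': '0.75'}, {'label': 'Pears', 'value': '0.25'}]}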
def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
r'''Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-]
'''
from fluids.optional.pychebfun import Chebfun
to_fit = lambda h: self.V_from_h(h, 'full')
# These high-degree polynomials cannot safely be evaluated using Horner's methods
# chebval is 2.5x as slow but 100% required; around 40 coefficients results are junk
self.c_forward = Chebfun.from_function(np.vectorize(to_fit),
[0.0, self.h_max], N=deg_forward).coefficients().tolist()
self.V_from_h_cheb = lambda x : chebval((2.0*x-self.h_max)/(self.h_max), self.c_forward)
to_fit = lambda h: self.h_from_V(h, 'brenth')
self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
self.h_from_V_cheb = lambda x : chebval((2.0*x-self.V_total)/(self.V_total), self.c_backward)
self.chebyshev = True | r'''Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-] | Below is the instruction that describes the task:
### Input:
r'''Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-]
### Response:
def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
r'''Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-]
'''
from fluids.optional.pychebfun import Chebfun
to_fit = lambda h: self.V_from_h(h, 'full')
# These high-degree polynomials cannot safely be evaluated using Horner's methods
# chebval is 2.5x as slow but 100% required; around 40 coefficients results are junk
self.c_forward = Chebfun.from_function(np.vectorize(to_fit),
[0.0, self.h_max], N=deg_forward).coefficients().tolist()
self.V_from_h_cheb = lambda x : chebval((2.0*x-self.h_max)/(self.h_max), self.c_forward)
to_fit = lambda h: self.h_from_V(h, 'brenth')
self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
self.h_from_V_cheb = lambda x : chebval((2.0*x-self.V_total)/(self.V_total), self.c_backward)
self.chebyshev = True |
def keep_session_alive(self):
"""If the session expired, logs back in."""
try:
self.resources()
except xmlrpclib.Fault as fault:
if fault.faultCode == 5:
self.login()
else:
raise | If the session expired, logs back in. | Below is the instruction that describes the task:
### Input:
If the session expired, logs back in.
### Response:
def keep_session_alive(self):
"""If the session expired, logs back in."""
try:
self.resources()
except xmlrpclib.Fault as fault:
if fault.faultCode == 5:
self.login()
else:
raise |
def createElementsFromHTML(cls, html, encoding='utf-8'):
'''
createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
children of these root-level nodes are accessible via the usual means.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@return list<AdvancedTag> - The root (top-level) tags from parsed html.
NOTE: If there is text outside the tags, they will be lost in this.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
'''
# TODO: If text is present outside a tag, it will be lost.
parser = cls(encoding=encoding)
parser.parseStr(html)
rootNode = parser.getRoot()
rootNode.remove() # Detach from temp document
if isInvisibleRootTag(rootNode):
return rootNode.children
return [rootNode] | createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
children of these root-level nodes are accessible via the usual means.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@return list<AdvancedTag> - The root (top-level) tags from parsed html.
NOTE: If there is text outside the tags, they will be lost in this.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML | Below is the the instruction that describes the task:
### Input:
createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
children of these root-level nodes are accessible via the usual means.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@return list<AdvancedTag> - The root (top-level) tags from parsed html.
NOTE: If there is text outside the tags, they will be lost in this.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
### Response:
def createElementsFromHTML(cls, html, encoding='utf-8'):
'''
createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
children of these root-level nodes are accessible via the usual means.
@param html <str> - Some html data
@param encoding <str> - Encoding to use for document
@return list<AdvancedTag> - The root (top-level) tags from parsed html.
NOTE: If there is text outside the tags, they will be lost in this.
Use createBlocksFromHTML instead if you need to retain both text and tags.
Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
'''
# TODO: If text is present outside a tag, it will be lost.
parser = cls(encoding=encoding)
parser.parseStr(html)
rootNode = parser.getRoot()
rootNode.remove() # Detach from temp document
if isInvisibleRootTag(rootNode):
return rootNode.children
return [rootNode] |
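A short usage sketch, assuming the AdvancedHTMLParser package where this classmethod is defined on the parser class; the HTML snippet is arbitrary.
# Illustrative usage of the classmethod above.
import AdvancedHTMLParser

html = '<div id="a">Hello</div><p class="msg">World</p>'
tags = AdvancedHTMLParser.AdvancedHTMLParser.createElementsFromHTML(html)

for tag in tags:
    print(tag.tagName)    # 'div', then 'p'
    print(tag.outerHTML)  # serialized markup of each root-level element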
def l_endian(v):
""" Little-endian byte order. """
w = struct.pack('<H', v)
return str(binascii.hexlify(w), encoding='gbk') | Little-endian byte order. | Below is the the instruction that describes the task:
### Input:
Little-endian byte order.
### Response:
def l_endian(v):
""" Little-endian byte order. """
w = struct.pack('<H', v)
return str(binascii.hexlify(w), encoding='gbk') |
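A worked example of the byte swap using only the standard library: packing 0x1234 as a little-endian unsigned short gives the bytes 34 12, so the hex string comes back with the low byte first.
import binascii
import struct

value = 0x1234
packed = struct.pack('<H', value)          # b'\x34\x12'
print(binascii.hexlify(packed).decode())   # prints '3412'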
def set_exception(self, exception):
"""Signal unsuccessful completion."""
was_handled = self._finish(self.errbacks, exception)
if not was_handled:
traceback.print_exception(
type(exception), exception, exception.__traceback__) | Signal unsuccessful completion. | Below is the the instruction that describes the task:
### Input:
Signal unsuccessful completion.
### Response:
def set_exception(self, exception):
"""Signal unsuccessful completion."""
was_handled = self._finish(self.errbacks, exception)
if not was_handled:
traceback.print_exception(
type(exception), exception, exception.__traceback__) |
def set_position_target_global_int_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
'''
Sets a desired vehicle position, velocity, and/or acceleration in a
global coordinate system (WGS84). Used by an external
controller to command the vehicle (manual controller
or other system).
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return self.send(self.set_position_target_global_int_encode(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) | Sets a desired vehicle position, velocity, and/or acceleration in a
global coordinate system (WGS84). Used by an external
controller to command the vehicle (manual controller
or other system).
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float) | Below is the the instruction that describes the task:
### Input:
Sets a desired vehicle position, velocity, and/or acceleration in a
global coordinate system (WGS84). Used by an external
controller to command the vehicle (manual controller
or other system).
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
### Response:
def set_position_target_global_int_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
'''
Sets a desired vehicle position, velocity, and/or acceleration in a
global coordinate system (WGS84). Used by an external
controller to command the vehicle (manual controller
or other system).
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return self.send(self.set_position_target_global_int_encode(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) |
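A hedged usage sketch with pymavlink; the connection string, coordinates and altitude are placeholders, and the type_mask keeps only the position fields active by marking velocity, acceleration and yaw as ignored.
# Illustrative only; adapt the connection string and setpoint to your vehicle.
from pymavlink import mavutil

master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()

master.mav.set_position_target_global_int_send(
    0,                                   # time_boot_ms
    master.target_system,
    master.target_component,
    mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
    0b0000111111111000,                  # ignore everything except the position fields
    int(47.397742 * 1e7),                # lat_int (placeholder)
    int(8.545594 * 1e7),                 # lon_int (placeholder)
    10.0,                                # alt
    0, 0, 0,                             # vx, vy, vz (ignored)
    0, 0, 0,                             # afx, afy, afz (ignored)
    0, 0)                                # yaw, yaw_rate (ignored)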
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_interface_mac = ET.SubElement(lldp_neighbor_detail, "remote-interface-mac")
remote_interface_mac.text = kwargs.pop('remote_interface_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_interface_mac = ET.SubElement(lldp_neighbor_detail, "remote-interface-mac")
remote_interface_mac.text = kwargs.pop('remote_interface_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def init_variables(self, verbose=False):
"""Redefine the causes of the graph."""
for j in range(1, self.nodes):
nb_parents = np.random.randint(0, min([self.parents_max, j])+1)
for i in np.random.choice(range(0, j), nb_parents, replace=False):
self.adjacency_matrix[i, j] = 1
try:
self.g = nx.DiGraph(self.adjacency_matrix)
assert not list(nx.simple_cycles(self.g))
except AssertionError:
if verbose:
print("Regenerating, graph non valid...")
self.init_variables()
# Mechanisms
self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])),
self.points, self.noise, noise_coeff=self.noise_coeff)
if sum(self.adjacency_matrix[:, i])
else self.initial_generator for i in range(self.nodes)] | Redefine the causes of the graph. | Below is the the instruction that describes the task:
### Input:
Redefine the causes of the graph.
### Response:
def init_variables(self, verbose=False):
"""Redefine the causes of the graph."""
for j in range(1, self.nodes):
nb_parents = np.random.randint(0, min([self.parents_max, j])+1)
for i in np.random.choice(range(0, j), nb_parents, replace=False):
self.adjacency_matrix[i, j] = 1
try:
self.g = nx.DiGraph(self.adjacency_matrix)
assert not list(nx.simple_cycles(self.g))
except AssertionError:
if verbose:
print("Regenerating, graph non valid...")
self.init_variables()
# Mechanisms
self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])),
self.points, self.noise, noise_coeff=self.noise_coeff)
if sum(self.adjacency_matrix[:, i])
else self.initial_generator for i in range(self.nodes)] |
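The construction can be reproduced standalone: parents are drawn only from lower-indexed nodes, so the adjacency matrix is strictly upper triangular and the cycle check can never fail. A minimal sketch with arbitrary sizes:
# Standalone sketch of the same random-DAG construction; sizes are illustrative.
import networkx as nx
import numpy as np

nodes, parents_max = 6, 3
adj = np.zeros((nodes, nodes), dtype=int)
for j in range(1, nodes):
    nb_parents = np.random.randint(0, min(parents_max, j) + 1)
    for i in np.random.choice(range(j), nb_parents, replace=False):
        adj[i, j] = 1

g = nx.DiGraph(adj)
assert not list(nx.simple_cycles(g))  # always passes: edges only run from low to high index
print(adj)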
def path_complete(self, text: str, line: str, begidx: int, endidx: int,
path_filter: Optional[Callable[[str], bool]] = None) -> List[str]:
"""Performs completion of local file system paths
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param path_filter: optional filter function that determines if a path belongs in the results
this function takes a path as its argument and returns True if the path should
be kept in the results
:return: a list of possible tab completions
"""
# Used to complete ~ and ~user strings
def complete_users() -> List[str]:
# We are returning ~user strings that resolve to directories,
# so don't append a space or quote in the case of a single result.
self.allow_appended_space = False
self.allow_closing_quote = False
users = []
# Windows lacks the pwd module so we can't get a list of users.
# Instead we will return a result once the user enters text that
# resolves to an existing home directory.
if sys.platform.startswith('win'):
expanded_path = os.path.expanduser(text)
if os.path.isdir(expanded_path):
user = text
if add_trailing_sep_if_dir:
user += os.path.sep
users.append(user)
else:
import pwd
# Iterate through a list of users from the password database
for cur_pw in pwd.getpwall():
# Check if the user has an existing home dir
if os.path.isdir(cur_pw.pw_dir):
# Add a ~ to the user to match against text
cur_user = '~' + cur_pw.pw_name
if cur_user.startswith(text):
if add_trailing_sep_if_dir:
cur_user += os.path.sep
users.append(cur_user)
return users
# Determine if a trailing separator should be appended to directory completions
add_trailing_sep_if_dir = False
if endidx == len(line) or (endidx < len(line) and line[endidx] != os.path.sep):
add_trailing_sep_if_dir = True
# Used to replace cwd in the final results
cwd = os.getcwd()
cwd_added = False
# Used to replace expanded user path in final result
orig_tilde_path = ''
expanded_tilde_path = ''
# If the search text is blank, then search in the CWD for *
if not text:
search_str = os.path.join(os.getcwd(), '*')
cwd_added = True
else:
# Purposely don't match any path containing wildcards
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in text:
return []
# Start the search string
search_str = text + '*'
# Handle tilde expansion and completion
if text.startswith('~'):
sep_index = text.find(os.path.sep, 1)
# If there is no slash, then the user is still completing the user after the tilde
if sep_index == -1:
return complete_users()
# Otherwise expand the user dir
else:
search_str = os.path.expanduser(search_str)
# Get what we need to restore the original tilde path later
orig_tilde_path = text[:sep_index]
expanded_tilde_path = os.path.expanduser(orig_tilde_path)
# If the search text does not have a directory, then use the cwd
elif not os.path.dirname(text):
search_str = os.path.join(os.getcwd(), search_str)
cwd_added = True
# Set this to True for proper quoting of paths with spaces
self.matches_delimited = True
# Find all matching path completions
matches = glob.glob(search_str)
# Filter out results that don't belong
if path_filter is not None:
matches = [c for c in matches if path_filter(c)]
# Don't append a space or closing quote to directory
if len(matches) == 1 and os.path.isdir(matches[0]):
self.allow_appended_space = False
self.allow_closing_quote = False
# Sort the matches before any trailing slashes are added
matches.sort(key=self.matches_sort_key)
self.matches_sorted = True
# Build display_matches and add a slash to directories
for index, cur_match in enumerate(matches):
# Display only the basename of this path in the tab-completion suggestions
self.display_matches.append(os.path.basename(cur_match))
# Add a separator after directories if the next character isn't already a separator
if os.path.isdir(cur_match) and add_trailing_sep_if_dir:
matches[index] += os.path.sep
self.display_matches[index] += os.path.sep
# Remove cwd if it was added to match the text readline expects
if cwd_added:
if cwd == os.path.sep:
to_replace = cwd
else:
to_replace = cwd + os.path.sep
matches = [cur_path.replace(to_replace, '', 1) for cur_path in matches]
# Restore the tilde string if we expanded one to match the text readline expects
if expanded_tilde_path:
matches = [cur_path.replace(expanded_tilde_path, orig_tilde_path, 1) for cur_path in matches]
return matches | Performs completion of local file system paths
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param path_filter: optional filter function that determines if a path belongs in the results
this function takes a path as its argument and returns True if the path should
be kept in the results
:return: a list of possible tab completions | Below is the the instruction that describes the task:
### Input:
Performs completion of local file system paths
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param path_filter: optional filter function that determines if a path belongs in the results
this function takes a path as its argument and returns True if the path should
be kept in the results
:return: a list of possible tab completions
### Response:
def path_complete(self, text: str, line: str, begidx: int, endidx: int,
path_filter: Optional[Callable[[str], bool]] = None) -> List[str]:
"""Performs completion of local file system paths
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param path_filter: optional filter function that determines if a path belongs in the results
this function takes a path as its argument and returns True if the path should
be kept in the results
:return: a list of possible tab completions
"""
# Used to complete ~ and ~user strings
def complete_users() -> List[str]:
# We are returning ~user strings that resolve to directories,
# so don't append a space or quote in the case of a single result.
self.allow_appended_space = False
self.allow_closing_quote = False
users = []
# Windows lacks the pwd module so we can't get a list of users.
# Instead we will return a result once the user enters text that
# resolves to an existing home directory.
if sys.platform.startswith('win'):
expanded_path = os.path.expanduser(text)
if os.path.isdir(expanded_path):
user = text
if add_trailing_sep_if_dir:
user += os.path.sep
users.append(user)
else:
import pwd
# Iterate through a list of users from the password database
for cur_pw in pwd.getpwall():
# Check if the user has an existing home dir
if os.path.isdir(cur_pw.pw_dir):
# Add a ~ to the user to match against text
cur_user = '~' + cur_pw.pw_name
if cur_user.startswith(text):
if add_trailing_sep_if_dir:
cur_user += os.path.sep
users.append(cur_user)
return users
# Determine if a trailing separator should be appended to directory completions
add_trailing_sep_if_dir = False
if endidx == len(line) or (endidx < len(line) and line[endidx] != os.path.sep):
add_trailing_sep_if_dir = True
# Used to replace cwd in the final results
cwd = os.getcwd()
cwd_added = False
# Used to replace expanded user path in final result
orig_tilde_path = ''
expanded_tilde_path = ''
# If the search text is blank, then search in the CWD for *
if not text:
search_str = os.path.join(os.getcwd(), '*')
cwd_added = True
else:
# Purposely don't match any path containing wildcards
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in text:
return []
# Start the search string
search_str = text + '*'
# Handle tilde expansion and completion
if text.startswith('~'):
sep_index = text.find(os.path.sep, 1)
# If there is no slash, then the user is still completing the user after the tilde
if sep_index == -1:
return complete_users()
# Otherwise expand the user dir
else:
search_str = os.path.expanduser(search_str)
# Get what we need to restore the original tilde path later
orig_tilde_path = text[:sep_index]
expanded_tilde_path = os.path.expanduser(orig_tilde_path)
# If the search text does not have a directory, then use the cwd
elif not os.path.dirname(text):
search_str = os.path.join(os.getcwd(), search_str)
cwd_added = True
# Set this to True for proper quoting of paths with spaces
self.matches_delimited = True
# Find all matching path completions
matches = glob.glob(search_str)
# Filter out results that don't belong
if path_filter is not None:
matches = [c for c in matches if path_filter(c)]
# Don't append a space or closing quote to directory
if len(matches) == 1 and os.path.isdir(matches[0]):
self.allow_appended_space = False
self.allow_closing_quote = False
# Sort the matches before any trailing slashes are added
matches.sort(key=self.matches_sort_key)
self.matches_sorted = True
# Build display_matches and add a slash to directories
for index, cur_match in enumerate(matches):
# Display only the basename of this path in the tab-completion suggestions
self.display_matches.append(os.path.basename(cur_match))
# Add a separator after directories if the next character isn't already a separator
if os.path.isdir(cur_match) and add_trailing_sep_if_dir:
matches[index] += os.path.sep
self.display_matches[index] += os.path.sep
# Remove cwd if it was added to match the text readline expects
if cwd_added:
if cwd == os.path.sep:
to_replace = cwd
else:
to_replace = cwd + os.path.sep
matches = [cur_path.replace(to_replace, '', 1) for cur_path in matches]
# Restore the tilde string if we expanded one to match the text readline expects
if expanded_tilde_path:
matches = [cur_path.replace(expanded_tilde_path, orig_tilde_path, 1) for cur_path in matches]
return matches |
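In cmd2, where this method is defined, a command usually opts into filesystem completion by delegating its complete_* hook to path_complete; a hedged sketch (the command and filter are made up):
# Illustrative cmd2 application; only the delegation pattern matters here.
import os
import cmd2

class MyApp(cmd2.Cmd):
    def do_open(self, args):
        """open <file> -- pretend to open a file"""
        self.poutput('opening {}'.format(args))

    def complete_open(self, text, line, begidx, endidx):
        # Offer only directories and .txt files (the path_filter argument is optional).
        return self.path_complete(text, line, begidx, endidx,
                                  path_filter=lambda p: os.path.isdir(p) or p.endswith('.txt'))

if __name__ == '__main__':
    MyApp().cmdloop()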
def _create_session(self, scope):
"""
Instantiate a new session object for use in connecting with Degreed
"""
now = datetime.datetime.utcnow()
if self.session is None or self.expires_at is None or now >= self.expires_at:
# Create a new session with a valid token
if self.session:
self.session.close()
oauth_access_token, expires_at = self._get_oauth_access_token(
self.enterprise_configuration.key,
self.enterprise_configuration.secret,
self.enterprise_configuration.degreed_user_id,
self.enterprise_configuration.degreed_user_password,
scope
)
session = requests.Session()
session.timeout = self.SESSION_TIMEOUT
session.headers['Authorization'] = 'Bearer {}'.format(oauth_access_token)
session.headers['content-type'] = 'application/json'
self.session = session
self.expires_at = expires_at | Instantiate a new session object for use in connecting with Degreed | Below is the the instruction that describes the task:
### Input:
Instantiate a new session object for use in connecting with Degreed
### Response:
def _create_session(self, scope):
"""
Instantiate a new session object for use in connecting with Degreed
"""
now = datetime.datetime.utcnow()
if self.session is None or self.expires_at is None or now >= self.expires_at:
# Create a new session with a valid token
if self.session:
self.session.close()
oauth_access_token, expires_at = self._get_oauth_access_token(
self.enterprise_configuration.key,
self.enterprise_configuration.secret,
self.enterprise_configuration.degreed_user_id,
self.enterprise_configuration.degreed_user_password,
scope
)
session = requests.Session()
session.timeout = self.SESSION_TIMEOUT
session.headers['Authorization'] = 'Bearer {}'.format(oauth_access_token)
session.headers['content-type'] = 'application/json'
self.session = session
self.expires_at = expires_at |
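The same lazy-refresh idea, reduced to a self-contained sketch; the token endpoint, grant type and response fields below are generic OAuth placeholders, not the actual Degreed API.
# Generic bearer-token session with lazy refresh; endpoint and payload are hypothetical.
import datetime
import requests

class TokenSession:
    TOKEN_URL = 'https://example.com/oauth/token'  # placeholder endpoint

    def __init__(self, client_id, client_secret):
        self._creds = (client_id, client_secret)
        self.session = None
        self.expires_at = None

    def _ensure_session(self):
        now = datetime.datetime.utcnow()
        if self.session is None or self.expires_at is None or now >= self.expires_at:
            resp = requests.post(self.TOKEN_URL, auth=self._creds,
                                 data={'grant_type': 'client_credentials'})
            resp.raise_for_status()
            token = resp.json()
            if self.session:
                self.session.close()
            self.session = requests.Session()
            self.session.headers['Authorization'] = 'Bearer {}'.format(token['access_token'])
            self.expires_at = now + datetime.timedelta(seconds=token.get('expires_in', 3600))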
def get_by_natural_key(self, *args):
"""
Return the object corresponding to the provided natural key.
(This is a generic implementation of the standard Django function)
"""
kwargs = self.natural_key_kwargs(*args)
# Since kwargs already has __ lookups in it, we could just do this:
# return self.get(**kwargs)
# But, we should call each related model's get_by_natural_key in case
# it's been overridden
for name, rel_to in self.model.get_natural_key_info():
if not rel_to:
continue
# Extract natural key for related object
nested_key = extract_nested_key(kwargs, rel_to, name)
if nested_key:
# Update kwargs with related object
try:
kwargs[name] = rel_to.objects.get_by_natural_key(
*nested_key
)
except rel_to.DoesNotExist:
# If related object doesn't exist, assume this one doesn't
raise self.model.DoesNotExist()
else:
kwargs[name] = None
return self.get(**kwargs) | Return the object corresponding to the provided natural key.
(This is a generic implementation of the standard Django function) | Below is the the instruction that describes the task:
### Input:
Return the object corresponding to the provided natural key.
(This is a generic implementation of the standard Django function)
### Response:
def get_by_natural_key(self, *args):
"""
Return the object corresponding to the provided natural key.
(This is a generic implementation of the standard Django function)
"""
kwargs = self.natural_key_kwargs(*args)
# Since kwargs already has __ lookups in it, we could just do this:
# return self.get(**kwargs)
# But, we should call each related model's get_by_natural_key in case
# it's been overridden
for name, rel_to in self.model.get_natural_key_info():
if not rel_to:
continue
# Extract natural key for related object
nested_key = extract_nested_key(kwargs, rel_to, name)
if nested_key:
# Update kwargs with related object
try:
kwargs[name] = rel_to.objects.get_by_natural_key(
*nested_key
)
except rel_to.DoesNotExist:
# If related object doesn't exist, assume this one doesn't
raise self.model.DoesNotExist()
else:
kwargs[name] = None
return self.get(**kwargs) |
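For context, the standard Django convention this manager generalizes looks like the following minimal sketch (model and field names are invented):
# Hypothetical Django models illustrating plain natural keys.
from django.db import models

class AuthorManager(models.Manager):
    def get_by_natural_key(self, name):
        return self.get(name=name)

class Author(models.Model):
    name = models.CharField(max_length=100, unique=True)
    objects = AuthorManager()

    def natural_key(self):
        return (self.name,)

class Book(models.Model):
    title = models.CharField(max_length=200)
    author = models.ForeignKey(Author, on_delete=models.CASCADE)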
def process_apk(self, data, name):
"""
Processes Android application
:param data:
:param name:
:return:
"""
try:
from apk_parse.apk import APK
except Exception as e:
logger.warning('Could not import apk_parse, try running: pip install apk_parse_ph4')
return [TestResult(fname=name, type='apk-pem-cert', error='cannot-import')]
ret = []
try:
from cryptography.x509.base import load_der_x509_certificate
apkf = APK(data, process_now=False, process_file_types=False, raw=True,
temp_dir=self.args.tmp_dir)
apkf.process()
self.num_apk += 1
pem = apkf.cert_pem
aux = {'subtype': 'apk'}
x509 = load_der_x509_certificate(pem_to_der(pem), self.get_backend())
sub = self.process_x509(x509, name=name, idx=0, data=data, pem=True, source='apk-pem-cert', aux=aux)
ret.append(sub)
except Exception as e:
logger.debug('Exception in processing APK %s : %s' % (name, e))
self.trace_logger.log(e)
return ret | Processes Android application
:param data:
:param name:
:return: | Below is the the instruction that describes the task:
### Input:
Processes Android application
:param data:
:param name:
:return:
### Response:
def process_apk(self, data, name):
"""
Processes Android application
:param data:
:param name:
:return:
"""
try:
from apk_parse.apk import APK
except Exception as e:
logger.warning('Could not import apk_parse, try running: pip install apk_parse_ph4')
return [TestResult(fname=name, type='apk-pem-cert', error='cannot-import')]
ret = []
try:
from cryptography.x509.base import load_der_x509_certificate
apkf = APK(data, process_now=False, process_file_types=False, raw=True,
temp_dir=self.args.tmp_dir)
apkf.process()
self.num_apk += 1
pem = apkf.cert_pem
aux = {'subtype': 'apk'}
x509 = load_der_x509_certificate(pem_to_der(pem), self.get_backend())
sub = self.process_x509(x509, name=name, idx=0, data=data, pem=True, source='apk-pem-cert', aux=aux)
ret.append(sub)
except Exception as e:
logger.debug('Exception in processing APK %s : %s' % (name, e))
self.trace_logger.log(e)
return ret |
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
if not tf.get_variable_scope().reuse:
tf.summary.audio(name + '-au', tensor, sample_rate) | r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | Below is the the instruction that describes the task:
### Input:
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
### Response:
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
if not tf.get_variable_scope().reuse:
tf.summary.audio(name + '-au', tensor, sample_rate) |
def is_mode_supported(mode, ver):
"""\
Returns if `mode` is supported by `version`.
Note: This function does not check if `version` is actually a valid
(Micro) QR Code version. Invalid versions like ``41`` may return an illegal
value.
:param int mode: Canonicalized mode.
:param int or None ver: (Micro) QR Code version constant.
:rtype: bool
"""
ver = None if ver > 0 else ver
try:
return ver in consts.SUPPORTED_MODES[mode]
except KeyError:
raise ModeError('Unknown mode "{0}"'.format(mode)) | \
Returns if `mode` is supported by `version`.
Note: This function does not check if `version` is actually a valid
(Micro) QR Code version. Invalid versions like ``41`` may return an illegal
value.
:param int mode: Canonicalized mode.
:param int or None ver: (Micro) QR Code version constant.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
\
Returns if `mode` is supported by `version`.
Note: This function does not check if `version` is actually a valid
(Micro) QR Code version. Invalid versions like ``41`` may return an illegal
value.
:param int mode: Canonicalized mode.
:param int or None ver: (Micro) QR Code version constant.
:rtype: bool
### Response:
def is_mode_supported(mode, ver):
"""\
Returns if `mode` is supported by `version`.
Note: This function does not check if `version` is actually a valid
(Micro) QR Code version. Invalid versions like ``41`` may return an illegal
value.
:param int mode: Canonicalized mode.
:param int or None ver: (Micro) QR Code version constant.
:rtype: bool
"""
ver = None if ver > 0 else ver
try:
return ver in consts.SUPPORTED_MODES[mode]
except KeyError:
raise ModeError('Unknown mode "{0}"'.format(mode)) |
def replace(pretty, old_str, new_str):
""" Replace strings giving some info on where
the replacement was done
"""
out_str = ''
line_number = 1
changes = 0
for line in pretty.splitlines(keepends=True):
new_line = line.replace(old_str, new_str)
if line.find(old_str) != -1:
logging.debug('%s', line_number)
logging.debug('< %s', line)
logging.debug('> %s', new_line)
changes += 1
out_str += new_line
line_number += 1
logging.info('Total changes(%s): %s', old_str, changes)
return out_str | Replace strings giving some info on where
the replacement was done | Below is the the instruction that describes the task:
### Input:
Replace strings giving some info on where
the replacement was done
### Response:
def replace(pretty, old_str, new_str):
""" Replace strings giving some info on where
the replacement was done
"""
out_str = ''
line_number = 1
changes = 0
for line in pretty.splitlines(keepends=True):
new_line = line.replace(old_str, new_str)
if line.find(old_str) != -1:
logging.debug('%s', line_number)
logging.debug('< %s', line)
logging.debug('> %s', new_line)
changes += 1
out_str += new_line
line_number += 1
logging.info('Total changes(%s): %s', old_str, changes)
return out_str |
def socket_parse(self, astr_destination):
'''
Examines <astr_destination> and if of form <str1>:<str2> assumes
that <str1> is a host to send datagram comms to over port <str2>.
Returns True or False.
'''
t_socketInfo = astr_destination.partition(':')
if len(t_socketInfo[1]):
self._b_isSocket = True
self._socketRemote = t_socketInfo[0]
self._socketPort = t_socketInfo[2]
else:
self._b_isSocket = False
return self._b_isSocket | Examines <astr_destination> and if of form <str1>:<str2> assumes
that <str1> is a host to send datagram comms to over port <str2>.
Returns True or False. | Below is the the instruction that describes the task:
### Input:
Examines <astr_destination> and if of form <str1>:<str2> assumes
that <str1> is a host to send datagram comms to over port <str2>.
Returns True or False.
### Response:
def socket_parse(self, astr_destination):
'''
Examines <astr_destination> and if of form <str1>:<str2> assumes
that <str1> is a host to send datagram comms to over port <str2>.
Returns True or False.
'''
t_socketInfo = astr_destination.partition(':')
if len(t_socketInfo[1]):
self._b_isSocket = True
self._socketRemote = t_socketInfo[0]
self._socketPort = t_socketInfo[2]
else:
self._b_isSocket = False
return self._b_isSocket |
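The split itself is just str.partition; a quick worked example of the two branches:
# Standard-library demonstration of the host:port split used above.
print('localhost:1701'.partition(':'))   # ('localhost', ':', '1701')  -> treated as a socket
print('/tmp/output.log'.partition(':'))  # ('/tmp/output.log', '', '') -> not a socket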
def save_hdf(self, filename, path='', overwrite=False, append=False):
"""Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
and object properties are also attached, so suitable for
re-loading via :func:`StarModel.load_hdf`.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated.
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
else:
store.close()
self.samples.to_hdf(filename, '{}/samples'.format(path))
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/samples'.format(path)).attrs
attrs.properties = self.properties
attrs.ic_type = type(self.ic)
attrs.maxAV = self.maxAV
attrs.max_distance = self.max_distance
attrs.min_logg = self.min_logg
attrs.use_emcee = self.use_emcee
attrs._mnest_basename = self._mnest_basename
attrs.name = self.name
store.close() | Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
and object properties are also attached, so suitable for
re-loading via :func:`StarModel.load_hdf`.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated. | Below is the the instruction that describes the task:
### Input:
Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
and object properties are also attached, so suitable for
re-loading via :func:`StarModel.load_hdf`.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated.
### Response:
def save_hdf(self, filename, path='', overwrite=False, append=False):
"""Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
and object properties are also attached, so suitable for
re-loading via :func:`StarModel.load_hdf`.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated.
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
else:
store.close()
self.samples.to_hdf(filename, '{}/samples'.format(path))
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/samples'.format(path)).attrs
attrs.properties = self.properties
attrs.ic_type = type(self.ic)
attrs.maxAV = self.maxAV
attrs.max_distance = self.max_distance
attrs.min_logg = self.min_logg
attrs.use_emcee = self.use_emcee
attrs._mnest_basename = self._mnest_basename
attrs.name = self.name
store.close() |
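A hedged sketch of reading such a file back, mirroring what a matching load_hdf would do; the filename is a placeholder and the empty path default from above is assumed.
# Illustrative read-back of the stored samples and attached metadata.
import pandas as pd

samples = pd.read_hdf('star.h5', '/samples')
with pd.HDFStore('star.h5') as store:
    attrs = store.get_storer('/samples').attrs
    print(attrs.name, attrs.maxAV, attrs.use_emcee)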
def setproctitle(text):
"""
This is a wrapper for setproctitle.setproctitle(). The call sets
'text' as the new process title and returns the previous value.
The module is commonly not installed. If missing, nothing is changed,
and the call returns None.
The module is described here: https://pypi.python.org/pypi/setproctitle
"""
try:
import setproctitle
except Exception as e:
return None
else: # pragma: no cover
prev = setproctitle.getproctitle()
setproctitle.setproctitle(text)
return prev | This is a wrapper for setproctitle.setproctitle(). The call sets
'text' as the new process title and returns the previous value.
The module is commonly not installed. If missing, nothing is changed,
and the call returns None.
The module is described here: https://pypi.python.org/pypi/setproctitle | Below is the the instruction that describes the task:
### Input:
This is a wrapper for setproctitle.setproctitle(). The call sets
'text' as the new process title and returns the previous value.
The module is commonly not installed. If missing, nothing is changed,
and the call returns None.
The module is described here: https://pypi.python.org/pypi/setproctitle
### Response:
def setproctitle(text):
"""
This is a wrapper for setproctitle.setproctitle(). The call sets
'text' as the new process title and returns the previous value.
The module is commonly not installed. If missing, nothing is changed,
and the call returns None.
The module is described here: https://pypi.python.org/pypi/setproctitle
"""
try:
import setproctitle
except Exception as e:
return None
else: # pragma: no cover
prev = setproctitle.getproctitle()
setproctitle.setproctitle(text)
return prev |
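Typical use is to retitle a worker process and restore the previous title afterwards; the long-running task below is hypothetical, and the call is a no-op returning None when the optional package is missing.
# Illustrative use of the wrapper above.
prev = setproctitle('myapp: worker 1')
try:
    do_work()  # hypothetical long-running task
finally:
    if prev is not None:
        setproctitle(prev)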