code (stringlengths 75-104k) | docstring (stringlengths 1-46.9k) | text (stringlengths 164-112k)
---|---|---|
def run(self, file, updateconfig=True, clean=False, path=None):
"""
Run SExtractor.
If updateconfig is True (default), the configuration
files will be updated before running SExtractor.
If clean is True (default: False), configuration files
(if any) will be deleted after SExtractor terminates.
"""
if updateconfig:
self.update_config()
# Try to find SExtractor program
# This will raise an exception if it failed
self.program, self.version = self.setup(path)
commandline = (
self.program + " -c " + self.config['CONFIG_FILE'] + " " + file)
# print commandline
rcode = os.system(commandline)
if (rcode):
raise SExtractorException(
"SExtractor command [%s] failed." % commandline
)
if clean:
self.clean() | Run SExtractor.
If updateconfig is True (default), the configuration
files will be updated before running SExtractor.
If clean is True (default: False), configuration files
(if any) will be deleted after SExtractor terminates. | Below is the instruction that describes the task:
### Input:
Run SExtractor.
If updateconfig is True (default), the configuration
files will be updated before running SExtractor.
If clean is True (default: False), configuration files
(if any) will be deleted after SExtractor terminates.
### Response:
def run(self, file, updateconfig=True, clean=False, path=None):
"""
Run SExtractor.
If updateconfig is True (default), the configuration
files will be updated before running SExtractor.
If clean is True (default: False), configuration files
(if any) will be deleted after SExtractor terminates.
"""
if updateconfig:
self.update_config()
# Try to find SExtractor program
# This will raise an exception if it failed
self.program, self.version = self.setup(path)
commandline = (
self.program + " -c " + self.config['CONFIG_FILE'] + " " + file)
# print commandline
rcode = os.system(commandline)
if (rcode):
raise SExtractorException(
"SExtractor command [%s] failed." % commandline
)
if clean:
self.clean() |
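A minimal usage sketch for the method above; the SExtractor class name and its construction are assumptions based on the code, not a documented API:

sex = SExtractor()                          # hypothetical wrapper object holding config/setup/clean
sex.config['CONFIG_FILE'] = 'default.sex'   # the key run() reads above
sex.run('image.fits', updateconfig=True, clean=True)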
def params(self):
"""
Returns all params ordered by name. The default implementation
uses :py:func:`dir` to get all attributes of type
:py:class:`Param`.
"""
if self._params is None:
self._params = list(filter(lambda attr: isinstance(attr, Param),
[getattr(self, x) for x in dir(self) if x != "params" and
not isinstance(getattr(type(self), x, None), property)]))
return self._params | Returns all params ordered by name. The default implementation
uses :py:func:`dir` to get all attributes of type
:py:class:`Param`. | Below is the instruction that describes the task:
### Input:
Returns all params ordered by name. The default implementation
uses :py:func:`dir` to get all attributes of type
:py:class:`Param`.
### Response:
def params(self):
"""
Returns all params ordered by name. The default implementation
uses :py:func:`dir` to get all attributes of type
:py:class:`Param`.
"""
if self._params is None:
self._params = list(filter(lambda attr: isinstance(attr, Param),
[getattr(self, x) for x in dir(self) if x != "params" and
not isinstance(getattr(type(self), x, None), property)]))
return self._params |
def get_node_ip_address(address="8.8.8.8:53"):
"""Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except Exception as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == 101:
try:
# try get node ip address from host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address | Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node. | Below is the instruction that describes the task:
### Input:
Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
### Response:
def get_node_ip_address(address="8.8.8.8:53"):
"""Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except Exception as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == 101:
try:
# try get node ip address from host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address |
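A quick usage sketch; the probe target below is a made-up internal service, but any reachable host:port on the network of interest works:

local_ip = get_node_ip_address("10.0.0.1:6379")   # hypothetical in-cluster service
print(local_ip)   # the interface IP used to reach it, or "127.0.0.1" on failure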
def __rmfile(path):
"""Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("rmfile: %s" % path)
try:
os.remove(path)
return True
except Exception as e:
logger.error("rmfile: %s failed! Error: %s" % (path, e))
return False | Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise. | Below is the instruction that describes the task:
### Input:
Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
### Response:
def __rmfile(path):
"""Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("rmfile: %s" % path)
try:
os.remove(path)
return True
except Exception as e:
logger.error("rmfile: %s failed! Error: %s" % (path, e))
return False |
def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET,
strict=False, print_caller=PRINT_CALLER,
lbl='files'):
""" checks existance before removing. then tries to remove exisint paths """
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_existing_fpaths')
fpath_list_ = ut.filter_Nones(fpath_list)
exists_list = list(map(exists, fpath_list_))
if verbose:
n_total = len(fpath_list)
n_valid = len(fpath_list_)
n_exist = sum(exists_list)
print('[util_path.remove_existing_fpaths] request delete of %d %s' % (
n_total, lbl))
if n_valid != n_total:
print(('[util_path.remove_existing_fpaths] '
'trying to delete %d/%d non None %s ') %
(n_valid, n_total, lbl))
print(('[util_path.remove_existing_fpaths] '
' %d/%d exist and need to be deleted')
% (n_exist, n_valid))
existing_fpath_list = ut.compress(fpath_list_, exists_list)
return remove_fpaths(existing_fpath_list, verbose=verbose, quiet=quiet,
strict=strict, print_caller=False, lbl=lbl) | checks existence before removing, then tries to remove existing paths | Below is the instruction that describes the task:
### Input:
checks existence before removing, then tries to remove existing paths
### Response:
def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET,
strict=False, print_caller=PRINT_CALLER,
lbl='files'):
""" checks existance before removing. then tries to remove exisint paths """
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_existing_fpaths')
fpath_list_ = ut.filter_Nones(fpath_list)
exists_list = list(map(exists, fpath_list_))
if verbose:
n_total = len(fpath_list)
n_valid = len(fpath_list_)
n_exist = sum(exists_list)
print('[util_path.remove_existing_fpaths] request delete of %d %s' % (
n_total, lbl))
if n_valid != n_total:
print(('[util_path.remove_existing_fpaths] '
'trying to delete %d/%d non None %s ') %
(n_valid, n_total, lbl))
print(('[util_path.remove_existing_fpaths] '
' %d/%d exist and need to be deleted')
% (n_exist, n_valid))
existing_fpath_list = ut.compress(fpath_list_, exists_list)
return remove_fpaths(existing_fpath_list, verbose=verbose, quiet=quiet,
strict=strict, print_caller=False, lbl=lbl) |
def read_rtt(jlink):
"""Reads the JLink RTT buffer #0 at 10Hz and prints to stdout.
This method is a polling loop against the connected JLink unit. If
the JLink is disconnected, it will exit. Additionally, if any exceptions
are raised, they will be caught and re-raised after interrupting the
main thread.
sys.stdout.write and sys.stdout.flush are used since target terminals
are expected to transmit newlines, which may or may not line up with the
arbitrarily-chosen 1024-byte buffer that this loop uses to read.
Args:
jlink (pylink.JLink): The JLink to read.
Raises:
Exception on error.
"""
try:
while jlink.connected():
terminal_bytes = jlink.rtt_read(0, 1024)
if terminal_bytes:
sys.stdout.write("".join(map(chr, terminal_bytes)))
sys.stdout.flush()
time.sleep(0.1)
except Exception:
print("IO read thread exception, exiting...")
thread.interrupt_main()
raise | Reads the JLink RTT buffer #0 at 10Hz and prints to stdout.
This method is a polling loop against the connected JLink unit. If
the JLink is disconnected, it will exit. Additionally, if any exceptions
are raised, they will be caught and re-raised after interrupting the
main thread.
sys.stdout.write and sys.stdout.flush are used since target terminals
are expected to transmit newlines, which may or may not line up with the
arbitrarily-chosen 1024-byte buffer that this loop uses to read.
Args:
jlink (pylink.JLink): The JLink to read.
Raises:
Exception on error. | Below is the instruction that describes the task:
### Input:
Reads the JLink RTT buffer #0 at 10Hz and prints to stdout.
This method is a polling loop against the connected JLink unit. If
the JLink is disconnected, it will exit. Additionally, if any exceptions
are raised, they will be caught and re-raised after interrupting the
main thread.
sys.stdout.write and sys.stdout.flush are used since target terminals
are expected to transmit newlines, which may or may not line up with the
arbitrarily-chosen 1024-byte buffer that this loop uses to read.
Args:
jlink (pylink.JLink): The JLink to read.
Raises:
Exception on error.
### Response:
def read_rtt(jlink):
"""Reads the JLink RTT buffer #0 at 10Hz and prints to stdout.
This method is a polling loop against the connected JLink unit. If
the JLink is disconnected, it will exit. Additionally, if any exceptions
are raised, they will be caught and re-raised after interrupting the
main thread.
sys.stdout.write and sys.stdout.flush are used since target terminals
are expected to transmit newlines, which may or may not line up with the
arbitrarily-chosen 1024-byte buffer that this loop uses to read.
Args:
jlink (pylink.JLink): The JLink to read.
Raises:
Exception on error.
"""
try:
while jlink.connected():
terminal_bytes = jlink.rtt_read(0, 1024)
if terminal_bytes:
sys.stdout.write("".join(map(chr, terminal_bytes)))
sys.stdout.flush()
time.sleep(0.1)
except Exception:
print("IO read thread exception, exiting...")
thread.interrupt_main()
raise |
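Because read_rtt blocks until the J-Link disconnects, it is typically launched on a worker thread; a hedged sketch (the device name and RTT setup calls are illustrative and may differ per target):

import threading
import pylink

jlink = pylink.JLink()
jlink.open()
jlink.connect('STM32F407VE')   # assumed target chip name
jlink.rtt_start()
threading.Thread(target=read_rtt, args=(jlink,), daemon=True).start()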
def set_reps(self, reps):
"""set the number of repetitions for the stimuli (reference tone and cal stim)
:param reps: number of times to present the same stimulus
:type reps: int
"""
self.stimulus.setRepCount(reps)
self.refstim.setRepCount(reps) | set the number of repetitions for the stimuli (reference tone and cal stim)
:param reps: number of times to present the same stimulus
:type reps: int | Below is the instruction that describes the task:
### Input:
set the number of repetitions for the stimuli (reference tone and cal stim)
:param reps: number of times to present the same stimulus
:type reps: int
### Response:
def set_reps(self, reps):
"""set the number of repetitions for the stimuli (reference tone and cal stim)
:param reps: number of times to present the same stimulus
:type reps: int
"""
self.stimulus.setRepCount(reps)
self.refstim.setRepCount(reps) |
def git_clone_job_list(job_list):
"""Deal with all git clone jobs in $job_list."""
queue = Queue()
for job in job_list:
queue.put(job)
if len(job_list) < 20:
thread_num = len(job_list)
else:
thread_num = 20
threads = []
for _ in range(thread_num):
thread = Thread(target=git_clone_worker, args=(queue, ))
thread.start()
threads.append(thread)
queue.join()
for _ in range(thread_num):
queue.put(None)
for thread in threads:
thread.join() | Deal with all git clone jobs in $job_list. | Below is the instruction that describes the task:
### Input:
Deal with all git clone jobs in $job_list.
### Response:
def git_clone_job_list(job_list):
"""Deal with all git clone jobs in $job_list."""
queue = Queue()
for job in job_list:
queue.put(job)
if len(job_list) < 20:
thread_num = len(job_list)
else:
thread_num = 20
threads = []
for _ in range(thread_num):
thread = Thread(target=git_clone_worker, args=(queue, ))
thread.start()
threads.append(thread)
queue.join()
for _ in range(thread_num):
queue.put(None)
for thread in threads:
thread.join() |
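git_clone_job_list relies on a git_clone_worker that drains the queue until it sees the None sentinel; a minimal sketch of such a worker, assuming each job is a plain repository URL:

import subprocess

def git_clone_worker(queue):
    while True:
        job = queue.get()
        if job is None:                      # sentinel pushed after queue.join() returns
            break
        try:
            subprocess.check_call(["git", "clone", job])
        finally:
            queue.task_done()                # lets queue.join() in the caller return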
def xgroup_setid(self, stream, group_name, latest_id='$'):
"""Set the latest ID for a consumer group"""
fut = self.execute(b'XGROUP', b'SETID', stream, group_name, latest_id)
return wait_ok(fut) | Set the latest ID for a consumer group | Below is the instruction that describes the task:
### Input:
Set the latest ID for a consumer group
### Response:
def xgroup_setid(self, stream, group_name, latest_id='$'):
"""Set the latest ID for a consumer group"""
fut = self.execute(b'XGROUP', b'SETID', stream, group_name, latest_id)
return wait_ok(fut) |
def dmsToDeg(sign, deg, min, sec):
"""Convert dec sign, degrees, minutes, seconds into a signed angle in
degrees."""
return sign * (deg + min * degPerDmsMin + sec * degPerDmsSec) | Convert dec sign, degrees, minutes, seconds into a signed angle in
degrees. | Below is the instruction that describes the task:
### Input:
Convert dec sign, degrees, minutes, seconds into a signed angle in
degrees.
### Response:
def dmsToDeg(sign, deg, min, sec):
"""Convert dec sign, degrees, minutes, seconds into a signed angle in
degrees."""
return sign * (deg + min * degPerDmsMin + sec * degPerDmsSec) |
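A worked example; the two constants are assumed to be the usual sexagesimal factors (their real definitions live elsewhere in the module):

degPerDmsMin = 1 / 60.0     # assumed value
degPerDmsSec = 1 / 3600.0   # assumed value
# -12 deg 30' 36"  ->  -(12 + 0.5 + 0.01)  ==  -12.51 degrees
print(dmsToDeg(-1, 12, 30, 36))   # -12.51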
def mask_and_dates_for_term(self,
term,
root_mask_term,
workspace,
all_dates):
"""
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
"""
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]
# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
self.extra_rows[root_mask_term] - self.extra_rows[term]
)
return workspace[mask][mask_offset:], all_dates[dates_offset:] | Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term. | Below is the instruction that describes the task:
### Input:
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
### Response:
def mask_and_dates_for_term(self,
term,
root_mask_term,
workspace,
all_dates):
"""
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
"""
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]
# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
self.extra_rows[root_mask_term] - self.extra_rows[term]
)
return workspace[mask][mask_offset:], all_dates[dates_offset:] |
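A toy illustration of the extra-row offset arithmetic above, with made-up counts:

# Suppose the root mask carries 5 extra rows and this term only needs 2.
extra_rows_root, extra_rows_term = 5, 2
dates_offset = extra_rows_root - extra_rows_term   # == 3
# all_dates[3:] then drops the leading rows this term never uses,
# exactly as workspace[mask][mask_offset:] does for the mask.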
def get_timestamp(self, cycle=None, dataset_number=None,
in_minutes=False, full=True):
"""Returns timestamps (in sec or minutes (if in_minutes==True)).
Args:
cycle: cycle number (all if None)
dataset_number: first dataset if None
in_minutes: return values in minutes instead of seconds if True
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
pandas.Series (or list of pandas.Series if cycle=None and full=False)
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
timestamp_header = self.headers_normal.test_time_txt
v = pd.Series()
test = self.datasets[dataset_number].dfdata
if cycle:
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[timestamp_header]
else:
if not full:
self.logger.debug("getting timestapm for all cycles")
v = []
no_cycles = np.amax(test[cycle_index_header])
for j in range(1, no_cycles + 1):
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[timestamp_header])
else:
self.logger.debug("returning full timestamp col")
v = test[timestamp_header]
if in_minutes and v is not None:
v /= 60.0
return v | Returns timestamps (in sec or minutes (if in_minutes==True)).
Args:
cycle: cycle number (all if None)
dataset_number: first dataset if None
in_minutes: return values in minutes instead of seconds if True
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
pandas.Series (or list of pandas.Series if cycle=None and full=False) | Below is the instruction that describes the task:
### Input:
Returns timestamps (in sec or minutes (if in_minutes==True)).
Args:
cycle: cycle number (all if None)
dataset_number: first dataset if None
in_minutes: return values in minutes instead of seconds if True
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
pandas.Series (or list of pandas.Series if cycle=None and full=False)
### Response:
def get_timestamp(self, cycle=None, dataset_number=None,
in_minutes=False, full=True):
"""Returns timestamps (in sec or minutes (if in_minutes==True)).
Args:
cycle: cycle number (all if None)
dataset_number: first dataset if None
in_minutes: return values in minutes instead of seconds if True
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
pandas.Series (or list of pandas.Series if cycle=None and full=False)
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
timestamp_header = self.headers_normal.test_time_txt
v = pd.Series()
test = self.datasets[dataset_number].dfdata
if cycle:
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[timestamp_header]
else:
if not full:
self.logger.debug("getting timestapm for all cycles")
v = []
no_cycles = np.amax(test[cycle_index_header])
for j in range(1, no_cycles + 1):
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[timestamp_header])
else:
self.logger.debug("returning full timestamp col")
v = test[timestamp_header]
if in_minutes and v is not None:
v /= 60.0
return v |
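A hedged usage sketch, assuming `cell` is an instance of this class with a dataset already loaded:

t_all = cell.get_timestamp()                               # full pandas.Series, in seconds
t_cycle5 = cell.get_timestamp(cycle=5, in_minutes=True)    # a single cycle, in minutes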
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
'''draw a line on the image'''
pix1 = pixmapper(pt1)
pix2 = pixmapper(pt2)
(width, height) = image_shape(img)
(ret, pix1, pix2) = cv2.clipLine((0, 0, width, height), pix1, pix2)
if ret is False:
return
cv2.line(img, pix1, pix2, colour, linewidth)
cv2.circle(img, pix2, linewidth*2, colour) | draw a line on the image | Below is the instruction that describes the task:
### Input:
draw a line on the image
### Response:
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
'''draw a line on the image'''
pix1 = pixmapper(pt1)
pix2 = pixmapper(pt2)
(width, height) = image_shape(img)
(ret, pix1, pix2) = cv2.clipLine((0, 0, width, height), pix1, pix2)
if ret is False:
return
cv2.line(img, pix1, pix2, colour, linewidth)
cv2.circle(img, pix2, linewidth*2, colour) |
def decode(self, value):
"""Decode value."""
if self.encoding:
value = value.decode(self.encoding)
return self.deserialize(value) | Decode value. | Below is the instruction that describes the task:
### Input:
Decode value.
### Response:
def decode(self, value):
"""Decode value."""
if self.encoding:
value = value.decode(self.encoding)
return self.deserialize(value) |
def parse(readDataInstance):
"""
Returns a new L{ImageBoundImportDescriptorEntry} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.
@rtype: L{ImageBoundImportDescriptorEntry}
@return: A new {ImageBoundImportDescriptorEntry} object.
"""
boundEntry = ImageBoundImportDescriptorEntry()
boundEntry.timeDateStamp.value = readDataInstance.readDword()
boundEntry.offsetModuleName.value = readDataInstance.readWord()
boundEntry.numberOfModuleForwarderRefs.value = readDataInstance.readWord()
numberOfForwarderRefsEntries = boundEntry.numberOfModuleForwarderRefs.value
if numberOfForwarderRefsEntries:
bytesToRead = numberOfForwarderRefsEntries * ImageBoundForwarderRefEntry().sizeof()
rd = utils.ReadData(readDataInstance.read(bytesToRead))
boundEntry.forwarderRefsList = ImageBoundForwarderRef.parse(rd, numberOfForwarderRefsEntries)
return boundEntry | Returns a new L{ImageBoundImportDescriptorEntry} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.
@rtype: L{ImageBoundImportDescriptorEntry}
@return: A new {ImageBoundImportDescriptorEntry} object. | Below is the instruction that describes the task:
### Input:
Returns a new L{ImageBoundImportDescriptorEntry} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.
@rtype: L{ImageBoundImportDescriptorEntry}
@return: A new {ImageBoundImportDescriptorEntry} object.
### Response:
def parse(readDataInstance):
"""
Returns a new L{ImageBoundImportDescriptorEntry} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.
@rtype: L{ImageBoundImportDescriptorEntry}
@return: A new {ImageBoundImportDescriptorEntry} object.
"""
boundEntry = ImageBoundImportDescriptorEntry()
boundEntry.timeDateStamp.value = readDataInstance.readDword()
boundEntry.offsetModuleName.value = readDataInstance.readWord()
boundEntry.numberOfModuleForwarderRefs.value = readDataInstance.readWord()
numberOfForwarderRefsEntries = boundEntry.numberOfModuleForwarderRefs.value
if numberOfForwarderRefsEntries:
bytesToRead = numberOfForwarderRefsEntries * ImageBoundForwarderRefEntry().sizeof()
rd = utils.ReadData(readDataInstance.read(bytesToRead))
boundEntry.forwarderRefsList = ImageBoundForwarderRef.parse(rd, numberOfForwarderRefsEntries)
return boundEntry |
def _federation_indicators(catalog, central_catalog,
identifier_search=False):
"""Cuenta la cantidad de datasets incluídos tanto en la lista
'catalogs' como en el catálogo central, y genera indicadores a partir
de esa información.
Args:
catalog (dict): catálogo ya parseado
central_catalog (str o dict): ruta a catálogo central, o un dict
con el catálogo ya parseado
"""
result = {
'datasets_federados_cant': None,
'datasets_federados_pct': None,
'datasets_no_federados_cant': None,
'datasets_federados_eliminados_cant': None,
'distribuciones_federadas_cant': None,
'datasets_federados_eliminados': [],
'datasets_no_federados': [],
'datasets_federados': [],
}
try:
central_catalog = readers.read_catalog(central_catalog)
except Exception as e:
msg = u'Error leyendo el catálogo central: {}'.format(str(e))
logger.error(msg)
return result
generator = FederationIndicatorsGenerator(central_catalog, catalog,
id_based=identifier_search)
result.update({
'datasets_federados_cant':
generator.datasets_federados_cant(),
'datasets_no_federados_cant':
generator.datasets_no_federados_cant(),
'datasets_federados_eliminados_cant':
generator.datasets_federados_eliminados_cant(),
'datasets_federados_eliminados':
generator.datasets_federados_eliminados(),
'datasets_no_federados':
generator.datasets_no_federados(),
'datasets_federados':
generator.datasets_federados(),
'datasets_federados_pct':
generator.datasets_federados_pct(),
'distribuciones_federadas_cant':
generator.distribuciones_federadas_cant()
})
return result | Counts how many datasets are included both in the 'catalogs' list
and in the central catalog, and generates indicators from that
information.
Args:
catalog (dict): catalog, already parsed
central_catalog (str or dict): path to the central catalog, or a dict
with the catalog already parsed | Below is the instruction that describes the task:
### Input:
Counts how many datasets are included both in the 'catalogs' list
and in the central catalog, and generates indicators from that
information.
Args:
catalog (dict): catalog, already parsed
central_catalog (str or dict): path to the central catalog, or a dict
with the catalog already parsed
### Response:
def _federation_indicators(catalog, central_catalog,
identifier_search=False):
"""Cuenta la cantidad de datasets incluídos tanto en la lista
'catalogs' como en el catálogo central, y genera indicadores a partir
de esa información.
Args:
catalog (dict): catálogo ya parseado
central_catalog (str o dict): ruta a catálogo central, o un dict
con el catálogo ya parseado
"""
result = {
'datasets_federados_cant': None,
'datasets_federados_pct': None,
'datasets_no_federados_cant': None,
'datasets_federados_eliminados_cant': None,
'distribuciones_federadas_cant': None,
'datasets_federados_eliminados': [],
'datasets_no_federados': [],
'datasets_federados': [],
}
try:
central_catalog = readers.read_catalog(central_catalog)
except Exception as e:
msg = u'Error leyendo el catálogo central: {}'.format(str(e))
logger.error(msg)
return result
generator = FederationIndicatorsGenerator(central_catalog, catalog,
id_based=identifier_search)
result.update({
'datasets_federados_cant':
generator.datasets_federados_cant(),
'datasets_no_federados_cant':
generator.datasets_no_federados_cant(),
'datasets_federados_eliminados_cant':
generator.datasets_federados_eliminados_cant(),
'datasets_federados_eliminados':
generator.datasets_federados_eliminados(),
'datasets_no_federados':
generator.datasets_no_federados(),
'datasets_federados':
generator.datasets_federados(),
'datasets_federados_pct':
generator.datasets_federados_pct(),
'distribuciones_federadas_cant':
generator.distribuciones_federadas_cant()
})
return result |
def parseExtensionArgs(self, args, strict=False):
"""Parse the provider authentication policy arguments into the
internal state of this object
@param args: unqualified provider authentication policy
arguments
@param strict: Whether to raise an exception when bad data is
encountered
@returns: None. The data is parsed into the internal fields of
this object.
"""
policies_str = args.get('auth_policies')
if policies_str and policies_str != 'none':
self.auth_policies = policies_str.split(' ')
nist_level_str = args.get('nist_auth_level')
if nist_level_str:
try:
nist_level = int(nist_level_str)
except ValueError:
if strict:
raise ValueError('nist_auth_level must be an integer between '
'zero and four, inclusive')
else:
self.nist_auth_level = None
else:
if 0 <= nist_level < 5:
self.nist_auth_level = nist_level
auth_time = args.get('auth_time')
if auth_time:
if TIME_VALIDATOR.match(auth_time):
self.auth_time = auth_time
elif strict:
raise ValueError("auth_time must be in RFC3339 format") | Parse the provider authentication policy arguments into the
internal state of this object
@param args: unqualified provider authentication policy
arguments
@param strict: Whether to raise an exception when bad data is
encountered
@returns: None. The data is parsed into the internal fields of
this object. | Below is the instruction that describes the task:
### Input:
Parse the provider authentication policy arguments into the
internal state of this object
@param args: unqualified provider authentication policy
arguments
@param strict: Whether to raise an exception when bad data is
encountered
@returns: None. The data is parsed into the internal fields of
this object.
### Response:
def parseExtensionArgs(self, args, strict=False):
"""Parse the provider authentication policy arguments into the
internal state of this object
@param args: unqualified provider authentication policy
arguments
@param strict: Whether to raise an exception when bad data is
encountered
@returns: None. The data is parsed into the internal fields of
this object.
"""
policies_str = args.get('auth_policies')
if policies_str and policies_str != 'none':
self.auth_policies = policies_str.split(' ')
nist_level_str = args.get('nist_auth_level')
if nist_level_str:
try:
nist_level = int(nist_level_str)
except ValueError:
if strict:
raise ValueError('nist_auth_level must be an integer between '
'zero and four, inclusive')
else:
self.nist_auth_level = None
else:
if 0 <= nist_level < 5:
self.nist_auth_level = nist_level
auth_time = args.get('auth_time')
if auth_time:
if TIME_VALIDATOR.match(auth_time):
self.auth_time = auth_time
elif strict:
raise ValueError("auth_time must be in RFC3339 format") |
def step(self, actions):
"""Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded.
"""
if self._store_rollouts and \
self._rollouts_by_epoch_and_split[self.current_epoch]:
raise ValueError(
"Data for current epoch has already been loaded from disk."
)
(obs, unclipped_rewards, dones) = self._step(actions)
obs = self._preprocess_observations(obs)
(min_reward, max_reward) = self.reward_range
rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
if self._store_rollouts:
unclipped_rewards = unclipped_rewards.astype(np.float64)
encoded_obs = self._encode_observations(obs)
for (rollout, frame, action) in zip(
self._current_batch_rollouts, self._current_batch_frames, actions
):
rollout.append(frame._replace(action=action))
# orud = (observation, reward, unclipped_reward, done)
self._current_batch_frames = [
Frame(*orud, action=None)
for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
]
return (obs, rewards, dones) | Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded. | Below is the instruction that describes the task:
### Input:
Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded.
### Response:
def step(self, actions):
"""Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded.
"""
if self._store_rollouts and \
self._rollouts_by_epoch_and_split[self.current_epoch]:
raise ValueError(
"Data for current epoch has already been loaded from disk."
)
(obs, unclipped_rewards, dones) = self._step(actions)
obs = self._preprocess_observations(obs)
(min_reward, max_reward) = self.reward_range
rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
if self._store_rollouts:
unclipped_rewards = unclipped_rewards.astype(np.float64)
encoded_obs = self._encode_observations(obs)
for (rollout, frame, action) in zip(
self._current_batch_rollouts, self._current_batch_frames, actions
):
rollout.append(frame._replace(action=action))
# orud = (observation, reward, unclipped_reward, done)
self._current_batch_frames = [
Frame(*orud, action=None)
for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
]
return (obs, rewards, dones) |
def make(ctx, check, version, initial_release, skip_sign, sign_only):
"""Perform a set of operations needed to release a single check:
\b
* update the version in __about__.py
* update the changelog
* update the requirements-agent-release.txt file
* update in-toto metadata
* commit the above changes
You can release everything at once by setting the check to `all`.
\b
If you run into issues signing:
\b
- Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub`
"""
# Import lazily since in-toto runs a subprocess to check for gpg2 on load
from ..signing import update_link_metadata, YubikeyException
releasing_all = check == 'all'
valid_checks = get_valid_checks()
if not releasing_all and check not in valid_checks:
abort('Check `{}` is not an Agent-based Integration'.format(check))
# don't run the task on the master branch
if get_current_branch() == 'master':
abort('This task will commit, you do not want to add commits to master directly')
if releasing_all:
if version:
abort('You cannot bump every check to the same version')
checks = sorted(valid_checks)
else:
checks = [check]
if initial_release:
version = '1.0.0'
for check in checks:
if sign_only:
break
elif initial_release and check in BETA_PACKAGES:
continue
# Initial releases will only bump if not already 1.0.0 so no need to always output
if not initial_release:
echo_success('Check `{}`'.format(check))
if version:
# sanity check on the version provided
cur_version = get_version_string(check)
if version == 'final':
# Remove any pre-release metadata
version = finalize_version(cur_version)
else:
# Keep track of intermediate version bumps
prev_version = cur_version
for method in version.split(','):
# Apply any supported version bumping methods. Chaining is required for going
# from mainline releases to development releases since e.g. x.y.z > x.y.z-rc.A.
# So for an initial bug fix dev release you can do `fix,rc`.
if method in VERSION_BUMP:
version = VERSION_BUMP[method](prev_version)
prev_version = version
p_version = parse_version_info(version)
p_current = parse_version_info(cur_version)
if p_version <= p_current:
if initial_release:
continue
else:
abort('Current version is {}, cannot bump to {}'.format(cur_version, version))
else:
cur_version, changelog_types = ctx.invoke(changes, check=check, dry_run=True)
if not changelog_types:
echo_warning('No changes for {}, skipping...'.format(check))
continue
bump_function = get_bump_function(changelog_types)
version = bump_function(cur_version)
if initial_release:
echo_success('Check `{}`'.format(check))
# update the version number
echo_info('Current version of check {}: {}'.format(check, cur_version))
echo_waiting('Bumping to {}... '.format(version), nl=False)
update_version_module(check, cur_version, version)
echo_success('success!')
# update the CHANGELOG
echo_waiting('Updating the changelog... ', nl=False)
# TODO: Avoid double GitHub API calls when bumping all checks at once
ctx.invoke(
changelog,
check=check,
version=version,
old_version=cur_version,
initial=initial_release,
quiet=True,
dry_run=False,
)
echo_success('success!')
commit_targets = [check]
# update the list of integrations to be shipped with the Agent
if check not in NOT_CHECKS:
req_file = get_agent_release_requirements()
commit_targets.append(os.path.basename(req_file))
echo_waiting('Updating the Agent requirements file... ', nl=False)
update_agent_requirements(req_file, check, get_agent_requirement_line(check, version))
echo_success('success!')
echo_waiting('Committing files...')
# commit the changes.
# do not use [ci skip] so releases get built https://docs.gitlab.com/ee/ci/yaml/#skipping-jobs
msg = '[Release] Bumped {} version to {}'.format(check, version)
git_commit(commit_targets, msg)
if not initial_release:
# Reset version
version = None
if sign_only or not skip_sign:
echo_waiting('Updating release metadata...')
echo_info('Please touch your Yubikey immediately after entering your PIN!')
try:
commit_targets = update_link_metadata(checks)
git_commit(commit_targets, '[Release] Update metadata', force=True)
except YubikeyException as e:
abort('A problem occurred while signing metadata: {}'.format(e))
# done
echo_success('All done, remember to push to origin and open a PR to merge these changes on master') | Perform a set of operations needed to release a single check:
\b
* update the version in __about__.py
* update the changelog
* update the requirements-agent-release.txt file
* update in-toto metadata
* commit the above changes
You can release everything at once by setting the check to `all`.
\b
If you run into issues signing:
\b
- Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub` | Below is the instruction that describes the task:
### Input:
Perform a set of operations needed to release a single check:
\b
* update the version in __about__.py
* update the changelog
* update the requirements-agent-release.txt file
* update in-toto metadata
* commit the above changes
You can release everything at once by setting the check to `all`.
\b
If you run into issues signing:
\b
- Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub`
### Response:
def make(ctx, check, version, initial_release, skip_sign, sign_only):
"""Perform a set of operations needed to release a single check:
\b
* update the version in __about__.py
* update the changelog
* update the requirements-agent-release.txt file
* update in-toto metadata
* commit the above changes
You can release everything at once by setting the check to `all`.
\b
If you run into issues signing:
\b
- Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub`
"""
# Import lazily since in-toto runs a subprocess to check for gpg2 on load
from ..signing import update_link_metadata, YubikeyException
releasing_all = check == 'all'
valid_checks = get_valid_checks()
if not releasing_all and check not in valid_checks:
abort('Check `{}` is not an Agent-based Integration'.format(check))
# don't run the task on the master branch
if get_current_branch() == 'master':
abort('This task will commit, you do not want to add commits to master directly')
if releasing_all:
if version:
abort('You cannot bump every check to the same version')
checks = sorted(valid_checks)
else:
checks = [check]
if initial_release:
version = '1.0.0'
for check in checks:
if sign_only:
break
elif initial_release and check in BETA_PACKAGES:
continue
# Initial releases will only bump if not already 1.0.0 so no need to always output
if not initial_release:
echo_success('Check `{}`'.format(check))
if version:
# sanity check on the version provided
cur_version = get_version_string(check)
if version == 'final':
# Remove any pre-release metadata
version = finalize_version(cur_version)
else:
# Keep track of intermediate version bumps
prev_version = cur_version
for method in version.split(','):
# Apply any supported version bumping methods. Chaining is required for going
# from mainline releases to development releases since e.g. x.y.z > x.y.z-rc.A.
# So for an initial bug fix dev release you can do `fix,rc`.
if method in VERSION_BUMP:
version = VERSION_BUMP[method](prev_version)
prev_version = version
p_version = parse_version_info(version)
p_current = parse_version_info(cur_version)
if p_version <= p_current:
if initial_release:
continue
else:
abort('Current version is {}, cannot bump to {}'.format(cur_version, version))
else:
cur_version, changelog_types = ctx.invoke(changes, check=check, dry_run=True)
if not changelog_types:
echo_warning('No changes for {}, skipping...'.format(check))
continue
bump_function = get_bump_function(changelog_types)
version = bump_function(cur_version)
if initial_release:
echo_success('Check `{}`'.format(check))
# update the version number
echo_info('Current version of check {}: {}'.format(check, cur_version))
echo_waiting('Bumping to {}... '.format(version), nl=False)
update_version_module(check, cur_version, version)
echo_success('success!')
# update the CHANGELOG
echo_waiting('Updating the changelog... ', nl=False)
# TODO: Avoid double GitHub API calls when bumping all checks at once
ctx.invoke(
changelog,
check=check,
version=version,
old_version=cur_version,
initial=initial_release,
quiet=True,
dry_run=False,
)
echo_success('success!')
commit_targets = [check]
# update the list of integrations to be shipped with the Agent
if check not in NOT_CHECKS:
req_file = get_agent_release_requirements()
commit_targets.append(os.path.basename(req_file))
echo_waiting('Updating the Agent requirements file... ', nl=False)
update_agent_requirements(req_file, check, get_agent_requirement_line(check, version))
echo_success('success!')
echo_waiting('Committing files...')
# commit the changes.
# do not use [ci skip] so releases get built https://docs.gitlab.com/ee/ci/yaml/#skipping-jobs
msg = '[Release] Bumped {} version to {}'.format(check, version)
git_commit(commit_targets, msg)
if not initial_release:
# Reset version
version = None
if sign_only or not skip_sign:
echo_waiting('Updating release metadata...')
echo_info('Please touch your Yubikey immediately after entering your PIN!')
try:
commit_targets = update_link_metadata(checks)
git_commit(commit_targets, '[Release] Update metadata', force=True)
except YubikeyException as e:
abort('A problem occurred while signing metadata: {}'.format(e))
# done
echo_success('All done, remember to push to origin and open a PR to merge these changes on master') |
def cast(self, dtype):
"""Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
"""
for child in self._children.values():
child.cast(dtype)
for _, param in self.params.items():
param.cast(dtype) | Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type. | Below is the instruction that describes the task:
### Input:
Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
### Response:
def cast(self, dtype):
"""Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
"""
for child in self._children.values():
child.cast(dtype)
for _, param in self.params.items():
param.cast(dtype) |
def radianceSpectrum(Omegas,AbsorptionCoefficient,Environment={'l':100.,'T':296.},
File=None, Format='%e %e', Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
---
"""
# compatibility with older versions
if Wavenumber: Omegas=Wavenumber
l = Environment['l']
T = Environment['T']
Alw = 1-exp(-AbsorptionCoefficient*l)
LBBTw = 2*hh*cc**2*Omegas**3 / (exp(hh*cc*Omegas/(cBolts*T)) - 1) * 1.0E-7
Xsect = Alw*LBBTw # W/sr/cm**2/cm**-1
if File: save_to_file(File,Format,Omegas,Xsect)
return Omegas,Xsect | INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
--- | Below is the instruction that describes the task:
### Input:
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
---
### Response:
def radianceSpectrum(Omegas,AbsorptionCoefficient,Environment={'l':100.,'T':296.},
File=None, Format='%e %e', Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
---
"""
# compatibility with older versions
if Wavenumber: Omegas=Wavenumber
l = Environment['l']
T = Environment['T']
Alw = 1-exp(-AbsorptionCoefficient*l)
LBBTw = 2*hh*cc**2*Omegas**3 / (exp(hh*cc*Omegas/(cBolts*T)) - 1) * 1.0E-7
Xsect = Alw*LBBTw # W/sr/cm**2/cm**-1
if File: save_to_file(File,Format,Omegas,Xsect)
return Omegas,Xsect |
def get_app_model_voice(self, app_model_item):
""" App Model voice
Returns the js menu compatible voice dict if the user
can see it, None otherwise
"""
if app_model_item.get('name', None) is None:
raise ImproperlyConfigured('Model menu voices must have a name key') # noqa
if app_model_item.get('app', None) is None:
raise ImproperlyConfigured('Model menu voices must have an app key') # noqa
return self.get_model_voice(app_model_item.get('app'), app_model_item) | App Model voice
Returns the js menu compatible voice dict if the user
can see it, None otherwise | Below is the instruction that describes the task:
### Input:
App Model voice
Returns the js menu compatible voice dict if the user
can see it, None otherwise
### Response:
def get_app_model_voice(self, app_model_item):
""" App Model voice
Returns the js menu compatible voice dict if the user
can see it, None otherwise
"""
if app_model_item.get('name', None) is None:
raise ImproperlyConfigured('Model menu voices must have a name key') # noqa
if app_model_item.get('app', None) is None:
raise ImproperlyConfigured('Model menu voices must have an app key') # noqa
return self.get_model_voice(app_model_item.get('app'), app_model_item) |
def create(self, *args, **kwargs):
"""
Allow an 'author' kwarg to automatically fill in the created_by and last_modified_by fields.
"""
if kwargs.has_key('author'):
kwargs['created_by'] = kwargs['author']
kwargs['last_modified_by'] = kwargs['author']
del kwargs['author']
return super(CMSPageManager, self).create(*args, **kwargs) | Allow an 'author' kwarg to automatically fill in the created_by and last_modified_by fields. | Below is the the instruction that describes the task:
### Input:
Allow an 'author' kwarg to automatically fill in the created_by and last_modified_by fields.
### Response:
def create(self, *args, **kwargs):
"""
Allow an 'author' kwarg to automatically fill in the created_by and last_modified_by fields.
"""
if kwargs.has_key('author'):
kwargs['created_by'] = kwargs['author']
kwargs['last_modified_by'] = kwargs['author']
del kwargs['author']
return super(CMSPageManager, self).create(*args, **kwargs) |
def parse(self, text):
"""Parse statement/expression to Ast representation."""
self.expr = text
try:
out = ast.parse(text)
except SyntaxError:
self.raise_exception(None, msg='Syntax Error', expr=text)
except:
self.raise_exception(None, msg='Runtime Error', expr=text)
return out | Parse statement/expression to Ast representation. | Below is the the instruction that describes the task:
### Input:
Parse statement/expression to Ast representation.
### Response:
def parse(self, text):
"""Parse statement/expression to Ast representation."""
self.expr = text
try:
out = ast.parse(text)
except SyntaxError:
self.raise_exception(None, msg='Syntax Error', expr=text)
except:
self.raise_exception(None, msg='Runtime Error', expr=text)
return out |
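A hypothetical call, assuming `interp` is an instance of the interpreter class this method belongs to:

tree = interp.parse("x = 1 + 2")   # returns an ast.Module node
# a malformed string such as "x = (" would instead go through raise_exception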
def start(self):
"""Use this method to start the Client after creating it.
Requires no parameters.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ConnectionError`` in case you try to start an already started Client.
"""
if self.is_started:
raise ConnectionError("Client has already been started")
if self.BOT_TOKEN_RE.match(self.session_name):
self.is_bot = True
self.bot_token = self.session_name
self.session_name = self.session_name.split(":")[0]
log.warning('\nWARNING: You are using a bot token as session name!\n'
'This usage will be deprecated soon. Please use a session file name to load '
'an existing session and the bot_token argument to create new sessions.\n'
'More info: https://docs.pyrogram.ml/start/Setup#bot-authorization\n')
self.load_config()
self.load_session()
self.load_plugins()
self.session = Session(
self,
self.dc_id,
self.auth_key
)
self.session.start()
self.is_started = True
try:
if self.user_id is None:
if self.bot_token is None:
self.is_bot = False
self.authorize_user()
else:
self.is_bot = True
self.authorize_bot()
self.save_session()
if not self.is_bot:
if self.takeout:
self.takeout_id = self.send(functions.account.InitTakeoutSession()).id
log.warning("Takeout session {} initiated".format(self.takeout_id))
now = time.time()
if abs(now - self.date) > Client.OFFLINE_SLEEP:
self.peers_by_username = {}
self.peers_by_phone = {}
self.get_initial_dialogs()
self.get_contacts()
else:
self.send(functions.messages.GetPinnedDialogs())
self.get_initial_dialogs_chunk()
else:
self.send(functions.updates.GetState())
except Exception as e:
self.is_started = False
self.session.stop()
raise e
for i in range(self.UPDATES_WORKERS):
self.updates_workers_list.append(
Thread(
target=self.updates_worker,
name="UpdatesWorker#{}".format(i + 1)
)
)
self.updates_workers_list[-1].start()
for i in range(self.DOWNLOAD_WORKERS):
self.download_workers_list.append(
Thread(
target=self.download_worker,
name="DownloadWorker#{}".format(i + 1)
)
)
self.download_workers_list[-1].start()
self.dispatcher.start()
mimetypes.init()
Syncer.add(self)
return self | Use this method to start the Client after creating it.
Requires no parameters.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ConnectionError`` in case you try to start an already started Client. | Below is the instruction that describes the task:
### Input:
Use this method to start the Client after creating it.
Requires no parameters.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ConnectionError`` in case you try to start an already started Client.
### Response:
def start(self):
"""Use this method to start the Client after creating it.
Requires no parameters.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ConnectionError`` in case you try to start an already started Client.
"""
if self.is_started:
raise ConnectionError("Client has already been started")
if self.BOT_TOKEN_RE.match(self.session_name):
self.is_bot = True
self.bot_token = self.session_name
self.session_name = self.session_name.split(":")[0]
log.warning('\nWARNING: You are using a bot token as session name!\n'
'This usage will be deprecated soon. Please use a session file name to load '
'an existing session and the bot_token argument to create new sessions.\n'
'More info: https://docs.pyrogram.ml/start/Setup#bot-authorization\n')
self.load_config()
self.load_session()
self.load_plugins()
self.session = Session(
self,
self.dc_id,
self.auth_key
)
self.session.start()
self.is_started = True
try:
if self.user_id is None:
if self.bot_token is None:
self.is_bot = False
self.authorize_user()
else:
self.is_bot = True
self.authorize_bot()
self.save_session()
if not self.is_bot:
if self.takeout:
self.takeout_id = self.send(functions.account.InitTakeoutSession()).id
log.warning("Takeout session {} initiated".format(self.takeout_id))
now = time.time()
if abs(now - self.date) > Client.OFFLINE_SLEEP:
self.peers_by_username = {}
self.peers_by_phone = {}
self.get_initial_dialogs()
self.get_contacts()
else:
self.send(functions.messages.GetPinnedDialogs())
self.get_initial_dialogs_chunk()
else:
self.send(functions.updates.GetState())
except Exception as e:
self.is_started = False
self.session.stop()
raise e
for i in range(self.UPDATES_WORKERS):
self.updates_workers_list.append(
Thread(
target=self.updates_worker,
name="UpdatesWorker#{}".format(i + 1)
)
)
self.updates_workers_list[-1].start()
for i in range(self.DOWNLOAD_WORKERS):
self.download_workers_list.append(
Thread(
target=self.download_worker,
name="DownloadWorker#{}".format(i + 1)
)
)
self.download_workers_list[-1].start()
self.dispatcher.start()
mimetypes.init()
Syncer.add(self)
return self |
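A minimal, hedged usage sketch; the session name is made up and stop() is assumed to be the counterpart method on the same class:

app = Client("my_account")   # hypothetical session name
app.start()
# ... make API calls ...
app.stop()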
def note_update(self, note_id, coor_x=None, coor_y=None, width=None,
height=None, body=None):
"""Function to update a note (Requires login) (UNTESTED).
Parameters:
note_id (int): Where note_id is the note id.
coor_x (int): The x coordinates of the note in pixels,
with respect to the top-left corner of the image.
coor_y (int): The y coordinates of the note in pixels,
with respect to the top-left corner of the image.
width (int): The width of the note in pixels.
height (int): The height of the note in pixels.
body (str): The body of the note.
"""
params = {
'note[x]': coor_x,
'note[y]': coor_y,
'note[width]': width,
'note[height]': height,
'note[body]': body
}
    return self._get('notes/{0}.json'.format(note_id), params, method='PUT',
auth=True) | Function to update a note (Requires login) (UNTESTED).
Parameters:
note_id (int): Where note_id is the note id.
coor_x (int): The x coordinates of the note in pixels,
with respect to the top-left corner of the image.
coor_y (int): The y coordinates of the note in pixels,
with respect to the top-left corner of the image.
width (int): The width of the note in pixels.
height (int): The height of the note in pixels.
body (str): The body of the note. | Below is the the instruction that describes the task:
### Input:
Function to update a note (Requires login) (UNTESTED).
Parameters:
note_id (int): Where note_id is the note id.
coor_x (int): The x coordinates of the note in pixels,
with respect to the top-left corner of the image.
coor_y (int): The y coordinates of the note in pixels,
with respect to the top-left corner of the image.
width (int): The width of the note in pixels.
height (int): The height of the note in pixels.
body (str): The body of the note.
### Response:
def note_update(self, note_id, coor_x=None, coor_y=None, width=None,
height=None, body=None):
"""Function to update a note (Requires login) (UNTESTED).
Parameters:
note_id (int): Where note_id is the note id.
coor_x (int): The x coordinates of the note in pixels,
with respect to the top-left corner of the image.
coor_y (int): The y coordinates of the note in pixels,
with respect to the top-left corner of the image.
width (int): The width of the note in pixels.
height (int): The height of the note in pixels.
body (str): The body of the note.
"""
params = {
'note[x]': coor_x,
'note[y]': coor_y,
'note[width]': width,
'note[height]': height,
'note[body]': body
}
    return self._get('notes/{0}.json'.format(note_id), params, method='PUT',
auth=True) |
def set_pid():
"""
Explicitly ask the ipython kernel for its pid
"""
global pid
lines = '\n'.join(['import os', '_pid = os.getpid()'])
try:
msg_id = send(lines, silent=True, user_variables=['_pid'])
except TypeError: # change in IPython 3.0+
msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'})
# wait to get message back from kernel
try:
child = get_child_msg(msg_id)
except Empty:
echo("no reply from IPython kernel")
return
try:
pid = int(child['content']['user_variables']['_pid'])
except TypeError: # change in IPython 1.0.dev moved this out
pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
    except KeyError: # change in IPython 3.0+
        try:
            pid = int(
                child['content']['user_expressions']['_pid']['data']['text/plain'])
        except KeyError: # kernel is not running Python, no PID available
            echo("Could not get PID information, kernel not running Python?")
return pid | Explicitly ask the ipython kernel for its pid | Below is the the instruction that describes the task:
### Input:
Explicitly ask the ipython kernel for its pid
### Response:
def set_pid():
"""
Explicitly ask the ipython kernel for its pid
"""
global pid
lines = '\n'.join(['import os', '_pid = os.getpid()'])
try:
msg_id = send(lines, silent=True, user_variables=['_pid'])
except TypeError: # change in IPython 3.0+
msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'})
# wait to get message back from kernel
try:
child = get_child_msg(msg_id)
except Empty:
echo("no reply from IPython kernel")
return
try:
pid = int(child['content']['user_variables']['_pid'])
except TypeError: # change in IPython 1.0.dev moved this out
pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
    except KeyError: # change in IPython 3.0+
        try:
            pid = int(
                child['content']['user_expressions']['_pid']['data']['text/plain'])
        except KeyError: # kernel is not running Python, no PID available
            echo("Could not get PID information, kernel not running Python?")
return pid |
def delete(self, query_id=None, **kwargs): # pragma: no cover
"""Delete a query job.
Uses the DELETE HTTP method to delete a query job. After calling
this endpoint, it is an error to poll for query results using
the queryId specified here.
Args:
query_id (str): Specifies the ID of the query job.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``logging_query.py`` example.
"""
path = "/logging-service/v1/queries/{}".format(query_id)
r = self._httpclient.request(
method="DELETE",
url=self.url,
path=path,
**kwargs
)
return r | Delete a query job.
Uses the DELETE HTTP method to delete a query job. After calling
this endpoint, it is an error to poll for query results using
the queryId specified here.
Args:
query_id (str): Specifies the ID of the query job.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``logging_query.py`` example. | Below is the the instruction that describes the task:
### Input:
Delete a query job.
Uses the DELETE HTTP method to delete a query job. After calling
this endpoint, it is an error to poll for query results using
the queryId specified here.
Args:
query_id (str): Specifies the ID of the query job.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``logging_query.py`` example.
### Response:
def delete(self, query_id=None, **kwargs): # pragma: no cover
"""Delete a query job.
Uses the DELETE HTTP method to delete a query job. After calling
this endpoint, it is an error to poll for query results using
the queryId specified here.
Args:
query_id (str): Specifies the ID of the query job.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``logging_query.py`` example.
"""
path = "/logging-service/v1/queries/{}".format(query_id)
r = self._httpclient.request(
method="DELETE",
url=self.url,
path=path,
**kwargs
)
return r |
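A hedged sketch of calling delete() on a pancloud LoggingService; the endpoint URL, credential handling and query_id value are placeholders, not values taken from this document.

from pancloud import LoggingService, Credentials

ls = LoggingService(
    url="https://api.us.paloaltonetworks.com",
    credentials=Credentials()
)
r = ls.delete(query_id="11111111-2222-3333-4444-555555555555")
print(r.status_code)   # after this call the job can no longer be polled for results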
def check_categories(lines):
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if found_end is False:
num_rc = num_rc + 1
else:
found_end = True
max_rcat = 15
if max_rcat > len(lines):
max_rcat = len(lines) - 1
num_cc = 0
for i in range(max_rcat):
ccat_line = lines[i + 1].split('\t')
# make sure that line has length greater than one to prevent false cats from
# trailing new lines at end of matrix
if ccat_line[0] == '' and len(ccat_line) > 1:
num_cc = num_cc + 1
num_labels = {}
num_labels['row'] = num_rc + 1
num_labels['col'] = num_cc + 1
return num_labels | find out how many row and col categories are available | Below is the the instruction that describes the task:
### Input:
find out how many row and col categories are available
### Response:
def check_categories(lines):
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if found_end is False:
num_rc = num_rc + 1
else:
found_end = True
max_rcat = 15
if max_rcat > len(lines):
max_rcat = len(lines) - 1
num_cc = 0
for i in range(max_rcat):
ccat_line = lines[i + 1].split('\t')
# make sure that line has length greater than one to prevent false cats from
# trailing new lines at end of matrix
if ccat_line[0] == '' and len(ccat_line) > 1:
num_cc = num_cc + 1
num_labels = {}
num_labels['row'] = num_rc + 1
num_labels['col'] = num_cc + 1
return num_labels |
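A small worked example under the tab-delimited layout this parser expects: leading blank cells in the header for each row-label column, and a blank first cell for each column-category row.

lines = [
    '\t\tcol-A\tcol-B',            # header row: 1 row-name column + 1 row-category column
    '\tcol-cat\ttype-1\ttype-2',   # one column-category row
    'gene-1\trow-cat-1\t1\t2',
    'gene-2\trow-cat-1\t3\t4',
]
print(check_categories(lines))     # {'row': 2, 'col': 2}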
def get_expire_time(cache_duration=None, valid_until=None):
"""
Compares 2 dates and returns the earliest.
:param cache_duration: The duration, as a string.
:type: string
:param valid_until: The valid until date, as a string or as a timestamp
:type: string
:return: The expiration time.
:rtype: int
"""
expire_time = None
if cache_duration is not None:
expire_time = OneLogin_Saml2_Utils.parse_duration(cache_duration)
if valid_until is not None:
if isinstance(valid_until, int):
valid_until_time = valid_until
else:
valid_until_time = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
if expire_time is None or expire_time > valid_until_time:
expire_time = valid_until_time
if expire_time is not None:
return '%d' % expire_time
return None | Compares 2 dates and returns the earliest.
:param cache_duration: The duration, as a string.
:type: string
:param valid_until: The valid until date, as a string or as a timestamp
:type: string
:return: The expiration time.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Compares 2 dates and returns the earliest.
:param cache_duration: The duration, as a string.
:type: string
:param valid_until: The valid until date, as a string or as a timestamp
:type: string
:return: The expiration time.
:rtype: int
### Response:
def get_expire_time(cache_duration=None, valid_until=None):
"""
Compares 2 dates and returns the earliest.
:param cache_duration: The duration, as a string.
:type: string
:param valid_until: The valid until date, as a string or as a timestamp
:type: string
:return: The expiration time.
:rtype: int
"""
expire_time = None
if cache_duration is not None:
expire_time = OneLogin_Saml2_Utils.parse_duration(cache_duration)
if valid_until is not None:
if isinstance(valid_until, int):
valid_until_time = valid_until
else:
valid_until_time = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
if expire_time is None or expire_time > valid_until_time:
expire_time = valid_until_time
if expire_time is not None:
return '%d' % expire_time
return None |
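A hedged usage sketch, assuming the python3-saml layout where this helper is a static method on OneLogin_Saml2_Utils.

from onelogin.saml2.utils import OneLogin_Saml2_Utils

expires = OneLogin_Saml2_Utils.get_expire_time(
    cache_duration='PT1H',               # ISO 8601 duration: one hour from now
    valid_until='2030-01-01T00:00:00Z'   # absolute deadline
)
print(expires)   # string timestamp of whichever bound comes first, or None if both inputs are None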
def named_eq_relations(self, name, neg=False):
'''Returns list of named eqLinks.
<name> may be string or list.
'''
if self.eqLinks and not neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.relation.name == name,
self.eqLinks)
elif isinstance(name, list):
return filter(lambda x: x.relation.name in name,
self.eqLinks)
else:
            return None  # should raise an error
elif self.eqLinks and neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.relation.name != name,
self.eqLinks)
elif isinstance(name, list):
return filter(lambda x: x.relation.name not in name,
self.eqLinks)
else:
            return None  # should raise an error
else:
return None | Returns list of named eqLinks.
<name> may be string or list. | Below is the the instruction that describes the task:
### Input:
Returns list of named eqLinks.
<name> may be string or list.
### Response:
def named_eq_relations(self, name, neg=False):
'''Returns list of named eqLinks.
<name> may be string or list.
'''
if self.eqLinks and not neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.relation.name == name,
self.eqLinks)
elif isinstance(name, list):
return filter(lambda x: x.relation.name in name,
self.eqLinks)
else:
            return None  # should raise an error
elif self.eqLinks and neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.relation.name != name,
self.eqLinks)
elif isinstance(name, list):
return filter(lambda x: x.relation.name not in name,
self.eqLinks)
else:
            return None  # should raise an error
else:
return None |
def fetch_git_package(self, config):
"""Make a remote git repository available for local use.
Args:
config (dict): git config dictionary
"""
# only loading git here when needed to avoid load errors on systems
# without git installed
from git import Repo
ref = self.determine_git_ref(config)
dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)
cached_dir_path = os.path.join(self.package_cache_dir, dir_name)
# We can skip cloning the repo if it's already been cached
if not os.path.isdir(cached_dir_path):
logger.debug("Remote repo %s does not appear to have been "
"previously downloaded - starting clone to %s",
config['uri'],
cached_dir_path)
tmp_dir = tempfile.mkdtemp(prefix='stacker')
try:
tmp_repo_path = os.path.join(tmp_dir, dir_name)
with Repo.clone_from(config['uri'], tmp_repo_path) as repo:
repo.head.reference = ref
repo.head.reset(index=True, working_tree=True)
shutil.move(tmp_repo_path, self.package_cache_dir)
finally:
shutil.rmtree(tmp_dir)
else:
logger.debug("Remote repo %s appears to have been previously "
"cloned to %s -- bypassing download",
config['uri'],
cached_dir_path)
# Update sys.path & merge in remote configs (if necessary)
self.update_paths_and_config(config=config,
pkg_dir_name=dir_name) | Make a remote git repository available for local use.
Args:
config (dict): git config dictionary | Below is the the instruction that describes the task:
### Input:
Make a remote git repository available for local use.
Args:
config (dict): git config dictionary
### Response:
def fetch_git_package(self, config):
"""Make a remote git repository available for local use.
Args:
config (dict): git config dictionary
"""
# only loading git here when needed to avoid load errors on systems
# without git installed
from git import Repo
ref = self.determine_git_ref(config)
dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)
cached_dir_path = os.path.join(self.package_cache_dir, dir_name)
# We can skip cloning the repo if it's already been cached
if not os.path.isdir(cached_dir_path):
logger.debug("Remote repo %s does not appear to have been "
"previously downloaded - starting clone to %s",
config['uri'],
cached_dir_path)
tmp_dir = tempfile.mkdtemp(prefix='stacker')
try:
tmp_repo_path = os.path.join(tmp_dir, dir_name)
with Repo.clone_from(config['uri'], tmp_repo_path) as repo:
repo.head.reference = ref
repo.head.reset(index=True, working_tree=True)
shutil.move(tmp_repo_path, self.package_cache_dir)
finally:
shutil.rmtree(tmp_dir)
else:
logger.debug("Remote repo %s appears to have been previously "
"cloned to %s -- bypassing download",
config['uri'],
cached_dir_path)
# Update sys.path & merge in remote configs (if necessary)
self.update_paths_and_config(config=config,
pkg_dir_name=dir_name) |
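For reference, a sketch of the git config dict this method consumes. Only 'uri' is used directly in the code above; the other keys ('tag', 'paths') are assumptions about determine_git_ref() and update_paths_and_config(), which are not reproduced here.

config = {
    'uri': 'git@github.com:example-org/stacker_blueprints.git',  # required
    'tag': 'v1.0.4',            # assumed: a tag/branch/commit key selects the ref
    'paths': ['blueprints'],    # assumed: sub-paths added to sys.path after download
}
# handler.fetch_git_package(config=config)   # clones once, then reuses the cached checkout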
def as_txt(self):
"""Text rendering of error response.
Designed for use with Image API version 1.1 and above where the
error response is suggested to be text or html but not otherwise
specified. Intended to provide useful information for debugging.
"""
s = "IIIF Image Server Error\n\n"
s += self.text if (self.text) else 'UNKNOWN_ERROR'
s += "\n\n"
if (self.parameter):
s += "parameter=%s\n" % self.parameter
if (self.code):
s += "code=%d\n\n" % self.code
for header in sorted(self.headers):
s += "header %s=%s\n" % (header, self.headers[header])
return s | Text rendering of error response.
Designed for use with Image API version 1.1 and above where the
error response is suggested to be text or html but not otherwise
specified. Intended to provide useful information for debugging. | Below is the the instruction that describes the task:
### Input:
Text rendering of error response.
Designed for use with Image API version 1.1 and above where the
error response is suggested to be text or html but not otherwise
specified. Intended to provide useful information for debugging.
### Response:
def as_txt(self):
"""Text rendering of error response.
Designed for use with Image API version 1.1 and above where the
error response is suggested to be text or html but not otherwise
specified. Intended to provide useful information for debugging.
"""
s = "IIIF Image Server Error\n\n"
s += self.text if (self.text) else 'UNKNOWN_ERROR'
s += "\n\n"
if (self.parameter):
s += "parameter=%s\n" % self.parameter
if (self.code):
s += "code=%d\n\n" % self.code
for header in sorted(self.headers):
s += "header %s=%s\n" % (header, self.headers[header])
return s |
def sessions_info(self, hosts):
"""Returns ClientInfo per session.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (session_id, ClientInfo).
"""
info_by_id = {}
for server_endpoint, dump in self.dump_by_server(hosts).items():
server_ip, server_port = server_endpoint
for line in dump.split("\n"):
mat = self.IP_PORT_REGEX.match(line)
if mat is None:
continue
ip, port, sid = mat.groups()
info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
return info_by_id | Returns ClientInfo per session.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (session_id, ClientInfo). | Below is the the instruction that describes the task:
### Input:
Returns ClientInfo per session.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (session_id, ClientInfo).
### Response:
def sessions_info(self, hosts):
"""Returns ClientInfo per session.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (session_id, ClientInfo).
"""
info_by_id = {}
for server_endpoint, dump in self.dump_by_server(hosts).items():
server_ip, server_port = server_endpoint
for line in dump.split("\n"):
mat = self.IP_PORT_REGEX.match(line)
if mat is None:
continue
ip, port, sid = mat.groups()
info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
return info_by_id |
def json_integrity(baseline, suspect):
"""
Summary:
Validates baseline dict against suspect dict to ensure contain USERNAME
k,v parameters.
Args:
baseline (dict): baseline json structure
suspect (dict): json object validated against baseline structure
Returns:
Success (matches baseline) | Failure (no match), TYPE: bool
"""
try:
for k,v in baseline.items():
for ks, vs in suspect.items():
keys_baseline = set(v.keys())
keys_suspect = set(vs.keys())
intersect_keys = keys_baseline.intersection(keys_suspect)
added = keys_baseline - keys_suspect
rm = keys_suspect - keys_baseline
logger.info('keys added: %s, keys removed %s' % (str(added), str(rm)))
if keys_baseline != keys_suspect:
return False
except KeyError as e:
logger.info(
'KeyError parsing pre-existing config (%s). Replacing config file' %
str(e))
return True | Summary:
        Validates baseline dict against suspect dict to ensure it contains the USERNAME
k,v parameters.
Args:
baseline (dict): baseline json structure
suspect (dict): json object validated against baseline structure
Returns:
Success (matches baseline) | Failure (no match), TYPE: bool | Below is the the instruction that describes the task:
### Input:
Summary:
Validates baseline dict against suspect dict to ensure contain USERNAME
k,v parameters.
Args:
baseline (dict): baseline json structure
suspect (dict): json object validated against baseline structure
Returns:
Success (matches baseline) | Failure (no match), TYPE: bool
### Response:
def json_integrity(baseline, suspect):
"""
Summary:
Validates baseline dict against suspect dict to ensure contain USERNAME
k,v parameters.
Args:
baseline (dict): baseline json structure
suspect (dict): json object validated against baseline structure
Returns:
Success (matches baseline) | Failure (no match), TYPE: bool
"""
try:
for k,v in baseline.items():
for ks, vs in suspect.items():
keys_baseline = set(v.keys())
keys_suspect = set(vs.keys())
intersect_keys = keys_baseline.intersection(keys_suspect)
added = keys_baseline - keys_suspect
rm = keys_suspect - keys_baseline
logger.info('keys added: %s, keys removed %s' % (str(added), str(rm)))
if keys_baseline != keys_suspect:
return False
except KeyError as e:
logger.info(
'KeyError parsing pre-existing config (%s). Replacing config file' %
str(e))
return True |
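A worked example: the suspect dict is missing the 'PROFILES' sub-key carried by the baseline, so the nested key sets differ and the check fails (the module-level logger is assumed to be configured).

baseline = {'user1': {'CONFIG': {'version': 1}, 'PROFILES': {'default': {}}}}
suspect = {'user1': {'CONFIG': {'version': 1}}}
print(json_integrity(baseline, suspect))   # False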
def spawn_spark_cluster(job,
numWorkers,
cores=None,
memory=None,
disk=None,
overrideLeaderIP=None):
'''
:param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int
'''
if numWorkers < 1:
raise ValueError("Must have more than one worker. %d given." % numWorkers)
leaderService = SparkService(cores=cores,
memory=memory,
disk=disk,
overrideLeaderIP=overrideLeaderIP)
leaderIP = job.addService(leaderService)
for i in range(numWorkers):
job.addService(WorkerService(leaderIP,
cores=cores,
disk=disk,
memory=memory),
parentService=leaderService)
return leaderIP | :param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int | Below is the the instruction that describes the task:
### Input:
:param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int
### Response:
def spawn_spark_cluster(job,
numWorkers,
cores=None,
memory=None,
disk=None,
overrideLeaderIP=None):
'''
:param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int
'''
if numWorkers < 1:
raise ValueError("Must have more than one worker. %d given." % numWorkers)
leaderService = SparkService(cores=cores,
memory=memory,
disk=disk,
overrideLeaderIP=overrideLeaderIP)
leaderIP = job.addService(leaderService)
for i in range(numWorkers):
job.addService(WorkerService(leaderIP,
cores=cores,
disk=disk,
memory=memory),
parentService=leaderService)
return leaderIP |
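A hedged Toil sketch: a parent job provisions a three-worker Spark cluster and hands the leader IP to a follow-on job. run_analysis is a hypothetical downstream job function, not something defined in this document.

from toil.job import Job

def setup_cluster(job):
    leader_ip = spawn_spark_cluster(job, numWorkers=3,
                                    cores=4, memory='8G', disk='20G')
    job.addFollowOnJobFn(run_analysis, leader_ip)   # run_analysis is hypothetical

# root = Job.wrapJobFn(setup_cluster); Job.Runner.startToil(root, options)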
def keys(self, pattern=None):
"""Returns a list of keys matching ``pattern``.
By default return all keys.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc['s0'] = 'string value'
>>> dc.keys()
['l0', 's0']
>>> dc.keys('h*')
[]
>>> dc.clear()
:param pattern: key pattern
:type pattern: str
:return: list of keys in db
:rtype: list of str
"""
logger.debug('call pop %s', pattern)
if pattern is None:
pattern = '*'
return self._redis.keys(pattern=pattern) | Returns a list of keys matching ``pattern``.
By default return all keys.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc['s0'] = 'string value'
>>> dc.keys()
['l0', 's0']
>>> dc.keys('h*')
[]
>>> dc.clear()
:param pattern: key pattern
:type pattern: str
:return: list of keys in db
:rtype: list of str | Below is the the instruction that describes the task:
### Input:
Returns a list of keys matching ``pattern``.
By default return all keys.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc['s0'] = 'string value'
>>> dc.keys()
['l0', 's0']
>>> dc.keys('h*')
[]
>>> dc.clear()
:param pattern: key pattern
:type pattern: str
:return: list of keys in db
:rtype: list of str
### Response:
def keys(self, pattern=None):
"""Returns a list of keys matching ``pattern``.
By default return all keys.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc['s0'] = 'string value'
>>> dc.keys()
['l0', 's0']
>>> dc.keys('h*')
[]
>>> dc.clear()
:param pattern: key pattern
:type pattern: str
:return: list of keys in db
:rtype: list of str
"""
logger.debug('call pop %s', pattern)
if pattern is None:
pattern = '*'
return self._redis.keys(pattern=pattern) |
def planfn(*args, **kwargs):
'''
planfn(val1=fn1, val2=fn2...) uses the pimms plan mechanism to yield a function f that produces
    an immutable imap when called with the correct parameters. Unlike in plan(), planfn() does not
care about the names of the calculation units; instead the val1, val2, etc. are names of the
efferent values while the given fn objects are the functions that calculate them (they are
automatically converted into calc objects, and names are automatically generated for the items
required).
planfn(res, val1=fn1, val2=fn2...) yields a function f that is equivalent to the function
lambda *args,**kw: g(*args,**kw)[res] where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), val1=fn1, val2=fn2...) yields a function f that is equivalent to the
function lambda *a: g({p1:a[0], p2:a[1]...}) where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), res, val1=fn1, val2=fn2...) applies both of the above translations to the
resulting function; note that either the first or second argument may be None to specify
default behavior. Additionally, the first argument may be a string s equivalent to (s,).
'''
# first: parse options
if len(args) > 2: raise ValueError('no more than 2 non-keyword arguments allowed')
elif len(args) == 0: (params,result) = (None,None)
elif len(args) == 2: (params,result) = args
elif is_str(args[0]): (params,result) = (None,args[0])
elif is_vector(args[0], str): (params,result) = (args[0],None)
else: raise ValueError('cannot interpret argument: %s' % args[0])
# okay, now, we make the calcs and the calc plan:
calcs = {}
for (k,v) in six.iteritems(kwargs):
# if v is already a calc object, we can only use this if it has exactly one efferent
if is_calc(v):
if len(v.efferents) != 1:
raise ValueError('calc objects given to planfn must have exactly 1 efferent')
eff = v.efferents[0]
if eff != k: v = v.tr({eff:k})
else: v = calc(k)(v) # otherwise convert it to a calc with the output we want
# it's possible that the names of the calc objects are not unique, so we want to auto-
# generate unique names based on their names...
calcs['calc_' + k] = v
p0 = plan(calcs)
# see if we need to fix the input
if params is None: p = p0
elif is_str(params): p = lambda arg: p0({params:arg})
elif is_vector(params, str): p = lambda *args: p0({k:v for (k,v) in zip(params,args)})
else: raise ValueError('params must be a string or list of strings or None')
# and if we need to fix the output
if result is None: return p
def f(*args, **kw): return p(*args, **kw)[result]
return f | planfn(val1=fn1, val2=fn2...) uses the pimms plan mechanism to yield a function f that produces
    an immutable imap when called with the correct parameters. Unlike in plan(), planfn() does not
care about the names of the calculation units; instead the val1, val2, etc. are names of the
efferent values while the given fn objects are the functions that calculate them (they are
automatically converted into calc objects, and names are automatically generated for the items
required).
planfn(res, val1=fn1, val2=fn2...) yields a function f that is equivalent to the function
lambda *args,**kw: g(*args,**kw)[res] where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), val1=fn1, val2=fn2...) yields a function f that is equivalent to the
function lambda *a: g({p1:a[0], p2:a[1]...}) where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), res, val1=fn1, val2=fn2...) applies both of the above translations to the
resulting function; note that either the first or second argument may be None to specify
default behavior. Additionally, the first argument may be a string s equivalent to (s,). | Below is the the instruction that describes the task:
### Input:
planfn(val1=fn1, val2=fn2...) uses the pimms plan mechanism to yield a function f that produces
    an immutable imap when called with the correct parameters. Unlike in plan(), planfn() does not
care about the names of the calculation units; instead the val1, val2, etc. are names of the
efferent values while the given fn objects are the functions that calculate them (they are
automatically converted into calc objects, and names are automatically generated for the items
required).
planfn(res, val1=fn1, val2=fn2...) yields a function f that is equivalent to the function
lambda *args,**kw: g(*args,**kw)[res] where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), val1=fn1, val2=fn2...) yields a function f that is equivalent to the
function lambda *a: g({p1:a[0], p2:a[1]...}) where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), res, val1=fn1, val2=fn2...) applies both of the above translations to the
resulting function; note that either the first or second argument may be None to specify
default behavior. Additionally, the first argument may be a string s equivalent to (s,).
### Response:
def planfn(*args, **kwargs):
'''
planfn(val1=fn1, val2=fn2...) uses the pimms plan mechanism to yield a function f that produces
    an immutable imap when called with the correct parameters. Unlike in plan(), planfn() does not
care about the names of the calculation units; instead the val1, val2, etc. are names of the
efferent values while the given fn objects are the functions that calculate them (they are
automatically converted into calc objects, and names are automatically generated for the items
required).
planfn(res, val1=fn1, val2=fn2...) yields a function f that is equivalent to the function
lambda *args,**kw: g(*args,**kw)[res] where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), val1=fn1, val2=fn2...) yields a function f that is equivalent to the
function lambda *a: g({p1:a[0], p2:a[1]...}) where g = planfn(val1=fn1, val2=fn2...).
planfn((p1, p2...), res, val1=fn1, val2=fn2...) applies both of the above translations to the
resulting function; note that either the first or second argument may be None to specify
default behavior. Additionally, the first argument may be a string s equivalent to (s,).
'''
# first: parse options
if len(args) > 2: raise ValueError('no more than 2 non-keyword arguments allowed')
elif len(args) == 0: (params,result) = (None,None)
elif len(args) == 2: (params,result) = args
elif is_str(args[0]): (params,result) = (None,args[0])
elif is_vector(args[0], str): (params,result) = (args[0],None)
else: raise ValueError('cannot interpret argument: %s' % args[0])
# okay, now, we make the calcs and the calc plan:
calcs = {}
for (k,v) in six.iteritems(kwargs):
# if v is already a calc object, we can only use this if it has exactly one efferent
if is_calc(v):
if len(v.efferents) != 1:
raise ValueError('calc objects given to planfn must have exactly 1 efferent')
eff = v.efferents[0]
if eff != k: v = v.tr({eff:k})
else: v = calc(k)(v) # otherwise convert it to a calc with the output we want
# it's possible that the names of the calc objects are not unique, so we want to auto-
# generate unique names based on their names...
calcs['calc_' + k] = v
p0 = plan(calcs)
# see if we need to fix the input
if params is None: p = p0
elif is_str(params): p = lambda arg: p0({params:arg})
elif is_vector(params, str): p = lambda *args: p0({k:v for (k,v) in zip(params,args)})
else: raise ValueError('params must be a string or list of strings or None')
# and if we need to fix the output
if result is None: return p
def f(*args, **kw): return p(*args, **kw)[result]
return f |
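A minimal sketch of the (params, result) form: the positional parameter names 'width' and 'height' feed the lambda's arguments, and indexing by 'area' unwraps the resulting imap to a plain value.

area = planfn(('width', 'height'), 'area',
              area=lambda width, height: width * height)
print(area(3, 4))   # 12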
def to_iso_time_string(self) -> str:
""" Return the iso time string only """
short_time = self.to_short_time_string()
second = self.time.second
return f"{short_time}:{second:02}" | Return the iso time string only | Below is the the instruction that describes the task:
### Input:
Return the iso time string only
### Response:
def to_iso_time_string(self) -> str:
""" Return the iso time string only """
short_time = self.to_short_time_string()
second = self.time.second
return f"{short_time}:{second:02}" |
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
if not LOGBOOK_INSTALLED:
return
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
dataproperty.set_log_level(log_level)
try:
import simplesqlite
simplesqlite.set_log_level(log_level)
except ImportError:
pass | Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value. | Below is the the instruction that describes the task:
### Input:
Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
### Response:
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
if not LOGBOOK_INSTALLED:
return
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
dataproperty.set_log_level(log_level)
try:
import simplesqlite
simplesqlite.set_log_level(log_level)
except ImportError:
pass |
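Typical usage, assuming logbook is installed (without it the call is a no-op, as guarded above).

import logbook

set_log_level(logbook.DEBUG)    # verbose logging for this module (and dataproperty)
set_log_level(logbook.NOTSET)   # disable it again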
def pull_image(self, image_name, stream=None):
"""
Ask Docker to pull the container image with given name.
Parameters
----------
image_name str
Name of the image
stream samcli.lib.utils.stream_writer.StreamWriter
Optional stream writer to output to. Defaults to stderr
Raises
------
DockerImagePullFailedException
If the Docker image was not available in the server
"""
stream_writer = stream or StreamWriter(sys.stderr)
try:
result_itr = self.docker_client.api.pull(image_name, stream=True, decode=True)
except docker.errors.APIError as ex:
LOG.debug("Failed to download image with name %s", image_name)
raise DockerImagePullFailedException(str(ex))
# io streams, especially StringIO, work only with unicode strings
stream_writer.write(u"\nFetching {} Docker container image...".format(image_name))
# Each line contains information on progress of the pull. Each line is a JSON string
for _ in result_itr:
# For every line, print a dot to show progress
stream_writer.write(u'.')
stream_writer.flush()
# We are done. Go to the next line
stream_writer.write(u"\n") | Ask Docker to pull the container image with given name.
Parameters
----------
image_name str
Name of the image
stream samcli.lib.utils.stream_writer.StreamWriter
Optional stream writer to output to. Defaults to stderr
Raises
------
DockerImagePullFailedException
If the Docker image was not available in the server | Below is the the instruction that describes the task:
### Input:
Ask Docker to pull the container image with given name.
Parameters
----------
image_name str
Name of the image
stream samcli.lib.utils.stream_writer.StreamWriter
Optional stream writer to output to. Defaults to stderr
Raises
------
DockerImagePullFailedException
If the Docker image was not available in the server
### Response:
def pull_image(self, image_name, stream=None):
"""
Ask Docker to pull the container image with given name.
Parameters
----------
image_name str
Name of the image
stream samcli.lib.utils.stream_writer.StreamWriter
Optional stream writer to output to. Defaults to stderr
Raises
------
DockerImagePullFailedException
If the Docker image was not available in the server
"""
stream_writer = stream or StreamWriter(sys.stderr)
try:
result_itr = self.docker_client.api.pull(image_name, stream=True, decode=True)
except docker.errors.APIError as ex:
LOG.debug("Failed to download image with name %s", image_name)
raise DockerImagePullFailedException(str(ex))
# io streams, especially StringIO, work only with unicode strings
stream_writer.write(u"\nFetching {} Docker container image...".format(image_name))
# Each line contains information on progress of the pull. Each line is a JSON string
for _ in result_itr:
# For every line, print a dot to show progress
stream_writer.write(u'.')
stream_writer.flush()
# We are done. Go to the next line
stream_writer.write(u"\n") |
def range_daily(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
DAILY stops
"""
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count) | This an alternative way to generating sets of Delorean objects with
DAILY stops | Below is the the instruction that describes the task:
### Input:
This an alternative way to generating sets of Delorean objects with
DAILY stops
### Response:
def range_daily(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
DAILY stops
"""
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count) |
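A hedged sketch, assuming range_daily is re-exported at the delorean package top level.

from datetime import datetime
from delorean import range_daily

for stop in range_daily(start=datetime(2020, 1, 1), count=5):
    print(stop)   # five Delorean objects one day apart, localized to UTC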
def cmd_list(unused_conf: Config):
"""Print out information on loaded builders and hooks."""
for name, builder in sorted(Plugin.builders.items()):
if builder.func:
print('+- {0:16s} implemented in {1.__module__}.{1.__name__}()'
.format(name, builder.func))
else:
print('+- {0:16s} loaded with no builder function'.format(name))
for hook_name, hook_func in sorted(Plugin.get_hooks_for_builder(name)):
print(' +- {0} hook implemented in '
'{1.__module__}.{1.__name__}()'
.format(hook_name, hook_func)) | Print out information on loaded builders and hooks. | Below is the the instruction that describes the task:
### Input:
Print out information on loaded builders and hooks.
### Response:
def cmd_list(unused_conf: Config):
"""Print out information on loaded builders and hooks."""
for name, builder in sorted(Plugin.builders.items()):
if builder.func:
print('+- {0:16s} implemented in {1.__module__}.{1.__name__}()'
.format(name, builder.func))
else:
print('+- {0:16s} loaded with no builder function'.format(name))
for hook_name, hook_func in sorted(Plugin.get_hooks_for_builder(name)):
print(' +- {0} hook implemented in '
'{1.__module__}.{1.__name__}()'
.format(hook_name, hook_func)) |
def output(self, name):
"""Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if
necessary.
:API: public
"""
m = WorkUnit._valid_name_re.match(name)
if not m or m.group(0) != name:
raise Exception('Invalid output name: {}'.format(name))
if name not in self._outputs:
workunit_name = re.sub(r'\W', '_', self.name)
path = os.path.join(self.run_info_dir,
'tool_outputs', '{workunit_name}-{id}.{output_name}'
.format(workunit_name=workunit_name,
id=self.id,
output_name=name))
safe_mkdir_for(path)
self._outputs[name] = FileBackedRWBuf(path)
self._output_paths[name] = path
return self._outputs[name] | Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if
necessary.
:API: public | Below is the the instruction that describes the task:
### Input:
Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if
necessary.
:API: public
### Response:
def output(self, name):
"""Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if
necessary.
:API: public
"""
m = WorkUnit._valid_name_re.match(name)
if not m or m.group(0) != name:
raise Exception('Invalid output name: {}'.format(name))
if name not in self._outputs:
workunit_name = re.sub(r'\W', '_', self.name)
path = os.path.join(self.run_info_dir,
'tool_outputs', '{workunit_name}-{id}.{output_name}'
.format(workunit_name=workunit_name,
id=self.id,
output_name=name))
safe_mkdir_for(path)
self._outputs[name] = FileBackedRWBuf(path)
self._output_paths[name] = path
return self._outputs[name] |
def boll(self, n, dev, array=False):
"""布林通道"""
mid = self.sma(n, array)
std = self.std(n, array)
up = mid + std * dev
down = mid - std * dev
    return up, down | Bollinger Bands | Below is the the instruction that describes the task:
### Input:
Bollinger Bands
### Response:
def boll(self, n, dev, array=False):
"""布林通道"""
mid = self.sma(n, array)
std = self.std(n, array)
up = mid + std * dev
down = mid - std * dev
return up, down |
def get_actions(self, request):
"""Disable the "delete selected" admin action.
Otherwise the action is present even though has_delete_permission is False, it just doesn't
work.
"""
actions = super(CertificateMixin, self).get_actions(request)
actions.pop('delete_selected', '')
return actions | Disable the "delete selected" admin action.
Otherwise the action is present even though has_delete_permission is False, it just doesn't
work. | Below is the the instruction that describes the task:
### Input:
Disable the "delete selected" admin action.
Otherwise the action is present even though has_delete_permission is False, it just doesn't
work.
### Response:
def get_actions(self, request):
"""Disable the "delete selected" admin action.
Otherwise the action is present even though has_delete_permission is False, it just doesn't
work.
"""
actions = super(CertificateMixin, self).get_actions(request)
actions.pop('delete_selected', '')
return actions |
def cmd_full_return(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
verbose=False,
kwarg=None,
**kwargs):
'''
Execute a salt command and return
'''
was_listening = self.event.cpub
try:
pub_data = self.run_job(
tgt,
fun,
arg,
tgt_type,
ret,
timeout,
kwarg=kwarg,
listen=True,
**kwargs)
if not pub_data:
return pub_data
return (self.get_cli_static_event_returns(pub_data['jid'],
pub_data['minions'],
timeout,
tgt,
tgt_type,
verbose))
finally:
if not was_listening:
self.event.close_pub() | Execute a salt command and return | Below is the the instruction that describes the task:
### Input:
Execute a salt command and return
### Response:
def cmd_full_return(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
verbose=False,
kwarg=None,
**kwargs):
'''
Execute a salt command and return
'''
was_listening = self.event.cpub
try:
pub_data = self.run_job(
tgt,
fun,
arg,
tgt_type,
ret,
timeout,
kwarg=kwarg,
listen=True,
**kwargs)
if not pub_data:
return pub_data
return (self.get_cli_static_event_returns(pub_data['jid'],
pub_data['minions'],
timeout,
tgt,
tgt_type,
verbose))
finally:
if not was_listening:
self.event.close_pub() |
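A hedged sketch using Salt's LocalClient, the class this method belongs to; it assumes a reachable master with at least one accepted minion.

import salt.client

local = salt.client.LocalClient()
ret = local.cmd_full_return('*', 'test.ping', timeout=10)
print(ret)   # maps each responding minion id to its full return payload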
def detect_events(self, data, method, params, label):
"""Detect events and display on signal.
Parameters
----------
data : instance of ChanTime
one segment with all channels of interest
method : str
Method used for detection.
params : dict
Parameters used for detection.
label : str
Name of event type, on event labels
"""
if self.annot is None: # remove if buttons are disabled
self.parent.statusBar().showMessage('No score file loaded')
return
lg.info('Adding event type ' + label)
self.annot.add_event_type(label)
self.display_eventtype()
n_eventtype = self.idx_eventtype.count()
self.idx_eventtype.setCurrentIndex(n_eventtype - 1)
if params['max_dur'] in [0, 'None']:
params['max_dur'] = None
freq = (float(params['f1']), float(params['f2']))
duration = (params['min_dur'], params['max_dur'])
if method in SPINDLE_METHODS:
detector = DetectSpindle(method=method, frequency=freq,
duration=duration, merge=params['merge'])
detector.rolloff = params['rolloff']
detector.min_interval = params['interval']
if 'Ferrarelli2007' == method:
detector.det_thresh = params['0']
detector.sel_thresh = params['1']
if 'Nir2011' == method:
detector.smooth['dur'] = params['0']
detector.det_thresh = params['1']
detector.sel_thresh = params['2']
if 'Moelle2011' == method:
detector.moving_rms['dur'] = params['0']
detector.smooth['dur'] = params['1']
detector.det_thresh = params['2']
if 'Wamsley2012' == method:
detector.det_wavelet['dur'] = params['0']
detector.det_wavelet['sd'] = params['1']
detector.smooth['dur'] = params['2']
detector.det_thresh = params['3']
if 'Martin2013' == method:
detector.moving_rms['dur'] = params['0']
detector.moving_rms['step'] = params['1']
detector.det_thresh = params['2']
if 'Ray2015' == method:
detector.smooth['dur'] = params['0']
detector.zscore['step'] = params['1']
detector.det_thresh = params['2']
detector.sel_thresh = params['3']
if 'Lacourse2018' == method:
detector.windowing['dur'] = params['0']
detector.windowing['step'] = params['1']
detector.abs_pow_thresh = params['2']
detector.rel_pow_thresh = params['3']
detector.covar_thresh = params['4']
detector.corr_thresh = params['5']
if 'FASST' == method:
detector.det_thresh = params['0']
detector.smooth['dur'] = params['1']
if 'FASST2' == method:
detector.det_thresh = params['0']
detector.moving_rms['dur'] = params['1']
detector.smooth['dur'] = params['2']
if 'UCSD' == method:
detector.det_wavelet['dur'] = params['0']
detector.det_wavelet['width'] = params['1']
detector.det_wavelet['win'] = params['2']
detector.det_thresh = params['3']
detector.sel_thresh = params['4']
if 'Concordia' == method:
detector.moving_rms['dur'] = params['0']
detector.smooth['dur'] = params['1']
detector.det_thresh = params['2']
detector.det_thresh_hi = params['3']
detector.tolerance = params['4']
detector.sel_thresh = params['5']
elif method in SLOW_WAVE_METHODS:
detector = DetectSlowWave(method=method, duration=duration)
detector.det_filt['freq'] = freq
detector.trough_duration = (params['min_trough_dur'],
params['max_trough_dur'])
detector.max_trough_amp = params['max_trough_amp']
detector.min_ptp = params['min_ptp']
detector.invert = params['invert']
else:
lg.info('Method not recognized: ' + method)
return
events = detector(data, parent=self)
if events:
self.annot.add_events(events, name=label)
self.update_annotations() | Detect events and display on signal.
Parameters
----------
data : instance of ChanTime
one segment with all channels of interest
method : str
Method used for detection.
params : dict
Parameters used for detection.
label : str
Name of event type, on event labels | Below is the the instruction that describes the task:
### Input:
Detect events and display on signal.
Parameters
----------
data : instance of ChanTime
one segment with all channels of interest
method : str
Method used for detection.
params : dict
Parameters used for detection.
label : str
Name of event type, on event labels
### Response:
def detect_events(self, data, method, params, label):
"""Detect events and display on signal.
Parameters
----------
data : instance of ChanTime
one segment with all channels of interest
method : str
Method used for detection.
params : dict
Parameters used for detection.
label : str
Name of event type, on event labels
"""
if self.annot is None: # remove if buttons are disabled
self.parent.statusBar().showMessage('No score file loaded')
return
lg.info('Adding event type ' + label)
self.annot.add_event_type(label)
self.display_eventtype()
n_eventtype = self.idx_eventtype.count()
self.idx_eventtype.setCurrentIndex(n_eventtype - 1)
if params['max_dur'] in [0, 'None']:
params['max_dur'] = None
freq = (float(params['f1']), float(params['f2']))
duration = (params['min_dur'], params['max_dur'])
if method in SPINDLE_METHODS:
detector = DetectSpindle(method=method, frequency=freq,
duration=duration, merge=params['merge'])
detector.rolloff = params['rolloff']
detector.min_interval = params['interval']
if 'Ferrarelli2007' == method:
detector.det_thresh = params['0']
detector.sel_thresh = params['1']
if 'Nir2011' == method:
detector.smooth['dur'] = params['0']
detector.det_thresh = params['1']
detector.sel_thresh = params['2']
if 'Moelle2011' == method:
detector.moving_rms['dur'] = params['0']
detector.smooth['dur'] = params['1']
detector.det_thresh = params['2']
if 'Wamsley2012' == method:
detector.det_wavelet['dur'] = params['0']
detector.det_wavelet['sd'] = params['1']
detector.smooth['dur'] = params['2']
detector.det_thresh = params['3']
if 'Martin2013' == method:
detector.moving_rms['dur'] = params['0']
detector.moving_rms['step'] = params['1']
detector.det_thresh = params['2']
if 'Ray2015' == method:
detector.smooth['dur'] = params['0']
detector.zscore['step'] = params['1']
detector.det_thresh = params['2']
detector.sel_thresh = params['3']
if 'Lacourse2018' == method:
detector.windowing['dur'] = params['0']
detector.windowing['step'] = params['1']
detector.abs_pow_thresh = params['2']
detector.rel_pow_thresh = params['3']
detector.covar_thresh = params['4']
detector.corr_thresh = params['5']
if 'FASST' == method:
detector.det_thresh = params['0']
detector.smooth['dur'] = params['1']
if 'FASST2' == method:
detector.det_thresh = params['0']
detector.moving_rms['dur'] = params['1']
detector.smooth['dur'] = params['2']
if 'UCSD' == method:
detector.det_wavelet['dur'] = params['0']
detector.det_wavelet['width'] = params['1']
detector.det_wavelet['win'] = params['2']
detector.det_thresh = params['3']
detector.sel_thresh = params['4']
if 'Concordia' == method:
detector.moving_rms['dur'] = params['0']
detector.smooth['dur'] = params['1']
detector.det_thresh = params['2']
detector.det_thresh_hi = params['3']
detector.tolerance = params['4']
detector.sel_thresh = params['5']
elif method in SLOW_WAVE_METHODS:
detector = DetectSlowWave(method=method, duration=duration)
detector.det_filt['freq'] = freq
detector.trough_duration = (params['min_trough_dur'],
params['max_trough_dur'])
detector.max_trough_amp = params['max_trough_amp']
detector.min_ptp = params['min_ptp']
detector.invert = params['invert']
else:
lg.info('Method not recognized: ' + method)
return
events = detector(data, parent=self)
if events:
self.annot.add_events(events, name=label)
self.update_annotations() |
def text_filepaths_for_task(self, tmp_dir, task_id):
"""List of input filepaths for a particular training or dev shard.
Args:
tmp_dir: a string
task_id: an integer less than self.num_shards
Returns:
a list of tuples (filepath, start_pos, num_bytes)
"""
assert task_id >= 0
assert task_id < self.num_train_shards + self.num_dev_shards
if task_id < self.num_train_shards:
return [
f for i, f in enumerate(self.train_text_filepaths(tmp_dir))
if i % self.num_train_shards == task_id
]
else:
return [
f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))
if i % self.num_dev_shards == task_id - self.num_train_shards
] | List of input filepaths for a particular training or dev shard.
Args:
tmp_dir: a string
task_id: an integer less than self.num_shards
Returns:
a list of tuples (filepath, start_pos, num_bytes) | Below is the the instruction that describes the task:
### Input:
List of input filepaths for a particular training or dev shard.
Args:
tmp_dir: a string
task_id: an integer less than self.num_shards
Returns:
a list of tuples (filepath, start_pos, num_bytes)
### Response:
def text_filepaths_for_task(self, tmp_dir, task_id):
"""List of input filepaths for a particular training or dev shard.
Args:
tmp_dir: a string
task_id: an integer less than self.num_shards
Returns:
a list of tuples (filepath, start_pos, num_bytes)
"""
assert task_id >= 0
assert task_id < self.num_train_shards + self.num_dev_shards
if task_id < self.num_train_shards:
return [
f for i, f in enumerate(self.train_text_filepaths(tmp_dir))
if i % self.num_train_shards == task_id
]
else:
return [
f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))
if i % self.num_dev_shards == task_id - self.num_train_shards
] |
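The round-robin assignment illustrated outside the class: with two training shards and five training files, the task ids split the list by index modulo the shard count.

files = ['f0', 'f1', 'f2', 'f3', 'f4']
num_train_shards = 2
for task_id in range(num_train_shards):
    print(task_id, [f for i, f in enumerate(files) if i % num_train_shards == task_id])
# 0 ['f0', 'f2', 'f4']
# 1 ['f1', 'f3']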
def l(*members, meta=None) -> List:
"""Creates a new list from members."""
return List( # pylint: disable=abstract-class-instantiated
plist(iterable=members), meta=meta
) | Creates a new list from members. | Below is the the instruction that describes the task:
### Input:
Creates a new list from members.
### Response:
def l(*members, meta=None) -> List:
"""Creates a new list from members."""
return List( # pylint: disable=abstract-class-instantiated
plist(iterable=members), meta=meta
) |
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Index Map Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and read plain text into text field
with open(path, 'r') as f:
self.rasterText = f.read()
# Retrieve metadata from header
lines = self.rasterText.split('\n')
for line in lines[0:6]:
spline = line.split()
if 'north' in spline[0].lower():
self.north = float(spline[1])
elif 'south' in spline[0].lower():
self.south = float(spline[1])
elif 'east' in spline[0].lower():
self.east = float(spline[1])
elif 'west' in spline[0].lower():
self.west = float(spline[1])
elif 'rows' in spline[0].lower():
self.rows = int(spline[1])
elif 'cols' in spline[0].lower():
self.columns = int(spline[1])
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
grassRasterPath=path,
srid=str(spatialReferenceID),
noData='-1')
self.raster = wkbRaster
self.srid = spatialReferenceID
# Assign other properties
self.filename = filename | Index Map Read from File Method | Below is the the instruction that describes the task:
### Input:
Index Map Read from File Method
### Response:
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Index Map Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and read plain text into text field
with open(path, 'r') as f:
self.rasterText = f.read()
# Retrieve metadata from header
lines = self.rasterText.split('\n')
for line in lines[0:6]:
spline = line.split()
if 'north' in spline[0].lower():
self.north = float(spline[1])
elif 'south' in spline[0].lower():
self.south = float(spline[1])
elif 'east' in spline[0].lower():
self.east = float(spline[1])
elif 'west' in spline[0].lower():
self.west = float(spline[1])
elif 'rows' in spline[0].lower():
self.rows = int(spline[1])
elif 'cols' in spline[0].lower():
self.columns = int(spline[1])
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
grassRasterPath=path,
srid=str(spatialReferenceID),
noData='-1')
self.raster = wkbRaster
self.srid = spatialReferenceID
# Assign other properties
self.filename = filename |
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim))
for batch in range(self.__batch_size):
for i in range(len(self.__program_list)):
program_key = self.__program_list[i]
key = np.random.randint(low=0, high=len(self.__midi_df_list))
midi_df = self.__midi_df_list[key]
midi_df = midi_df[midi_df.program == program_key]
if midi_df.shape[0] < self.__seq_len:
continue
row = np.random.uniform(
low=midi_df.start.min(),
high=midi_df.end.max() - (self.__seq_len * self.__time_fraction)
)
for seq in range(self.__seq_len):
start = row + (seq * self.__time_fraction)
end = row + ((seq+1) * self.__time_fraction)
df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)]
sampled_arr[batch, i, seq] = self.__convert_into_feature(df)
return sampled_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. | Below is the the instruction that describes the task:
### Input:
Generate noise samples.
Returns:
`np.ndarray` of samples.
### Response:
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim))
for batch in range(self.__batch_size):
for i in range(len(self.__program_list)):
program_key = self.__program_list[i]
key = np.random.randint(low=0, high=len(self.__midi_df_list))
midi_df = self.__midi_df_list[key]
midi_df = midi_df[midi_df.program == program_key]
if midi_df.shape[0] < self.__seq_len:
continue
row = np.random.uniform(
low=midi_df.start.min(),
high=midi_df.end.max() - (self.__seq_len * self.__time_fraction)
)
for seq in range(self.__seq_len):
start = row + (seq * self.__time_fraction)
end = row + ((seq+1) * self.__time_fraction)
df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)]
sampled_arr[batch, i, seq] = self.__convert_into_feature(df)
return sampled_arr |
def delta_unaccelerated(self):
"""The relative delta of the unaccelerated motion vector of
the current event.
For gesture events that are not of type
:attr:`~libinput.constant.EventType.GESTURE_SWIPE_UPDATE` or
:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE`, this
property raises :exc:`AttributeError`.
Relative unaccelerated motion deltas are normalized to represent those
of a device with 1000dpi resolution. See
`Normalization of relative motion`_ for more details.
Note that unaccelerated events are not equivalent to 'raw' events
as read from the device.
Any rotation applied to the device also applies to gesture motion
(see :meth:`~libinput.define.DeviceConfigRotation.set_angle`).
Returns:
(float, float): The unaccelerated relative (x, y) movement since
the last event.
"""
if self.type not in {EventType.GESTURE_SWIPE_UPDATE,
EventType.GESTURE_PINCH_UPDATE}:
raise AttributeError(_wrong_prop.format(self.type))
delta_x = self._libinput.libinput_event_gesture_get_dx_unaccelerated(
self._handle)
delta_y = self._libinput.libinput_event_gesture_get_dy_unaccelerated(
self._handle)
return delta_x, delta_y | The relative delta of the unaccelerated motion vector of
the current event.
For gesture events that are not of type
:attr:`~libinput.constant.EventType.GESTURE_SWIPE_UPDATE` or
:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE`, this
property raises :exc:`AttributeError`.
Relative unaccelerated motion deltas are normalized to represent those
of a device with 1000dpi resolution. See
`Normalization of relative motion`_ for more details.
Note that unaccelerated events are not equivalent to 'raw' events
as read from the device.
Any rotation applied to the device also applies to gesture motion
(see :meth:`~libinput.define.DeviceConfigRotation.set_angle`).
Returns:
(float, float): The unaccelerated relative (x, y) movement since
        the last event. | Below is the instruction that describes the task:
### Input:
The relative delta of the unaccelerated motion vector of
the current event.
For gesture events that are not of type
:attr:`~libinput.constant.EventType.GESTURE_SWIPE_UPDATE` or
:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE`, this
property raises :exc:`AttributeError`.
Relative unaccelerated motion deltas are normalized to represent those
of a device with 1000dpi resolution. See
`Normalization of relative motion`_ for more details.
Note that unaccelerated events are not equivalent to 'raw' events
as read from the device.
Any rotation applied to the device also applies to gesture motion
(see :meth:`~libinput.define.DeviceConfigRotation.set_angle`).
Returns:
(float, float): The unaccelerated relative (x, y) movement since
the last event.
### Response:
def delta_unaccelerated(self):
"""The relative delta of the unaccelerated motion vector of
the current event.
For gesture events that are not of type
:attr:`~libinput.constant.EventType.GESTURE_SWIPE_UPDATE` or
:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE`, this
property raises :exc:`AttributeError`.
Relative unaccelerated motion deltas are normalized to represent those
of a device with 1000dpi resolution. See
`Normalization of relative motion`_ for more details.
Note that unaccelerated events are not equivalent to 'raw' events
as read from the device.
Any rotation applied to the device also applies to gesture motion
(see :meth:`~libinput.define.DeviceConfigRotation.set_angle`).
Returns:
(float, float): The unaccelerated relative (x, y) movement since
the last event.
"""
if self.type not in {EventType.GESTURE_SWIPE_UPDATE,
EventType.GESTURE_PINCH_UPDATE}:
raise AttributeError(_wrong_prop.format(self.type))
delta_x = self._libinput.libinput_event_gesture_get_dx_unaccelerated(
self._handle)
delta_y = self._libinput.libinput_event_gesture_get_dy_unaccelerated(
self._handle)
return delta_x, delta_y |
def cross(self, other):
'''
:other: Point or point equivalent
:return: float
Vector cross product of points U (self) and V (other), computed:
U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)
s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1
U x V = s1 + s2 + s3
Returns a float.
'''
b = self.__class__._convert(other)
return sum([(self.y * b.z) - (self.z * b.y),
(self.z * b.x) - (self.x * b.z),
(self.x * b.y) - (self.y * b.x)]) | :other: Point or point equivalent
:return: float
Vector cross product of points U (self) and V (other), computed:
U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)
s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1
U x V = s1 + s2 + s3
Returns a float. | Below is the instruction that describes the task:
### Input:
:other: Point or point equivalent
:return: float
Vector cross product of points U (self) and V (other), computed:
U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)
s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1
U x V = s1 + s2 + s3
Returns a float.
### Response:
def cross(self, other):
'''
:other: Point or point equivalent
:return: float
Vector cross product of points U (self) and V (other), computed:
U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)
s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1
U x V = s1 + s2 + s3
Returns a float.
'''
b = self.__class__._convert(other)
return sum([(self.y * b.z) - (self.z * b.y),
(self.z * b.x) - (self.x * b.z),
(self.x * b.y) - (self.y * b.x)]) |
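A small worked example of the formula in the docstring (the Point constructor below is assumed to take x, y, z): for U = (1, 2, 3) and V = (2, 0, 1), s1 = 2*1 - 3*0 = 2, s2 = 3*2 - 1*1 = 5 and s3 = 1*0 - 2*2 = -4, so cross returns 2 + 5 - 4 = 3.

u = Point(1, 2, 3)  # hypothetical Point(x, y, z)
v = Point(2, 0, 1)
u.cross(v)          # -> 3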
def console_get_height_rect(
con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str
) -> int:
"""Return the height of this text once word-wrapped into this rectangle.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.get_height_rect` instead.
"""
return int(
lib.TCOD_console_get_height_rect_fmt(
_console(con), x, y, w, h, _fmt(fmt)
)
) | Return the height of this text once word-wrapped into this rectangle.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
    Use :any:`Console.get_height_rect` instead. | Below is the instruction that describes the task:
### Input:
Return the height of this text once word-wrapped into this rectangle.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.get_height_rect` instead.
### Response:
def console_get_height_rect(
con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str
) -> int:
"""Return the height of this text once word-wrapped into this rectangle.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.get_height_rect` instead.
"""
return int(
lib.TCOD_console_get_height_rect_fmt(
_console(con), x, y, w, h, _fmt(fmt)
)
) |
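A usage sketch based on the signature above (console is assumed to be an existing tcod console object); the deprecation note points to the Console method as the replacement:

height = console_get_height_rect(console, 0, 0, 20, 5, "a fairly long message that will wrap")
# Preferred on tcod >= 8.5, per the deprecation note:
height = console.get_height_rect(0, 0, 20, 5, "a fairly long message that will wrap")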
def pretty_to_link(inst, link):
'''
Create a human-readable representation of a link on the 'TO'-side
'''
values = ''
prefix = ''
metaclass = xtuml.get_metaclass(inst)
for name, ty in metaclass.attributes:
if name in link.key_map:
value = getattr(inst, name)
value = xtuml.serialize_value(value, ty)
name = link.key_map[name]
values += '%s%s=%s' % (prefix, name, value)
prefix = ', '
    return '%s(%s)' % (link.kind, values) | Create a human-readable representation of a link on the 'TO'-side | Below is the instruction that describes the task:
### Input:
Create a human-readable representation of a link on the 'TO'-side
### Response:
def pretty_to_link(inst, link):
'''
Create a human-readable representation of a link on the 'TO'-side
'''
values = ''
prefix = ''
metaclass = xtuml.get_metaclass(inst)
for name, ty in metaclass.attributes:
if name in link.key_map:
value = getattr(inst, name)
value = xtuml.serialize_value(value, ty)
name = link.key_map[name]
values += '%s%s=%s' % (prefix, name, value)
prefix = ', '
return '%s(%s)' % (link.kind, values) |
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
            f.close() | Serve a GET request. | Below is the instruction that describes the task:
### Input:
Serve a GET request.
### Response:
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close() |
def _set_pfc(self, v, load=False):
"""
Setter method for pfc, mapped from YANG variable /interface/port_channel/qos/flowcontrol/pfc (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pfc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pfc() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("pfc_cos",pfc.pfc, yang_name="pfc", rest_name="pfc", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="pfc", rest_name="pfc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pfc must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("pfc_cos",pfc.pfc, yang_name="pfc", rest_name="pfc", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="pfc", rest_name="pfc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__pfc = t
if hasattr(self, '_set'):
self._set() | Setter method for pfc, mapped from YANG variable /interface/port_channel/qos/flowcontrol/pfc (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pfc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pfc() directly. | Below is the instruction that describes the task:
### Input:
Setter method for pfc, mapped from YANG variable /interface/port_channel/qos/flowcontrol/pfc (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pfc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pfc() directly.
### Response:
def _set_pfc(self, v, load=False):
"""
Setter method for pfc, mapped from YANG variable /interface/port_channel/qos/flowcontrol/pfc (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pfc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pfc() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("pfc_cos",pfc.pfc, yang_name="pfc", rest_name="pfc", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="pfc", rest_name="pfc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pfc must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("pfc_cos",pfc.pfc, yang_name="pfc", rest_name="pfc", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="pfc", rest_name="pfc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__pfc = t
if hasattr(self, '_set'):
self._set() |
def get_agg_data(cls, obj, category=None):
"""
Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated.
"""
paths = []
if isinstance(obj, Graph):
obj = obj.edgepaths
kdims = list(obj.kdims)
vdims = list(obj.vdims)
dims = obj.dimensions()[:2]
if isinstance(obj, Path):
glyph = 'line'
for p in obj.split(datatype='dataframe'):
paths.append(p)
elif isinstance(obj, CompositeOverlay):
element = None
for key, el in obj.data.items():
x, y, element, glyph = cls.get_agg_data(el)
dims = (x, y)
df = PandasInterface.as_dframe(element)
if isinstance(obj, NdOverlay):
df = df.assign(**dict(zip(obj.dimensions('key', True), key)))
paths.append(df)
if element is None:
dims = None
else:
kdims += element.kdims
vdims = element.vdims
elif isinstance(obj, Element):
glyph = 'line' if isinstance(obj, Curve) else 'points'
paths.append(PandasInterface.as_dframe(obj))
if dims is None or len(dims) != 2:
return None, None, None, None
else:
x, y = dims
if len(paths) > 1:
if glyph == 'line':
path = paths[0][:1]
if isinstance(path, dd.DataFrame):
path = path.compute()
empty = path.copy()
empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
paths = [elem for p in paths for elem in (p, empty)][:-1]
if all(isinstance(path, dd.DataFrame) for path in paths):
df = dd.concat(paths)
else:
paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
df = pd.concat(paths)
else:
df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])
if category and df[category].dtype.name != 'category':
df[category] = df[category].astype('category')
is_dask = isinstance(df, dd.DataFrame)
if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
df[d.name].dtype.kind == 'M' for d in (x, y)):
df = df.copy()
for d in (x, y):
vals = df[d.name]
if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types):
vals = cftime_to_timestamp(vals, 'ns')
elif df[d.name].dtype.kind == 'M':
vals = vals.astype('datetime64[ns]')
else:
continue
df[d.name] = vals.astype('int64')
return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph | Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated. | Below is the instruction that describes the task:
### Input:
Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated.
### Response:
def get_agg_data(cls, obj, category=None):
"""
Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated.
"""
paths = []
if isinstance(obj, Graph):
obj = obj.edgepaths
kdims = list(obj.kdims)
vdims = list(obj.vdims)
dims = obj.dimensions()[:2]
if isinstance(obj, Path):
glyph = 'line'
for p in obj.split(datatype='dataframe'):
paths.append(p)
elif isinstance(obj, CompositeOverlay):
element = None
for key, el in obj.data.items():
x, y, element, glyph = cls.get_agg_data(el)
dims = (x, y)
df = PandasInterface.as_dframe(element)
if isinstance(obj, NdOverlay):
df = df.assign(**dict(zip(obj.dimensions('key', True), key)))
paths.append(df)
if element is None:
dims = None
else:
kdims += element.kdims
vdims = element.vdims
elif isinstance(obj, Element):
glyph = 'line' if isinstance(obj, Curve) else 'points'
paths.append(PandasInterface.as_dframe(obj))
if dims is None or len(dims) != 2:
return None, None, None, None
else:
x, y = dims
if len(paths) > 1:
if glyph == 'line':
path = paths[0][:1]
if isinstance(path, dd.DataFrame):
path = path.compute()
empty = path.copy()
empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
paths = [elem for p in paths for elem in (p, empty)][:-1]
if all(isinstance(path, dd.DataFrame) for path in paths):
df = dd.concat(paths)
else:
paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
df = pd.concat(paths)
else:
df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])
if category and df[category].dtype.name != 'category':
df[category] = df[category].astype('category')
is_dask = isinstance(df, dd.DataFrame)
if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
df[d.name].dtype.kind == 'M' for d in (x, y)):
df = df.copy()
for d in (x, y):
vals = df[d.name]
if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types):
vals = cftime_to_timestamp(vals, 'ns')
elif df[d.name].dtype.kind == 'M':
vals = vals.astype('datetime64[ns]')
else:
continue
df[d.name] = vals.astype('int64')
return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph |
def getNamespace(self, prefix):
"""prefix -- deference namespace prefix in node's context.
Ascends parent nodes until found.
"""
namespace = None
if prefix == 'xmlns':
namespace = DOM.findDefaultNS(prefix, self.__node)
else:
try:
namespace = DOM.findNamespaceURI(prefix, self.__node)
except DOMException, ex:
if prefix != 'xml':
raise SchemaError, '%s namespace not declared for %s'\
%(prefix, self.__node._get_tagName())
namespace = XMLNS.XML
return namespace | prefix -- deference namespace prefix in node's context.
Ascends parent nodes until found. | Below is the instruction that describes the task:
### Input:
prefix -- deference namespace prefix in node's context.
Ascends parent nodes until found.
### Response:
def getNamespace(self, prefix):
"""prefix -- deference namespace prefix in node's context.
Ascends parent nodes until found.
"""
namespace = None
if prefix == 'xmlns':
namespace = DOM.findDefaultNS(prefix, self.__node)
else:
try:
namespace = DOM.findNamespaceURI(prefix, self.__node)
except DOMException, ex:
if prefix != 'xml':
raise SchemaError, '%s namespace not declared for %s'\
%(prefix, self.__node._get_tagName())
namespace = XMLNS.XML
return namespace |
def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials."""
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
        return result | Checks authorization of a rule against the target and credentials. | Below is the instruction that describes the task:
### Input:
Checks authorization of a rule against the target and credentials.
### Response:
def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials."""
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
return result |
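A minimal usage sketch (the enforcer instance, rule name and credential layout are hypothetical):

allowed = enforcer.enforce('compute:start',
                           {'project_id': 'demo'},
                           {'roles': ['member'], 'project_id': 'demo'})
# Returns True/False; raises PolicyNotAuthorized (or the given exc) when
# raise_error is set and the check fails.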
def absence_info(self):
"""Return information about the user's absences."""
# FIXME: remove recursive dep
from ..eighth.models import EighthSignup
        return EighthSignup.objects.filter(user=self, was_absent=True, scheduled_activity__attendance_taken=True) | Return information about the user's absences. | Below is the instruction that describes the task:
### Input:
Return information about the user's absences.
### Response:
def absence_info(self):
"""Return information about the user's absences."""
# FIXME: remove recursive dep
from ..eighth.models import EighthSignup
return EighthSignup.objects.filter(user=self, was_absent=True, scheduled_activity__attendance_taken=True) |
def users(self, username=None, pk=None, **kwargs):
"""
Users of KE-chain.
Provide a list of :class:`User`s of KE-chain. You can filter on username or id or any other advanced filter.
:param username: (optional) username to filter
:type username: basestring or None
:param pk: (optional) id of the user to filter
:type pk: basestring or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Users`
:raises NotFoundError: when a user could not be found
"""
request_params = {
'username': username,
'pk': pk,
}
if kwargs:
request_params.update(**kwargs)
r = self._request('GET', self._build_url('users'), params=request_params)
if r.status_code != requests.codes.ok: # pragma: no cover
raise NotFoundError("Could not find users: '{}'".format(r.json()))
data = r.json()
return [User(user, client=self) for user in data['results']] | Users of KE-chain.
Provide a list of :class:`User`s of KE-chain. You can filter on username or id or any other advanced filter.
:param username: (optional) username to filter
:type username: basestring or None
:param pk: (optional) id of the user to filter
:type pk: basestring or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Users`
:raises NotFoundError: when a user could not be found | Below is the instruction that describes the task:
### Input:
Users of KE-chain.
Provide a list of :class:`User`s of KE-chain. You can filter on username or id or any other advanced filter.
:param username: (optional) username to filter
:type username: basestring or None
:param pk: (optional) id of the user to filter
:type pk: basestring or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Users`
:raises NotFoundError: when a user could not be found
### Response:
def users(self, username=None, pk=None, **kwargs):
"""
Users of KE-chain.
Provide a list of :class:`User`s of KE-chain. You can filter on username or id or any other advanced filter.
:param username: (optional) username to filter
:type username: basestring or None
:param pk: (optional) id of the user to filter
:type pk: basestring or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Users`
:raises NotFoundError: when a user could not be found
"""
request_params = {
'username': username,
'pk': pk,
}
if kwargs:
request_params.update(**kwargs)
r = self._request('GET', self._build_url('users'), params=request_params)
if r.status_code != requests.codes.ok: # pragma: no cover
raise NotFoundError("Could not find users: '{}'".format(r.json()))
data = r.json()
return [User(user, client=self) for user in data['results']] |
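A usage sketch for the method above (the client instance and filter values are hypothetical):

matches = client.users(username='jdoe')  # list of User objects matching the username
user = client.users(pk='1234')[0]        # single user looked up by id
# A failed request raises NotFoundError.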
def date_to_string(date):
"""Transform a date or datetime object into a string and return it.
Examples:
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
'2012-01-03T12:23:34+00:00'
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
'2012-01-03T12:23:34'
>>> date_to_string(datetime.date(2012, 1, 3))
'2012-01-03'
"""
if isinstance(date, datetime.datetime):
# Create an ISO 8601 datetime string
date_str = date.strftime('%Y-%m-%dT%H:%M:%S')
tzstr = date.strftime('%z')
if tzstr:
# Yes, this is ugly. And no, I haven't found a better way to have a
# truly ISO 8601 datetime with timezone in Python.
date_str = '%s%s:%s' % (date_str, tzstr[0:3], tzstr[3:5])
elif isinstance(date, datetime.date):
# Create an ISO 8601 date string
date_str = date.strftime('%Y-%m-%d')
else:
raise TypeError('Argument is not a date or datetime. ')
return date_str | Transform a date or datetime object into a string and return it.
Examples:
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
'2012-01-03T12:23:34+00:00'
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
'2012-01-03T12:23:34'
>>> date_to_string(datetime.date(2012, 1, 3))
    '2012-01-03' | Below is the instruction that describes the task:
### Input:
Transform a date or datetime object into a string and return it.
Examples:
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
'2012-01-03T12:23:34+00:00'
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
'2012-01-03T12:23:34'
>>> date_to_string(datetime.date(2012, 1, 3))
'2012-01-03'
### Response:
def date_to_string(date):
"""Transform a date or datetime object into a string and return it.
Examples:
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
'2012-01-03T12:23:34+00:00'
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
'2012-01-03T12:23:34'
>>> date_to_string(datetime.date(2012, 1, 3))
'2012-01-03'
"""
if isinstance(date, datetime.datetime):
# Create an ISO 8601 datetime string
date_str = date.strftime('%Y-%m-%dT%H:%M:%S')
tzstr = date.strftime('%z')
if tzstr:
# Yes, this is ugly. And no, I haven't found a better way to have a
# truly ISO 8601 datetime with timezone in Python.
date_str = '%s%s:%s' % (date_str, tzstr[0:3], tzstr[3:5])
elif isinstance(date, datetime.date):
# Create an ISO 8601 date string
date_str = date.strftime('%Y-%m-%d')
else:
raise TypeError('Argument is not a date or datetime. ')
return date_str |
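The timezone branch above reshapes strftime's '+0200'-style offset into '+02:00'; a quick check with a fixed-offset timezone from the standard library:

from datetime import datetime, timedelta, timezone
date_to_string(datetime(2012, 1, 3, 12, 23, 34, tzinfo=timezone(timedelta(hours=2))))
# -> '2012-01-03T12:23:34+02:00'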
def ClientCertFromCSR(cls, csr):
"""Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
The signed cert.
"""
builder = x509.CertificateBuilder()
# Use the client CN for a cert serial_id. This will ensure we do
# not have clashing cert id.
common_name = csr.GetCN()
serial = int(common_name.split(".")[1], 16)
builder = builder.serial_number(serial)
builder = builder.subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))
now = rdfvalue.RDFDatetime.Now()
now_plus_year = now + rdfvalue.Duration("52w")
builder = builder.not_valid_after(now_plus_year.AsDatetime())
now_minus_ten = now - rdfvalue.Duration("10s")
builder = builder.not_valid_before(now_minus_ten.AsDatetime())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_cert = config_lib._CONFIG["CA.certificate"]
# pylint: enable=protected-access
builder = builder.issuer_name(ca_cert.GetIssuer())
builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_key = config_lib._CONFIG["PrivateKeys.ca_key"]
# pylint: enable=protected-access
return RDFX509Cert(
builder.sign(
private_key=ca_key.GetRawPrivateKey(),
algorithm=hashes.SHA256(),
backend=openssl.backend)) | Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
    The signed cert. | Below is the instruction that describes the task:
### Input:
Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
The signed cert.
### Response:
def ClientCertFromCSR(cls, csr):
"""Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
The signed cert.
"""
builder = x509.CertificateBuilder()
# Use the client CN for a cert serial_id. This will ensure we do
# not have clashing cert id.
common_name = csr.GetCN()
serial = int(common_name.split(".")[1], 16)
builder = builder.serial_number(serial)
builder = builder.subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))
now = rdfvalue.RDFDatetime.Now()
now_plus_year = now + rdfvalue.Duration("52w")
builder = builder.not_valid_after(now_plus_year.AsDatetime())
now_minus_ten = now - rdfvalue.Duration("10s")
builder = builder.not_valid_before(now_minus_ten.AsDatetime())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_cert = config_lib._CONFIG["CA.certificate"]
# pylint: enable=protected-access
builder = builder.issuer_name(ca_cert.GetIssuer())
builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_key = config_lib._CONFIG["PrivateKeys.ca_key"]
# pylint: enable=protected-access
return RDFX509Cert(
builder.sign(
private_key=ca_key.GetRawPrivateKey(),
algorithm=hashes.SHA256(),
backend=openssl.backend)) |
def attr_gen(self, attr):
"""Returns attribute generator that yields namedtuples containing
path value pairs
Parameters
----------
attr : str
Name of the search attribute
Returns
-------
attr_generator : generator
Returns a generator that yields named tuples with field names
path and value.
Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5
"""
HiisiHDF._clear_cache()
HiisiHDF.CACHE['search_attribute'] = attr
HiisiHDF._find_attr_paths('/', self['/']) # Check root attributes
self.visititems(HiisiHDF._find_attr_paths)
path_attr_gen = (PathValue(attr_path, self[attr_path].attrs.get(attr)) for attr_path in HiisiHDF.CACHE['attribute_paths'])
return path_attr_gen | Returns attribute generator that yields namedtuples containing
path value pairs
Parameters
----------
attr : str
Name of the search attribute
Returns
-------
attr_generator : generator
Returns a generator that yields named tuples with field names
path and value.
Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5 | Below is the instruction that describes the task:
### Input:
Returns attribute generator that yields namedtuples containing
path value pairs
Parameters
----------
attr : str
Name of the search attribute
Returns
-------
attr_generator : generator
Returns a generator that yields named tuples with field names
path and value.
Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5
### Response:
def attr_gen(self, attr):
"""Returns attribute generator that yields namedtuples containing
path value pairs
Parameters
----------
attr : str
Name of the search attribute
Returns
-------
attr_generator : generator
Returns a generator that yields named tuples with field names
path and value.
Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5
"""
HiisiHDF._clear_cache()
HiisiHDF.CACHE['search_attribute'] = attr
HiisiHDF._find_attr_paths('/', self['/']) # Check root attributes
self.visititems(HiisiHDF._find_attr_paths)
path_attr_gen = (PathValue(attr_path, self[attr_path].attrs.get(attr)) for attr_path in HiisiHDF.CACHE['attribute_paths'])
return path_attr_gen |
def ss_reg(self):
"""Sum of squares of the regression."""
    return np.sum(np.square(self.predicted - self.ybar), axis=0) | Sum of squares of the regression. | Below is the instruction that describes the task:
### Input:
Sum of squares of the regression.
### Response:
def ss_reg(self):
"""Sum of squares of the regression."""
return np.sum(np.square(self.predicted - self.ybar), axis=0) |
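In other words the property computes SS_reg = sum((y_hat - y_bar)**2) per column; a tiny numeric sketch with the same attribute names:

import numpy as np
predicted = np.array([1.0, 2.0, 3.0])        # y_hat
ybar = 2.0                                   # mean of the observed y
np.sum(np.square(predicted - ybar), axis=0)  # -> 2.0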
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
U0 = np.sign(self.block_sep0(self.Y)) / self.rho
U1 = self.block_sep1(self.Y) - self.S
            return self.block_cat(U0, U1) | Return initialiser for working variable U. | Below is the instruction that describes the task:
### Input:
Return initialiser for working variable U.
### Response:
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
U0 = np.sign(self.block_sep0(self.Y)) / self.rho
U1 = self.block_sep1(self.Y) - self.S
return self.block_cat(U0, U1) |
def alphabetical_formula(self):
"""
Returns a reduced formula string with appended charge
"""
alph_formula = super().alphabetical_formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
        return alph_formula + chg_str | Returns a reduced formula string with appended charge | Below is the instruction that describes the task:
### Input:
Returns a reduced formula string with appended charge
### Response:
def alphabetical_formula(self):
"""
Returns a reduced formula string with appended charge
"""
alph_formula = super().alphabetical_formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return alph_formula + chg_str |
def to_nifti(obj, like=None, header=None, affine=None, extensions=Ellipsis, version=1):
'''
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
    * header (default: None) may be a Nifti1 or Nifti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
respectively.
'''
from neuropythy.mri import Subject
obj0 = obj
# First go from like to explicit versions of affine and header:
if like is not None:
if isinstance(like, nib.analyze.AnalyzeHeader) or \
isinstance(like, nib.freesurfer.mghformat.MGHHeader):
if header is None: header = like
elif isinstance(like, nib.analyze.SpatialImage):
if header is None: header = like.header
if affine is None: affine = like.affine
elif isinstance(like, Subject):
if affine is None: affine = like.voxel_to_native_matrix
else:
raise ValueError('Could not interpret like argument with type %s' % type(like))
# check to make sure that we have to change something:
elif ((version == 1 and isinstance(obj, nib.nifti1.Nifti1Image)) or
(version == 2 and isinstance(obj, nib.nifti2.Nifti2Image))):
if ((header is None or obj.header is header) and
(extensions is Ellipsis or extensions is obj.header.extensions or
(extensions is None and len(obj.header.extensions) == 0))):
return obj
# okay, now look at the header and affine etc.
if header is None:
if isinstance(obj, nib.analyze.SpatialImage):
header = obj.header
else:
header = nib.nifti1.Nifti1Header() if version == 1 else nib.nifti2.Nifti2Header()
if affine is None:
if isinstance(obj, nib.analyze.SpatialImage):
affine = obj.affine
else:
affine = np.eye(4)
if extensions is None:
extensions = nib.nifti1.Nifti1Extensions()
# Figure out what the data is
if isinstance(obj, nib.analyze.SpatialImage):
obj = obj.dataobj
elif not pimms.is_nparray(obj):
obj = np.asarray(obj)
if len(obj.shape) < 3: obj = np.asarray([[obj]])
# Okay, make a new object now...
if version == 1:
obj = nib.nifti1.Nifti1Image(obj, affine, header)
elif version == 2:
obj = nib.nifti2.Nifti2Image(obj, affine, header)
else:
raise ValueError('invalid version given (should be 1 or 2): %s' % version)
# add the extensions if they're needed
if extensions is not Ellipsis and (len(extensions) > 0 or len(obj.header.extensions) > 0):
obj.header.extensions = extensions
# Okay, that's it!
return obj | to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
    * header (default: None) may be a Nifti1 or Nifti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
      respectively. | Below is the instruction that describes the task:
### Input:
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
    * header (default: None) may be a Nifti1 or Nifti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
respectively.
### Response:
def to_nifti(obj, like=None, header=None, affine=None, extensions=Ellipsis, version=1):
'''
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
    * header (default: None) may be a Nifti1 or Nifti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
respectively.
'''
from neuropythy.mri import Subject
obj0 = obj
# First go from like to explicit versions of affine and header:
if like is not None:
if isinstance(like, nib.analyze.AnalyzeHeader) or \
isinstance(like, nib.freesurfer.mghformat.MGHHeader):
if header is None: header = like
elif isinstance(like, nib.analyze.SpatialImage):
if header is None: header = like.header
if affine is None: affine = like.affine
elif isinstance(like, Subject):
if affine is None: affine = like.voxel_to_native_matrix
else:
raise ValueError('Could not interpret like argument with type %s' % type(like))
# check to make sure that we have to change something:
elif ((version == 1 and isinstance(obj, nib.nifti1.Nifti1Image)) or
(version == 2 and isinstance(obj, nib.nifti2.Nifti2Image))):
if ((header is None or obj.header is header) and
(extensions is Ellipsis or extensions is obj.header.extensions or
(extensions is None and len(obj.header.extensions) == 0))):
return obj
# okay, now look at the header and affine etc.
if header is None:
if isinstance(obj, nib.analyze.SpatialImage):
header = obj.header
else:
header = nib.nifti1.Nifti1Header() if version == 1 else nib.nifti2.Nifti2Header()
if affine is None:
if isinstance(obj, nib.analyze.SpatialImage):
affine = obj.affine
else:
affine = np.eye(4)
if extensions is None:
extensions = nib.nifti1.Nifti1Extensions()
# Figure out what the data is
if isinstance(obj, nib.analyze.SpatialImage):
obj = obj.dataobj
elif not pimms.is_nparray(obj):
obj = np.asarray(obj)
if len(obj.shape) < 3: obj = np.asarray([[obj]])
# Okay, make a new object now...
if version == 1:
obj = nib.nifti1.Nifti1Image(obj, affine, header)
elif version == 2:
obj = nib.nifti2.Nifti2Image(obj, affine, header)
else:
raise ValueError('invalid version given (should be 1 or 2): %s' % version)
# add the extensions if they're needed
if extensions is not Ellipsis and (len(extensions) > 0 or len(obj.header.extensions) > 0):
obj.header.extensions = extensions
# Okay, that's it!
return obj |
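A minimal usage sketch (array shape and options are arbitrary):

import numpy as np
data = np.zeros((4, 4, 4))
img = to_nifti(data)              # Nifti1Image with an identity affine and default header
img2 = to_nifti(data, version=2)  # Nifti2Image instead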
def _get_bandwidth_price_id(items,
hourly=True,
no_public=False,
location=None):
"""Choose a valid price id for bandwidth."""
# Prefer pay-for-use data transfer with hourly
for item in items:
capacity = float(item.get('capacity', 0))
# Hourly and private only do pay-as-you-go bandwidth
if any([utils.lookup(item,
'itemCategory',
'categoryCode') != 'bandwidth',
(hourly or no_public) and capacity != 0.0,
not (hourly or no_public) and capacity == 0.0]):
continue
for price in item['prices']:
if not _matches_billing(price, hourly):
continue
if not _matches_location(price, location):
continue
return price['id']
raise SoftLayer.SoftLayerError(
"Could not find valid price for bandwidth option") | Choose a valid price id for bandwidth. | Below is the the instruction that describes the task:
### Input:
Choose a valid price id for bandwidth.
### Response:
def _get_bandwidth_price_id(items,
hourly=True,
no_public=False,
location=None):
"""Choose a valid price id for bandwidth."""
# Prefer pay-for-use data transfer with hourly
for item in items:
capacity = float(item.get('capacity', 0))
# Hourly and private only do pay-as-you-go bandwidth
if any([utils.lookup(item,
'itemCategory',
'categoryCode') != 'bandwidth',
(hourly or no_public) and capacity != 0.0,
not (hourly or no_public) and capacity == 0.0]):
continue
for price in item['prices']:
if not _matches_billing(price, hourly):
continue
if not _matches_location(price, location):
continue
return price['id']
raise SoftLayer.SoftLayerError(
"Could not find valid price for bandwidth option") |
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
template = (
u'transform{{'
u'it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([ {} ])'
u'}}')
field_representations = (
u'{name}: {expr}'.format(name=key, expr=self.fields[key].to_gremlin())
for key in sorted(self.fields.keys()) # Sort the keys for deterministic output order.
)
    return template.format(u', '.join(field_representations)) | Return a unicode object with the Gremlin representation of this block. | Below is the instruction that describes the task:
### Input:
Return a unicode object with the Gremlin representation of this block.
### Response:
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
template = (
u'transform{{'
u'it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([ {} ])'
u'}}')
field_representations = (
u'{name}: {expr}'.format(name=key, expr=self.fields[key].to_gremlin())
for key in sorted(self.fields.keys()) # Sort the keys for deterministic output order.
)
return template.format(u', '.join(field_representations)) |
def findall(self, string):
""" Parse string, returning all outputs as parsed by functions
"""
output = []
for match in self.pattern.findall(string):
if hasattr(match, 'strip'):
match = [match]
self._list_add(output, self.run(match))
        return output | Parse string, returning all outputs as parsed by functions | Below is the instruction that describes the task:
### Input:
Parse string, returning all outputs as parsed by functions
### Response:
def findall(self, string):
""" Parse string, returning all outputs as parsed by functions
"""
output = []
for match in self.pattern.findall(string):
if hasattr(match, 'strip'):
match = [match]
self._list_add(output, self.run(match))
return output |
def connect_to_syslog(address=None, facility=None, level=None):
"""
Create a :class:`~logging.handlers.SysLogHandler`.
:param address: The device file or network address of the system logging
daemon (a string or tuple, defaults to the result of
:func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the
system logging daemon is unavailable).
The process of connecting to the system logging daemon goes as follows:
- If :class:`~logging.handlers.SysLogHandler` supports the `socktype`
option (it does since Python 2.7) the following two socket types are
tried (in decreasing preference):
1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may
not be supported.
2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
default (which is UDP).
- If socket types are not supported Python's (2.6) defaults are used to
connect to the selected `address`.
"""
if not address:
address = find_syslog_address()
if facility is None:
facility = logging.handlers.SysLogHandler.LOG_USER
if level is None:
level = DEFAULT_LOG_LEVEL
for socktype in socket.SOCK_RAW, socket.SOCK_STREAM, None:
kw = dict(facility=facility, address=address)
if socktype is not None:
kw['socktype'] = socktype
try:
handler = logging.handlers.SysLogHandler(**kw)
except (IOError, TypeError):
# The socktype argument was added in Python 2.7 and its use will raise a
# TypeError exception on Python 2.6. IOError is a superclass of socket.error
# (since Python 2.6) which can be raised if the system logging daemon is
# unavailable.
pass
else:
handler.setLevel(level_to_number(level))
return handler | Create a :class:`~logging.handlers.SysLogHandler`.
:param address: The device file or network address of the system logging
daemon (a string or tuple, defaults to the result of
:func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the
system logging daemon is unavailable).
The process of connecting to the system logging daemon goes as follows:
- If :class:`~logging.handlers.SysLogHandler` supports the `socktype`
option (it does since Python 2.7) the following two socket types are
tried (in decreasing preference):
1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may
not be supported.
2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
default (which is UDP).
- If socket types are not supported Python's (2.6) defaults are used to
      connect to the selected `address`. | Below is the instruction that describes the task:
### Input:
Create a :class:`~logging.handlers.SysLogHandler`.
:param address: The device file or network address of the system logging
daemon (a string or tuple, defaults to the result of
:func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the
system logging daemon is unavailable).
The process of connecting to the system logging daemon goes as follows:
- If :class:`~logging.handlers.SysLogHandler` supports the `socktype`
option (it does since Python 2.7) the following two socket types are
tried (in decreasing preference):
1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may
not be supported.
2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
default (which is UDP).
- If socket types are not supported Python's (2.6) defaults are used to
connect to the selected `address`.
### Response:
def connect_to_syslog(address=None, facility=None, level=None):
"""
Create a :class:`~logging.handlers.SysLogHandler`.
:param address: The device file or network address of the system logging
daemon (a string or tuple, defaults to the result of
:func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the
system logging daemon is unavailable).
The process of connecting to the system logging daemon goes as follows:
- If :class:`~logging.handlers.SysLogHandler` supports the `socktype`
option (it does since Python 2.7) the following two socket types are
tried (in decreasing preference):
1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may
not be supported.
2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
default (which is UDP).
- If socket types are not supported Python's (2.6) defaults are used to
connect to the selected `address`.
"""
if not address:
address = find_syslog_address()
if facility is None:
facility = logging.handlers.SysLogHandler.LOG_USER
if level is None:
level = DEFAULT_LOG_LEVEL
for socktype in socket.SOCK_RAW, socket.SOCK_STREAM, None:
kw = dict(facility=facility, address=address)
if socktype is not None:
kw['socktype'] = socktype
try:
handler = logging.handlers.SysLogHandler(**kw)
except (IOError, TypeError):
# The socktype argument was added in Python 2.7 and its use will raise a
# TypeError exception on Python 2.6. IOError is a superclass of socket.error
# (since Python 2.6) which can be raised if the system logging daemon is
# unavailable.
pass
else:
handler.setLevel(level_to_number(level))
return handler |
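A usage sketch (logger name is arbitrary); the handler is only attached when the system logging daemon could actually be reached:

import logging
handler = connect_to_syslog(level=logging.INFO)
if handler:
    logging.getLogger('my-app').addHandler(handler)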
def _build_block_element_list(self):
"""Return a list of block elements, ordered from highest priority to lowest.
"""
return sorted(
[e for e in self.block_elements.values() if not e.virtual],
key=lambda e: e.priority,
reverse=True
    ) | Return a list of block elements, ordered from highest priority to lowest. | Below is the instruction that describes the task:
### Input:
Return a list of block elements, ordered from highest priority to lowest.
### Response:
def _build_block_element_list(self):
"""Return a list of block elements, ordered from highest priority to lowest.
"""
return sorted(
[e for e in self.block_elements.values() if not e.virtual],
key=lambda e: e.priority,
reverse=True
) |
def example_exc_handler(tries_remaining, exception, delay):
"""Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
"""
print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (
exception, tries_remaining, delay) | Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised. | Below is the instruction that describes the task:
### Input:
Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
### Response:
def example_exc_handler(tries_remaining, exception, delay):
"""Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
"""
print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (
exception, tries_remaining, delay) |
def _validate_isvalid_uncertainty(self, isvalid_uncertainty, field, value):
"""Checks for valid given value and appropriate units with uncertainty.
Args:
isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked
field (`str`): property associated with the quantity in question.
value (`list`): list with the string of the value of the quantity and a dictionary of
the uncertainty
The rule's arguments are validated against this schema:
{'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}}
"""
self._validate_isvalid_quantity(True, field, value)
# This len check is necessary for reasons that aren't quite clear to me
# Cerberus calls this validation method even when lists have only one element
# and should therefore be validated only by isvalid_quantity
if len(value) > 1 and value[1]['uncertainty-type'] != 'relative':
if value[1].get('uncertainty') is not None:
self._validate_isvalid_quantity(True, field, [value[1]['uncertainty']])
if value[1].get('upper-uncertainty') is not None:
self._validate_isvalid_quantity(True, field, [value[1]['upper-uncertainty']])
if value[1].get('lower-uncertainty') is not None:
self._validate_isvalid_quantity(True, field, [value[1]['lower-uncertainty']]) | Checks for valid given value and appropriate units with uncertainty.
Args:
isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked
field (`str`): property associated with the quantity in question.
value (`list`): list with the string of the value of the quantity and a dictionary of
the uncertainty
The rule's arguments are validated against this schema:
{'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}} | Below is the the instruction that describes the task:
### Input:
Checks for valid given value and appropriate units with uncertainty.
Args:
isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked
field (`str`): property associated with the quantity in question.
value (`list`): list with the string of the value of the quantity and a dictionary of
the uncertainty
The rule's arguments are validated against this schema:
{'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}}
### Response:
def _validate_isvalid_uncertainty(self, isvalid_uncertainty, field, value):
"""Checks for valid given value and appropriate units with uncertainty.
Args:
isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked
field (`str`): property associated with the quantity in question.
value (`list`): list with the string of the value of the quantity and a dictionary of
the uncertainty
The rule's arguments are validated against this schema:
{'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}}
"""
self._validate_isvalid_quantity(True, field, value)
# This len check is necessary for reasons that aren't quite clear to me
# Cerberus calls this validation method even when lists have only one element
# and should therefore be validated only by isvalid_quantity
if len(value) > 1 and value[1]['uncertainty-type'] != 'relative':
if value[1].get('uncertainty') is not None:
self._validate_isvalid_quantity(True, field, [value[1]['uncertainty']])
if value[1].get('upper-uncertainty') is not None:
self._validate_isvalid_quantity(True, field, [value[1]['upper-uncertainty']])
if value[1].get('lower-uncertainty') is not None:
self._validate_isvalid_quantity(True, field, [value[1]['lower-uncertainty']]) |
def _check_self_cls_assign(self, node):
"""Check that self/cls don't get assigned"""
assign_names = {
target.name
for target in node.targets
if isinstance(target, astroid.AssignName)
}
scope = node.scope()
nonlocals_with_same_name = any(
child
for child in scope.body
if isinstance(child, astroid.Nonlocal) and assign_names & set(child.names)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, astroid.scoped_nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
target_assign_names = (
target.name
for target in node.targets
if isinstance(target, astroid.node_classes.AssignName)
)
if self_cls_name in target_assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name)) | Check that self/cls don't get assigned | Below is the the instruction that describes the task:
### Input:
Check that self/cls don't get assigned
### Response:
def _check_self_cls_assign(self, node):
"""Check that self/cls don't get assigned"""
assign_names = {
target.name
for target in node.targets
if isinstance(target, astroid.AssignName)
}
scope = node.scope()
nonlocals_with_same_name = any(
child
for child in scope.body
if isinstance(child, astroid.Nonlocal) and assign_names & set(child.names)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, astroid.scoped_nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
target_assign_names = (
target.name
for target in node.targets
if isinstance(target, astroid.node_classes.AssignName)
)
if self_cls_name in target_assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name)) |
def run(self):
'''
Runs all enabled tests.
'''
# Run all tests.
for cls in self.get_test_classes():
# Print informational message.
self.logger.info('Running {cls.__name__} test...'.format(cls=cls))
# Create new test instance.
test = cls(runner=self)
# Run test and evaluate result.
if test._run():
self.logger.passed('Test {cls.__name__} succeeded!'.format(cls=cls))
else:
self.logger.failed('Test {cls.__name__} failed!'.format(cls=cls))
self.has_passed = False
# Print summary.
if self.has_passed:
self.logger.passed('Summary: All tests passed!')
else:
self.logger.failed('Summary: One or more tests failed!')
return self.has_passed | Runs all enabled tests. | Below is the the instruction that describes the task:
### Input:
Runs all enabled tests.
### Response:
def run(self):
'''
Runs all enabled tests.
'''
# Run all tests.
for cls in self.get_test_classes():
# Print informational message.
self.logger.info('Running {cls.__name__} test...'.format(cls=cls))
# Create new test instance.
test = cls(runner=self)
# Run test and evaluate result.
if test._run():
self.logger.passed('Test {cls.__name__} succeeded!'.format(cls=cls))
else:
self.logger.failed('Test {cls.__name__} failed!'.format(cls=cls))
self.has_passed = False
# Print summary.
if self.has_passed:
self.logger.passed('Summary: All tests passed!')
else:
self.logger.failed('Summary: One or more tests failed!')
return self.has_passed |
def update(self, ava):
"""
Implements the dict.update() method
"""
for key, val in ava.items():
self[key] = val | Implements the dict.update() method | Below is the the instruction that describes the task:
### Input:
Implements the dict.update() method
### Response:
def update(self, ava):
"""
Implements the dict.update() method
"""
for key, val in ava.items():
self[key] = val |
def findTextBackward(self, block, column, needle):
"""Search for a needle and return (block, column)
Raise ValueError, if not found
"""
if column is not None:
index = block.text()[:column].rfind(needle)
else:
index = block.text().rfind(needle)
if index != -1:
return block, index
for block in self.iterateBlocksBackFrom(block.previous()):
column = block.text().rfind(needle)
if column != -1:
return block, column
raise ValueError('Not found') | Search for a needle and return (block, column)
Raise ValueError, if not found | Below is the the instruction that describes the task:
### Input:
Search for a needle and return (block, column)
Raise ValueError, if not found
### Response:
def findTextBackward(self, block, column, needle):
"""Search for a needle and return (block, column)
Raise ValueError, if not found
"""
if column is not None:
index = block.text()[:column].rfind(needle)
else:
index = block.text().rfind(needle)
if index != -1:
return block, index
for block in self.iterateBlocksBackFrom(block.previous()):
column = block.text().rfind(needle)
if column != -1:
return block, column
raise ValueError('Not found') |
def setVisible(self, value):
"""Override Qt method to stops timers if widget is not visible."""
if self.timer is not None:
if value:
self.timer.start(self._interval)
else:
self.timer.stop()
super(BaseTimerStatus, self).setVisible(value) | Override Qt method to stop timers if widget is not visible. | Below is the the instruction that describes the task:
### Input:
Override Qt method to stop timers if widget is not visible.
### Response:
def setVisible(self, value):
"""Override Qt method to stops timers if widget is not visible."""
if self.timer is not None:
if value:
self.timer.start(self._interval)
else:
self.timer.stop()
super(BaseTimerStatus, self).setVisible(value) |
def shorrocks_index(A):
r"""
Implements Shorrocks mobility index
Parameters
-----------
A : array_like(float)
Square matrix with transition probabilities (mobility matrix) of
dimension m
Returns
--------
Shorrocks index: float
The Shorrocks mobility index calculated as
.. math::
s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1)
An index equal to 0 indicates complete immobility.
References
-----------
.. [1] Wealth distribution and social mobility in the US: A quantitative approach
(Benhabib, Bisin, Luo, 2017).
https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
"""
A = np.asarray(A) # Convert to array if not already
m, n = A.shape
if m != n:
raise ValueError('A must be a square matrix')
diag_sum = np.diag(A).sum()
return (m - diag_sum) / (m - 1) | r"""
Implements Shorrocks mobility index
Parameters
-----------
A : array_like(float)
Square matrix with transition probabilities (mobility matrix) of
dimension m
Returns
--------
Shorrocks index: float
The Shorrocks mobility index calculated as
.. math::
s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1)
An index equal to 0 indicates complete immobility.
References
-----------
.. [1] Wealth distribution and social mobility in the US: A quantitative approach
(Benhabib, Bisin, Luo, 2017).
https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf | Below is the the instruction that describes the task:
### Input:
r"""
Implements Shorrocks mobility index
Parameters
-----------
A : array_like(float)
Square matrix with transition probabilities (mobility matrix) of
dimension m
Returns
--------
Shorrocks index: float
The Shorrocks mobility index calculated as
.. math::
s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1)
An index equal to 0 indicates complete immobility.
References
-----------
.. [1] Wealth distribution and social mobility in the US: A quantitative approach
(Benhabib, Bisin, Luo, 2017).
https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
### Response:
def shorrocks_index(A):
r"""
Implements Shorrocks mobility index
Parameters
-----------
A : array_like(float)
Square matrix with transition probabilities (mobility matrix) of
dimension m
Returns
--------
Shorrocks index: float
The Shorrocks mobility index calculated as
.. math::
s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1)
An index equal to 0 indicates complete immobility.
References
-----------
.. [1] Wealth distribution and social mobility in the US: A quantitative approach
(Benhabib, Bisin, Luo, 2017).
https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
"""
A = np.asarray(A) # Convert to array if not already
m, n = A.shape
if m != n:
raise ValueError('A must be a square matrix')
diag_sum = np.diag(A).sum()
return (m - diag_sum) / (m - 1) |
def get_mac_address_table_input_request_type_get_next_request_mac_address_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_address_table = ET.Element("get_mac_address_table")
config = get_mac_address_table
input = ET.SubElement(get_mac_address_table, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
mac_address_type = ET.SubElement(get_next_request, "mac-address-type")
mac_address_type.text = kwargs.pop('mac_address_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_mac_address_table_input_request_type_get_next_request_mac_address_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_address_table = ET.Element("get_mac_address_table")
config = get_mac_address_table
input = ET.SubElement(get_mac_address_table, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
mac_address_type = ET.SubElement(get_next_request, "mac-address-type")
mac_address_type.text = kwargs.pop('mac_address_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def callback_parent(attr, old, new):
'''Update data directories drop down with new parent directory'''
import os
# Remove accidental white space if copy/pasted
new = new.strip()
parent_input.value = new
# Verify new parent path exists and update `datadirs_select` widget
if os.path.exists(new):
# Create sorted list of data directories, ignore files
joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])
# Update dropdown list of available data directories and select first
datadirs_select.options = options
datadirs_select.value = options[0]
callback_datadirs('value', options[0], options[0])
else:
msg = '''
The parent path `{}` does not exist.
Check that you have entered the absolute path.
'''.format(new)
output_window.text = output_template.format(msg)
return None | Update data directories drop down with new parent directory | Below is the the instruction that describes the task:
### Input:
Update data directories drop down with new parent directory
### Response:
def callback_parent(attr, old, new):
'''Update data directories drop down with new parent directory'''
import os
# Remove accidental white space if copy/pasted
new = new.strip()
parent_input.value = new
# Verify new parent path exists and update `datadirs_select` widget
if os.path.exists(new):
# Create sorted list of data directories, ignore files
joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])
# Update dropdown list of available data directories and select first
datadirs_select.options = options
datadirs_select.value = options[0]
callback_datadirs('value', options[0], options[0])
else:
msg = '''
The parent path `{}` does not exist.
Check that you have entered the absolute path.
'''.format(new)
output_window.text = output_template.format(msg)
return None |
def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,
get_overlapping=True):
"""Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dict)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
}
"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if not variant_obj:
variant_obj = store.variant(variant_id)
if add_case:
# fill in information for pileup view
variant_case(store, case_obj, variant_obj)
# frequencies
variant_obj['frequencies'] = [
('1000G', variant_obj.get('thousand_genomes_frequency')),
('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')),
('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')),
('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')),
('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')),
('ClinGen NGI', variant_obj.get('clingen_ngi')),
('SweGen', variant_obj.get('swegen')),
('Decipher', variant_obj.get('decipher')),
]
variant_obj['callers'] = callers(variant_obj, category='sv')
overlapping_snvs = []
if get_overlapping:
overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in
store.overlapping(variant_obj))
# parse_gene function is not called for SVs, but a link to ensembl gene is required
for gene_obj in variant_obj['genes']:
if gene_obj.get('common'):
ensembl_id = gene_obj['common']['ensembl_id']
try:
build = int(gene_obj['common'].get('build','37'))
except Exception:
build = 37
gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build)
variant_obj['comments'] = store.events(institute_obj, case=case_obj,
variant_id=variant_obj['variant_id'], comments=True)
case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))
if variant_id in case_clinvars:
variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig']
if not 'end_chrom' in variant_obj:
variant_obj['end_chrom'] = variant_obj['chromosome']
return {
'institute': institute_obj,
'case': case_obj,
'variant': variant_obj,
'overlapping_snvs': overlapping_snvs,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
} | Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dict)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
} | Below is the the instruction that describes the task:
### Input:
Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dict)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
}
### Response:
def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,
get_overlapping=True):
"""Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dict)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
}
"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if not variant_obj:
variant_obj = store.variant(variant_id)
if add_case:
# fill in information for pileup view
variant_case(store, case_obj, variant_obj)
# frequencies
variant_obj['frequencies'] = [
('1000G', variant_obj.get('thousand_genomes_frequency')),
('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')),
('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')),
('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')),
('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')),
('ClinGen NGI', variant_obj.get('clingen_ngi')),
('SweGen', variant_obj.get('swegen')),
('Decipher', variant_obj.get('decipher')),
]
variant_obj['callers'] = callers(variant_obj, category='sv')
overlapping_snvs = []
if get_overlapping:
overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in
store.overlapping(variant_obj))
# parse_gene function is not called for SVs, but a link to ensembl gene is required
for gene_obj in variant_obj['genes']:
if gene_obj.get('common'):
ensembl_id = gene_obj['common']['ensembl_id']
try:
build = int(gene_obj['common'].get('build','37'))
except Exception:
build = 37
gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build)
variant_obj['comments'] = store.events(institute_obj, case=case_obj,
variant_id=variant_obj['variant_id'], comments=True)
case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))
if variant_id in case_clinvars:
variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig']
if not 'end_chrom' in variant_obj:
variant_obj['end_chrom'] = variant_obj['chromosome']
return {
'institute': institute_obj,
'case': case_obj,
'variant': variant_obj,
'overlapping_snvs': overlapping_snvs,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
} |
def charge(self, cart, request):
"""
Use the Stripe token from the request and charge immediately.
This view is invoked by the Javascript function `scope.charge()` delivered
by `get_payment_request`.
"""
token_id = cart.extra['payment_extra_data']['token_id']
if LooseVersion(SHOP_VERSION) < LooseVersion('0.11'):
charge = stripe.Charge.create(
amount=cart.total.as_integer(),
currency=cart.total.currency,
source=token_id,
description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION']
)
if charge['status'] == 'succeeded':
order = OrderModel.objects.create_from_cart(cart, request)
order.add_stripe_payment(charge)
order.save()
else:
order = OrderModel.objects.create_from_cart(cart, request)
charge = stripe.Charge.create(
amount=cart.total.as_integer(),
currency=cart.total.currency,
source=token_id,
transfer_group=order.get_number(),
description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION'],
)
if charge['status'] == 'succeeded':
order.populate_from_cart(cart, request)
order.add_stripe_payment(charge)
order.save()
if charge['status'] != 'succeeded':
msg = "Stripe returned status '{status}' for id: {id}"
raise stripe.error.InvalidRequestError(msg.format(**charge)) | Use the Stripe token from the request and charge immediately.
This view is invoked by the Javascript function `scope.charge()` delivered
by `get_payment_request`. | Below is the the instruction that describes the task:
### Input:
Use the Stripe token from the request and charge immediately.
This view is invoked by the Javascript function `scope.charge()` delivered
by `get_payment_request`.
### Response:
def charge(self, cart, request):
"""
Use the Stripe token from the request and charge immediately.
This view is invoked by the Javascript function `scope.charge()` delivered
by `get_payment_request`.
"""
token_id = cart.extra['payment_extra_data']['token_id']
if LooseVersion(SHOP_VERSION) < LooseVersion('0.11'):
charge = stripe.Charge.create(
amount=cart.total.as_integer(),
currency=cart.total.currency,
source=token_id,
description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION']
)
if charge['status'] == 'succeeded':
order = OrderModel.objects.create_from_cart(cart, request)
order.add_stripe_payment(charge)
order.save()
else:
order = OrderModel.objects.create_from_cart(cart, request)
charge = stripe.Charge.create(
amount=cart.total.as_integer(),
currency=cart.total.currency,
source=token_id,
transfer_group=order.get_number(),
description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION'],
)
if charge['status'] == 'succeeded':
order.populate_from_cart(cart, request)
order.add_stripe_payment(charge)
order.save()
if charge['status'] != 'succeeded':
msg = "Stripe returned status '{status}' for id: {id}"
raise stripe.error.InvalidRequestError(msg.format(**charge)) |
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True | If confidence >= verbose, category passes filter and is not suppressed. | Below is the the instruction that describes the task:
### Input:
If confidence >= verbose, category passes filter and is not suppressed.
### Response:
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True |
def get_context(line, pos):
"""
computes start and end position of substring of line that is the
command string under given position
"""
commands = split_commandline(line) + ['']
i = 0
start = 0
end = len(commands[i])
while pos > end:
i += 1
start = end + 1
end += 1 + len(commands[i])
return start, end | computes start and end position of substring of line that is the
command string under given position | Below is the the instruction that describes the task:
### Input:
computes start and end position of substring of line that is the
command string under given position
### Response:
def get_context(line, pos):
"""
computes start and end position of substring of line that is the
command string under given position
"""
commands = split_commandline(line) + ['']
i = 0
start = 0
end = len(commands[i])
while pos > end:
i += 1
start = end + 1
end += 1 + len(commands[i])
return start, end |
def check_geographic_region(self, ds):
"""
6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
axis should be used to identify the regions.
Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
that the label values are standardized the variable that contains the labels must be given the standard_name
attribute with the value region.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
region_list = [ # TODO maybe move this (and other info like it) into a config file?
'africa',
'antarctica',
'arabian_sea',
'aral_sea',
'arctic_ocean',
'asia',
'atlantic_ocean',
'australia',
'baltic_sea',
'barents_opening',
'barents_sea',
'beaufort_sea',
'bellingshausen_sea',
'bering_sea',
'bering_strait',
'black_sea',
'canadian_archipelago',
'caribbean_sea',
'caspian_sea',
'central_america',
'chukchi_sea',
'contiguous_united_states',
'denmark_strait',
'drake_passage',
'east_china_sea',
'english_channel',
'eurasia',
'europe',
'faroe_scotland_channel',
'florida_bahamas_strait',
'fram_strait',
'global',
'global_land',
'global_ocean',
'great_lakes',
'greenland',
'gulf_of_alaska',
'gulf_of_mexico',
'hudson_bay',
'iceland_faroe_channel',
'indian_ocean',
'indonesian_throughflow',
'indo_pacific_ocean',
'irish_sea',
'lake_baykal',
'lake_chad',
'lake_malawi',
'lake_tanganyika',
'lake_victoria',
'mediterranean_sea',
'mozambique_channel',
'north_america',
'north_sea',
'norwegian_sea',
'pacific_equatorial_undercurrent',
'pacific_ocean',
'persian_gulf',
'red_sea',
'ross_sea',
'sea_of_japan',
'sea_of_okhotsk',
'south_america',
'south_china_sea',
'southern_ocean',
'taiwan_luzon_straits',
'weddell_sea',
'windward_passage',
'yellow_sea'
]
for var in ds.get_variables_by_attributes(standard_name='region'):
valid_region = TestCtx(BaseCheck.MEDIUM, self.section_titles["6.1"])
region = var[:]
if np.ma.isMA(region):
region = region.data
valid_region.assert_true(''.join(region.astype(str)).lower() in region_list,
"6.1.1 '{}' specified by '{}' is not a valid region".format(
''.join(region.astype(str)), var.name
)
)
ret_val.append(valid_region.to_result())
return ret_val | 6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
axis should be used to identify the regions.
Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
that the label values are standardized the variable that contains the labels must be given the standard_name
attribute with the value region.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results | Below is the the instruction that describes the task:
### Input:
6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
axis should be used to identify the regions.
Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
that the label values are standardized the variable that contains the labels must be given the standard_name
attribute with the value region.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
### Response:
def check_geographic_region(self, ds):
"""
6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
axis should be used to identify the regions.
Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
that the label values are standardized the variable that contains the labels must be given the standard_name
attribute with the value region.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
region_list = [ # TODO maybe move this (and other info like it) into a config file?
'africa',
'antarctica',
'arabian_sea',
'aral_sea',
'arctic_ocean',
'asia',
'atlantic_ocean',
'australia',
'baltic_sea',
'barents_opening',
'barents_sea',
'beaufort_sea',
'bellingshausen_sea',
'bering_sea',
'bering_strait',
'black_sea',
'canadian_archipelago',
'caribbean_sea',
'caspian_sea',
'central_america',
'chukchi_sea',
'contiguous_united_states',
'denmark_strait',
'drake_passage',
'east_china_sea',
'english_channel',
'eurasia',
'europe',
'faroe_scotland_channel',
'florida_bahamas_strait',
'fram_strait',
'global',
'global_land',
'global_ocean',
'great_lakes',
'greenland',
'gulf_of_alaska',
'gulf_of_mexico',
'hudson_bay',
'iceland_faroe_channel',
'indian_ocean',
'indonesian_throughflow',
'indo_pacific_ocean',
'irish_sea',
'lake_baykal',
'lake_chad',
'lake_malawi',
'lake_tanganyika',
'lake_victoria',
'mediterranean_sea',
'mozambique_channel',
'north_america',
'north_sea',
'norwegian_sea',
'pacific_equatorial_undercurrent',
'pacific_ocean',
'persian_gulf',
'red_sea',
'ross_sea',
'sea_of_japan',
'sea_of_okhotsk',
'south_america',
'south_china_sea',
'southern_ocean',
'taiwan_luzon_straits',
'weddell_sea',
'windward_passage',
'yellow_sea'
]
for var in ds.get_variables_by_attributes(standard_name='region'):
valid_region = TestCtx(BaseCheck.MEDIUM, self.section_titles["6.1"])
region = var[:]
if np.ma.isMA(region):
region = region.data
valid_region.assert_true(''.join(region.astype(str)).lower() in region_list,
"6.1.1 '{}' specified by '{}' is not a valid region".format(
''.join(region.astype(str)), var.name
)
)
ret_val.append(valid_region.to_result())
return ret_val |
def save_sequence_rule(self, sequence_rule_form, *args, **kwargs):
"""Pass through to provider SequenceRuleAdminSession.update_sequence_rule"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if sequence_rule_form.is_for_update():
return self.update_sequence_rule(sequence_rule_form, *args, **kwargs)
else:
return self.create_sequence_rule(sequence_rule_form, *args, **kwargs) | Pass through to provider SequenceRuleAdminSession.update_sequence_rule | Below is the the instruction that describes the task:
### Input:
Pass through to provider SequenceRuleAdminSession.update_sequence_rule
### Response:
def save_sequence_rule(self, sequence_rule_form, *args, **kwargs):
"""Pass through to provider SequenceRuleAdminSession.update_sequence_rule"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if sequence_rule_form.is_for_update():
return self.update_sequence_rule(sequence_rule_form, *args, **kwargs)
else:
return self.create_sequence_rule(sequence_rule_form, *args, **kwargs) |
def element_wise(self, other, op):
"""
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
"""
if not isscalar(other) and not self.shape == other.shape:
raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape))
if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode:
raise NotImplementedError
if isscalar(other):
return self.map(lambda x: op(x, other))
if self.mode == 'local' and isinstance(other, ndarray):
return self._constructor(op(self.values, other)).__finalize__(self)
if self.mode == 'local' and isinstance(other, Data):
return self._constructor(op(self.values, other.values)).__finalize__(self)
if self.mode == 'spark' and isinstance(other, Data):
def func(record):
(k1, x), (k2, y) = record
return k1, op(x, y)
rdd = self.tordd().zip(other.tordd()).map(func)
barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split)
return self._constructor(barray).__finalize__(self) | Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract | Below is the the instruction that describes the task:
### Input:
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
### Response:
def element_wise(self, other, op):
"""
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
"""
if not isscalar(other) and not self.shape == other.shape:
raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape))
if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode:
raise NotImplementedError
if isscalar(other):
return self.map(lambda x: op(x, other))
if self.mode == 'local' and isinstance(other, ndarray):
return self._constructor(op(self.values, other)).__finalize__(self)
if self.mode == 'local' and isinstance(other, Data):
return self._constructor(op(self.values, other.values)).__finalize__(self)
if self.mode == 'spark' and isinstance(other, Data):
def func(record):
(k1, x), (k2, y) = record
return k1, op(x, y)
rdd = self.tordd().zip(other.tordd()).map(func)
barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split)
return self._constructor(barray).__finalize__(self) |
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel() | Least squares does not need to update terminal regions.
But it has to update the predictions. | Below is the the instruction that describes the task:
### Input:
Least squares does not need to update terminal regions.
But it has to update the predictions.
### Response:
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel() |
def _update_srcmap_file(self, sources, overwrite=True):
"""Check the contents of the source map file and generate
source maps for any components that are not present."""
if not os.path.isfile(self.files['srcmap']):
return
hdulist = fits.open(self.files['srcmap'])
hdunames = [hdu.name.upper() for hdu in hdulist]
srcmaps = {}
for src in sources:
if src.name.upper() in hdunames and not overwrite:
continue
self.logger.debug('Creating source map for %s', src.name)
srcmaps[src.name] = self._create_srcmap(src.name, src)
if srcmaps:
self.logger.debug(
'Updating source map file for component %s.', self.name)
srcmap_utils.update_source_maps(self.files['srcmap'], srcmaps,
logger=self.logger)
hdulist.close() | Check the contents of the source map file and generate
source maps for any components that are not present. | Below is the the instruction that describes the task:
### Input:
Check the contents of the source map file and generate
source maps for any components that are not present.
### Response:
def _update_srcmap_file(self, sources, overwrite=True):
"""Check the contents of the source map file and generate
source maps for any components that are not present."""
if not os.path.isfile(self.files['srcmap']):
return
hdulist = fits.open(self.files['srcmap'])
hdunames = [hdu.name.upper() for hdu in hdulist]
srcmaps = {}
for src in sources:
if src.name.upper() in hdunames and not overwrite:
continue
self.logger.debug('Creating source map for %s', src.name)
srcmaps[src.name] = self._create_srcmap(src.name, src)
if srcmaps:
self.logger.debug(
'Updating source map file for component %s.', self.name)
srcmap_utils.update_source_maps(self.files['srcmap'], srcmaps,
logger=self.logger)
hdulist.close() |
def index_list_for_sort_order(x: List[Any], key: Callable[[Any], Any] = None,
reverse: bool = False) -> List[int]:
"""
Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED.
Args:
x: data
key: function to be applied to the data to generate a sort key; this
function is passed as the ``key=`` parameter to :func:`sorted`;
the default is ``itemgetter(1)``
reverse: reverse the sort order?
Returns:
list of integer index values
Example:
.. code-block:: python
z = ["a", "c", "b"]
index_list_for_sort_order(z) # [0, 2, 1]
index_list_for_sort_order(z, reverse=True) # [1, 2, 0]
q = [("a", 9), ("b", 8), ("c", 7)]
index_list_for_sort_order(q, key=itemgetter(1))
"""
def key_with_user_func(idx_val: Tuple[int, Any]):
return key(idx_val[1])
if key:
sort_key = key_with_user_func
# see the simpler version below
else:
sort_key = itemgetter(1)
# enumerate, below, will return tuples of (index, value), so
# itemgetter(1) means sort by the value
index_value_list = sorted(enumerate(x), key=sort_key, reverse=reverse)
return [i for i, _ in index_value_list] | Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED.
Args:
x: data
key: function to be applied to the data to generate a sort key; this
function is passed as the ``key=`` parameter to :func:`sorted`;
the default is ``itemgetter(1)``
reverse: reverse the sort order?
Returns:
list of integer index values
Example:
.. code-block:: python
z = ["a", "c", "b"]
index_list_for_sort_order(z) # [0, 2, 1]
index_list_for_sort_order(z, reverse=True) # [1, 2, 0]
q = [("a", 9), ("b", 8), ("c", 7)]
index_list_for_sort_order(q, key=itemgetter(1)) | Below is the the instruction that describes the task:
### Input:
Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED.
Args:
x: data
key: function to be applied to the data to generate a sort key; this
function is passed as the ``key=`` parameter to :func:`sorted`;
the default is ``itemgetter(1)``
reverse: reverse the sort order?
Returns:
list of integer index values
Example:
.. code-block:: python
z = ["a", "c", "b"]
index_list_for_sort_order(z) # [0, 2, 1]
index_list_for_sort_order(z, reverse=True) # [1, 2, 0]
q = [("a", 9), ("b", 8), ("c", 7)]
index_list_for_sort_order(q, key=itemgetter(1))
### Response:
def index_list_for_sort_order(x: List[Any], key: Callable[[Any], Any] = None,
reverse: bool = False) -> List[int]:
"""
Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED.
Args:
x: data
key: function to be applied to the data to generate a sort key; this
function is passed as the ``key=`` parameter to :func:`sorted`;
the default is ``itemgetter(1)``
reverse: reverse the sort order?
Returns:
list of integer index values
Example:
.. code-block:: python
z = ["a", "c", "b"]
index_list_for_sort_order(z) # [0, 2, 1]
index_list_for_sort_order(z, reverse=True) # [1, 2, 0]
q = [("a", 9), ("b", 8), ("c", 7)]
index_list_for_sort_order(q, key=itemgetter(1))
"""
def key_with_user_func(idx_val: Tuple[int, Any]):
return key(idx_val[1])
if key:
sort_key = key_with_user_func
# see the simpler version below
else:
sort_key = itemgetter(1)
# enumerate, below, will return tuples of (index, value), so
# itemgetter(1) means sort by the value
index_value_list = sorted(enumerate(x), key=sort_key, reverse=reverse)
return [i for i, _ in index_value_list] |
def _load_scoped_variable_models(self):
""" Adds models for each scoped variable of the state """
self.scoped_variables = []
for scoped_variable in self.state.scoped_variables.values():
self._add_model(self.scoped_variables, scoped_variable, ScopedVariableModel) | Adds models for each scoped variable of the state | Below is the the instruction that describes the task:
### Input:
Adds models for each scoped variable of the state
### Response:
def _load_scoped_variable_models(self):
""" Adds models for each scoped variable of the state """
self.scoped_variables = []
for scoped_variable in self.state.scoped_variables.values():
self._add_model(self.scoped_variables, scoped_variable, ScopedVariableModel) |
def get_renderer(app, id):
"""Retrieve a renderer.
:param app: :class:`~flask.Flask` application to look ``id`` up on
:param id: Internal renderer id-string to look up
"""
renderer = app.extensions.get('nav_renderers', {})[id]
if isinstance(renderer, tuple):
mod_name, cls_name = renderer
mod = import_module(mod_name)
cls = mod
for name in cls_name.split('.'):
cls = getattr(cls, name)
return cls
return renderer | Retrieve a renderer.
:param app: :class:`~flask.Flask` application to look ``id`` up on
:param id: Internal renderer id-string to look up | Below is the the instruction that describes the task:
### Input:
Retrieve a renderer.
:param app: :class:`~flask.Flask` application to look ``id`` up on
:param id: Internal renderer id-string to look up
### Response:
def get_renderer(app, id):
"""Retrieve a renderer.
:param app: :class:`~flask.Flask` application to look ``id`` up on
:param id: Internal renderer id-string to look up
"""
renderer = app.extensions.get('nav_renderers', {})[id]
if isinstance(renderer, tuple):
mod_name, cls_name = renderer
mod = import_module(mod_name)
cls = mod
for name in cls_name.split('.'):
cls = getattr(cls, name)
return cls
return renderer |
def get_extra_radiation(doy, solar_constant=1366.1):
"""
Determine extraterrestrial radiation from day of year (using the spencer method).
Note:
[1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
Clear Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs.
SR1 and SR2
Args:
doy : array of integers representing the days of the year.
solar_constant : float, default 1366.1
The solar constant.
Returns:
dni_extra : float, array, or Series
The extraterrestrial radiation present in watts per square meter
on a surface which is normal to the sun. Pandas Timestamp and
DatetimeIndex inputs will yield a Pandas TimeSeries. All other
inputs will yield a float or an array of floats.
"""
# Calculates the day angle for the Earth's orbit around the Sun.
B = (2. * math.pi / 365.) * (doy - 1)
# Calculate R over R squared from the angle
RoverR0sqrd = (1.00011 + 0.034221 * math.cos(B) + 0.00128 * math.sin(B) +
0.000719 * math.cos(2 * B) + 7.7e-05 * math.sin(2 * B))
Ea = solar_constant * RoverR0sqrd
return Ea | Determine extraterrestrial radiation from day of year (using the spencer method).
Note:
[1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
Clear Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs.
SR1 and SR2
Args:
doy : array of integers representing the days of the year.
solar_constant : float, default 1366.1
The solar constant.
Returns:
dni_extra : float, array, or Series
The extraterrestrial radiation present in watts per square meter
on a surface which is normal to the sun. Pandas Timestamp and
DatetimeIndex inputs will yield a Pandas TimeSeries. All other
inputs will yield a float or an array of floats. | Below is the the instruction that describes the task:
### Input:
Determine extraterrestrial radiation from day of year (using the spencer method).
Note:
[1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
Clear Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs.
SR1 and SR2
Args:
doy : array of integers representing the days of the year.
solar_constant : float, default 1366.1
The solar constant.
Returns:
dni_extra : float, array, or Series
The extraterrestrial radiation present in watts per square meter
on a surface which is normal to the sun. Pandas Timestamp and
DatetimeIndex inputs will yield a Pandas TimeSeries. All other
inputs will yield a float or an array of floats.
### Response:
def get_extra_radiation(doy, solar_constant=1366.1):
"""
Determine extraterrestrial radiation from day of year (using the spencer method).
Note:
[1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
Clear Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs.
SR1 and SR2
Args:
doy : array of integers representing the days of the year.
solar_constant : float, default 1366.1
The solar constant.
Returns:
dni_extra : float, array, or Series
The extraterrestrial radiation present in watts per square meter
on a surface which is normal to the sun. Pandas Timestamp and
DatetimeIndex inputs will yield a Pandas TimeSeries. All other
inputs will yield a float or an array of floats.
"""
# Calculates the day angle for the Earth's orbit around the Sun.
B = (2. * math.pi / 365.) * (doy - 1)
# Calculate R over R squared from the angle
RoverR0sqrd = (1.00011 + 0.034221 * math.cos(B) + 0.00128 * math.sin(B) +
0.000719 * math.cos(2 * B) + 7.7e-05 * math.sin(2 * B))
Ea = solar_constant * RoverR0sqrd
return Ea |
def get_batch(sequence, size, start=0, endpoint=None, complete=False):
""" create a batched result record out of a sequence (catalog brains)
"""
batch = make_batch(sequence, size, start)
return {
"pagesize": batch.get_pagesize(),
"next": batch.make_next_url(),
"previous": batch.make_prev_url(),
"page": batch.get_pagenumber(),
"pages": batch.get_numpages(),
"count": batch.get_sequence_length(),
"items": make_items_for([b for b in batch.get_batch()],
endpoint, complete=complete),
} | create a batched result record out of a sequence (catalog brains) | Below is the the instruction that describes the task:
### Input:
create a batched result record out of a sequence (catalog brains)
### Response:
def get_batch(sequence, size, start=0, endpoint=None, complete=False):
""" create a batched result record out of a sequence (catalog brains)
"""
batch = make_batch(sequence, size, start)
return {
"pagesize": batch.get_pagesize(),
"next": batch.make_next_url(),
"previous": batch.make_prev_url(),
"page": batch.get_pagenumber(),
"pages": batch.get_numpages(),
"count": batch.get_sequence_length(),
"items": make_items_for([b for b in batch.get_batch()],
endpoint, complete=complete),
} |
def _assert_recur_is_tail(node: Node) -> None: # pylint: disable=too-many-branches
"""Assert that `recur` forms only appear in the tail position of this
or child AST nodes.
`recur` forms may only appear in `do` nodes (both literal and synthetic
`do` nodes) and in either the :then or :else expression of an `if` node."""
if node.op == NodeOp.DO:
assert isinstance(node, Do)
for child in node.statements:
_assert_no_recur(child)
_assert_recur_is_tail(node.ret)
elif node.op in {NodeOp.FN, NodeOp.FN_METHOD, NodeOp.METHOD}:
assert isinstance(node, (Fn, FnMethod, Method))
node.visit(_assert_recur_is_tail)
elif node.op == NodeOp.IF:
assert isinstance(node, If)
_assert_no_recur(node.test)
_assert_recur_is_tail(node.then)
_assert_recur_is_tail(node.else_)
elif node.op in {NodeOp.LET, NodeOp.LETFN}:
assert isinstance(node, (Let, LetFn))
for binding in node.bindings:
assert binding.init is not None
_assert_no_recur(binding.init)
_assert_recur_is_tail(node.body)
elif node.op == NodeOp.LOOP:
assert isinstance(node, Loop)
for binding in node.bindings:
assert binding.init is not None
_assert_no_recur(binding.init)
elif node.op == NodeOp.RECUR:
pass
elif node.op == NodeOp.TRY:
assert isinstance(node, Try)
_assert_recur_is_tail(node.body)
for catch in node.catches:
_assert_recur_is_tail(catch)
if node.finally_:
_assert_no_recur(node.finally_)
else:
node.visit(_assert_no_recur) | Assert that `recur` forms only appear in the tail position of this
or child AST nodes.
`recur` forms may only appear in `do` nodes (both literal and synthetic
`do` nodes) and in either the :then or :else expression of an `if` node. | Below is the the instruction that describes the task:
### Input:
Assert that `recur` forms only appear in the tail position of this
or child AST nodes.
`recur` forms may only appear in `do` nodes (both literal and synthetic
`do` nodes) and in either the :then or :else expression of an `if` node.
### Response:
def _assert_recur_is_tail(node: Node) -> None: # pylint: disable=too-many-branches
"""Assert that `recur` forms only appear in the tail position of this
or child AST nodes.
`recur` forms may only appear in `do` nodes (both literal and synthetic
`do` nodes) and in either the :then or :else expression of an `if` node."""
if node.op == NodeOp.DO:
assert isinstance(node, Do)
for child in node.statements:
_assert_no_recur(child)
_assert_recur_is_tail(node.ret)
elif node.op in {NodeOp.FN, NodeOp.FN_METHOD, NodeOp.METHOD}:
assert isinstance(node, (Fn, FnMethod, Method))
node.visit(_assert_recur_is_tail)
elif node.op == NodeOp.IF:
assert isinstance(node, If)
_assert_no_recur(node.test)
_assert_recur_is_tail(node.then)
_assert_recur_is_tail(node.else_)
elif node.op in {NodeOp.LET, NodeOp.LETFN}:
assert isinstance(node, (Let, LetFn))
for binding in node.bindings:
assert binding.init is not None
_assert_no_recur(binding.init)
_assert_recur_is_tail(node.body)
elif node.op == NodeOp.LOOP:
assert isinstance(node, Loop)
for binding in node.bindings:
assert binding.init is not None
_assert_no_recur(binding.init)
elif node.op == NodeOp.RECUR:
pass
elif node.op == NodeOp.TRY:
assert isinstance(node, Try)
_assert_recur_is_tail(node.body)
for catch in node.catches:
_assert_recur_is_tail(catch)
if node.finally_:
_assert_no_recur(node.finally_)
else:
node.visit(_assert_no_recur) |